// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial and easy.  For
 * others it is still a simple, predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
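
/*
 * Illustrative sketch (not part of the description above): the
 * descriptor list kimage_add_entry() builds is a flat stream of
 * kimage_entry_t values, each a page-aligned physical address with
 * flag bits in the low bits.  For a two-page segment destined for
 * physical address 0x1000000 the stream might look like:
 *
 *	0x1000000 | IND_DESTINATION	set the running destination
 *	<src pfn0 << PAGE_SHIFT> | IND_SOURCE	copy this page to 0x1000000
 *	<src pfn1 << PAGE_SHIFT> | IND_SOURCE	copy this page to 0x1001000
 *	...
 *	IND_DONE			end of the list
 *
 * The destination advances by PAGE_SIZE after every IND_SOURCE entry,
 * and an IND_INDIRECTION entry points at the next page of entries so
 * the stream may span several non-contiguous pages.
 */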

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks, ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}
#endif

	/*
	 * The destination addresses are searched from system RAM rather than
	 * being allocated from the buddy allocator, so they are not guaranteed
	 * to be accepted by the current kernel.  Accept the destination
	 * addresses before kexec swaps their content with the segments' source
	 * pages to avoid accessing memory before it is accepted.
	 */
	for (i = 0; i < nr_segments; i++)
		accept_memory(image->segment[i].mem, image->segment[i].memsz);

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_index = -1;
	image->elfcorehdr_updated = false;
#endif

	return image;
}

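/*
 * kimage_is_destination_range - return 1 if [start, end] overlaps any
 * segment's destination memory, 0 otherwise.
 */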
int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		if ((end >= mstart) && (start <= mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = (epfn << PAGE_SHIFT) - 1;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = ALIGN(image->control_page, size);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = ALIGN(mend, size);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end + 1;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
#endif


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
#endif
	}

	return pages;
}

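/*
 * Append @entry to the image's entry stream, chaining in a freshly
 * allocated indirection page when the current page of entries is full.
 */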
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

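/*
 * Walk each entry in the image's entry stream, following IND_INDIRECTION
 * entries into the next page of entries and stopping at IND_DONE.
 */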
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

#ifdef CONFIG_CRASH_DUMP
	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}
#endif

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This can be hit if an
	 * error occurred long after the buffers were allocated.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

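/*
 * Return a pointer to the IND_SOURCE entry whose implied destination
 * address equals @page, or NULL if @page is not currently used as a
 * destination.
 */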
static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.   If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE - 1))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr  += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr  += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
#endif

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
#endif
	}

	return result;
}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};
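
/*
 * Illustrative admin usage (sketch only): these limits are exposed via
 * the sysctls registered further below, e.g.
 *
 *	sysctl kernel.kexec_load_limit_reboot=1
 *
 * allows at most one more reboot-type image load.  -1 (the default)
 * means unlimited, and an existing limit can only be lowered, never
 * raised.
 */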

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(const struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct kexec_load_limit *limit = table->data;
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};
	int ret;

	if (write) {
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		if (val < 0)
			return -EINVAL;

		mutex_lock(&limit->mutex);
		if (limit->limit != -1 && val >= limit->limit)
			ret = -EINVAL;
		else
			limit->limit = val;
		mutex_unlock(&limit->mutex);

		return ret;
	}

	mutex_lock(&limit->mutex);
	val = limit->limit;
	mutex_unlock(&limit->mutex);

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static const struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "kexec_load_limit_panic",
		.data		= &load_limit_panic,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
	{
		.procname	= "kexec_load_limit_reboot",
		.data		= &load_limit_reboot,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

bool kexec_load_permitted(int kexec_image_type)
{
	struct kexec_load_limit *limit;

	/*
	 * Only the superuser can use the kexec syscall, and only if it
	 * has not been disabled.
	 */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return false;

	/* Check the limit counter and decrease it. */
	limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
		&load_limit_panic : &load_limit_reboot;
	mutex_lock(&limit->mutex);
	if (!limit->limit) {
		mutex_unlock(&limit->mutex);
		return false;
	}
	if (limit->limit != -1)
		limit->limit--;
	mutex_unlock(&limit->mutex);

	return true;
}

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!kexec_trylock())
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur
		 * before creating an image and before jumping from the
		 * restore kernel to the image one, so it uses the same
		 * device callbacks as those two flows.
		 */
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		console_suspend_all();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/*
		 * dpm_suspend_end() must be called after dpm_suspend_start()
		 * to complete the transition, like in the hibernation flows
		 * mentioned above.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();
		syscore_shutdown();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur after
		 * creating an image and after the image kernel has got control
		 * back, and in case the devices have been reset or otherwise
		 * manipulated in the meantime, it uses the device callbacks
		 * used by the latter.
		 */
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		console_resume_all();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	kexec_unlock();
	return error;
}