// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004  IBM Corp.
 * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
 * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020  IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
#include <asm/cputhreads.h>

struct umem_info {
	__be64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct range *ranges;
};

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

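/**
 * arch_check_excluded_range - Check if a memory range overlaps any of the
 *                             ranges excluded from kexec buffer placement.
 * @image:                     Kexec image.
 * @start:                     Start address of the memory range.
 * @end:                       End address of the memory range.
 *
 * Returns 1 if the range overlaps an excluded range, 0 otherwise.
 */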
int arch_check_excluded_range(struct kimage *image, unsigned long start,
			      unsigned long end)
{
	struct crash_mem *emem;
	int i;

	emem = image->arch.exclude_ranges;
	for (i = 0; i < emem->nr_ranges; i++)
		if (start < emem->ranges[i].end && end > emem->ranges[i].start)
			return 1;

	return 0;
}

#ifdef CONFIG_CRASH_DUMP
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * On reallocation failure, the old buffer is left intact for the caller to
 * free via um_info->buf.
 *
 * Returns buffer on success, NULL on error.
 */
static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	__be64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

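	/*
	 * Grow the buffer by a fixed chunk rather than per entry to limit
	 * the number of reallocations. krealloc() preserves the existing
	 * entries; on failure it returns NULL and leaves the old buffer
	 * untouched.
	 */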
	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
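		/*
		 * Add the range as-is if it lies entirely within
		 * [base, end]; clamp it to that window if it only
		 * partially overlaps.
		 */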
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}

/**
 * kdump_setup_usable_lmb - Callback invoked by walk_drmem_lmbs for every LMB
 *                          to set up its usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!\n");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

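	/* Reserve the entry at tmp_idx for the range count, filled in below */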
	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int node;
	char path[NODE_PATH_LEN];
	int i, ret;
	u64 base, size;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		/* Drop the reference taken above before returning */
		ret = -EOVERFLOW;
		goto out;
	}
	kexec_dprintk("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	um_info->idx  = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
	for (i = 0; ; i++) {
		ret = of_property_read_reg(dn, i, &base, &size);
		if (ret)
			break;

		ret = add_usable_mem(um_info, base, base + size - 1);
		if (ret)
			goto out;
	}

	// No reg or empty reg? Skip this node.
	if (i == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		kexec_dprintk("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf  = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx  = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node\n",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for the backup region as the data
	 * will be copied from the backup source, after crash, in the
	 * purgatory. But as the segment-loading code doesn't recognize such
	 * segments, setup a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is set up for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	/* Program headers immediately follow the ELF header */
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			kexec_dprintk("Backup region offset updated to 0x%lx\n",
				      image->arch.backup_start);
			return;
		}
	}
}

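/*
 * With crash hotplug support, the elfcorehdr segment may be rewritten in
 * place when memory is hot-added or removed. Pad the segment so it can hold
 * program headers for up to CONFIG_CRASH_MAX_MEMORY_RANGES memory ranges
 * without being relocated.
 */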
static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
{
#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
	unsigned int extra_sz = 0;

	if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
		pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
	else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
		pr_warn("Configured crash mem ranges may not be enough\n");
	else
		extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);

	return extra_sz;
#endif
	return 0;
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = headers_sz;
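	/*
	 * Reserve room beyond bufsz so the elfcorehdr can grow in place
	 * if memory is hot-added later (see kdump_extra_elfcorehdr_size()).
	 */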
	kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;
	image->elf_headers_sz = headers_sz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
#endif

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         the common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		ret = of_property_read_u64(dn, "opal-base-address", &val);
		if (ret)
			goto out;

		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		ret = of_property_read_u64(dn, "opal-entry-address", &val);
		if (ret)
			goto out;
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols\n");
	of_node_put(dn);
	return ret;
}

/**
 * cpu_node_size - Compute the maximum size of a CPU node in the FDT.
 *                 This is computed only once and the value is cached in
 *                 a static variable.
 * Returns the max size of a CPU node in the FDT.
 */
static unsigned int cpu_node_size(void)
{
	static unsigned int size;
	struct device_node *dn;
	struct property *pp;

	/*
	 * Don't compute it twice, we are assuming that the per CPU node size
	 * doesn't change during the system's life.
	 */
	if (size)
		return size;

	dn = of_find_node_by_type(NULL, "cpu");
	if (WARN_ON_ONCE(!dn)) {
		// Unlikely to happen
		return 0;
	}

	/*
	 * We compute the sub node size for a CPU node, assuming it
	 * will be the same for all.
	 */
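	/* Node name, plus a rough allowance for FDT tag and padding overhead */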
	size += strlen(dn->name) + 5;
	for_each_property_of_node(dn, pp) {
		size += strlen(pp->name);
		size += pp->length;
	}

	of_node_put(dn);
	return size;
}

static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image, unsigned int cpu_nodes)
{
	unsigned int extra_size = 0;
	u64 usm_entries;
#ifdef CONFIG_CRASH_HOTPLUG
	unsigned int possible_cpu_nodes;
#endif

	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Estimate the number of
	 * usable memory entries and use it for FDT size estimation.
	 */
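	/*
	 * Estimate: one u64 (the range count) per LMB, plus a (base, size)
	 * pair for every LMB that may intersect the crash reserved region.
	 */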
	if (drmem_lmb_size()) {
		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
		extra_size += (unsigned int)(usm_entries * sizeof(u64));
	}

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * Make sure enough space is reserved to accommodate possible CPU nodes
	 * in the crash FDT. This allows packing possible CPU nodes which are
	 * not yet present in the system without regenerating the entire FDT.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		possible_cpu_nodes = num_possible_cpus() / threads_per_core;
		if (possible_cpu_nodes > cpu_nodes)
			extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
	}
#endif

	return extra_size;
}

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 * @rmem:                       Reserved memory ranges.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem)
{
	struct device_node *dn;
	unsigned int cpu_nodes = 0, extra_size = 0;

	// Budget some space for the password blob. There's already extra space
	// for the key name
	if (plpks_is_available())
		extra_size += (unsigned int)plpks_get_passwordlen();

	/* Get the number of CPU nodes in the current device tree */
	for_each_node_by_type(dn, "cpu") {
		cpu_nodes++;
	}

	/* Consider extra space for CPU nodes added since the boot time */
	if (cpu_nodes > boot_cpu_node_count)
		extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();

	/* Consider extra space for reserved memory ranges if any */
	if (rmem->nr_ranges > 0)
		extra_size += sizeof(struct fdt_reserve_entry) * rmem->nr_ranges;

	return extra_size + kdump_extra_fdt_size_ppc64(image, cpu_nodes);
}

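/*
 * Copy @propname from the live device tree node @dn into the target FDT,
 * or delete it from the FDT if the live tree no longer has it.
 */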
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}

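/*
 * PCI DMA window properties may be created or changed at runtime (e.g. by
 * dynamic DMA window setup on pseries), so the values in the boot-time FDT
 * can be stale. Refresh them in the target FDT from the live device tree.
 */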
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
	}

	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @rmem:                Reserved memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem)
{
	struct crash_mem *umem = NULL;
	int i, nr_ranges, ret;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}
#endif

	/* Update cpus nodes information to account for hotplug CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;

	/* Update memory reserve map */
	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	// If we have PLPKS active, we need to provide the password to the new kernel
	if (plpks_is_available())
		ret = plpks_populate_fdt(fdt);

out:
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}