Lines Matching +full:start +full:- +full:up
1 // SPDX-License-Identifier: GPL-2.0-only
12 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
28 #include <asm/crashdump-ppc64.h>
35 __be64 *buf; /* data buffer for usable-memory property */
40 /* usable memory ranges to look up */
51 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
52 * regions like opal/rtas, tce-table, initrd,
54 * setting up kexec load segments.
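The body elided between these matches chains per-region helpers and then sorts the result; a hedged sketch, with helper names assumed from arch/powerpc/kexec/ranges.c (not shown in these matches):

	ret = add_tce_mem_ranges(mem_ranges);
	if (!ret)
		ret = add_initrd_mem_range(mem_ranges);
	if (!ret)
		ret = add_rtas_mem_range(mem_ranges);
	if (!ret)
		ret = add_opal_mem_range(mem_ranges);
	if (!ret)
		ret = add_reserved_mem_ranges(mem_ranges);
	if (!ret)
		sort_memory_ranges(*mem_ranges, true);	/* sort and merge */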
100 * get_usable_memory_ranges - Get usable memory ranges. This list includes
101 * regions like crashkernel, opal/rtas & tce-table,
114 * instead of [crashk_res.start, crashk_res.end] to work around it. in get_usable_memory_ranges()
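The workaround the comment describes starts the usable range at 0 rather than crashk_res.start; presumably the elided call reads roughly like this (add_mem_range() takes a base and a size, so the inclusive end becomes end + 1):

	/* Sketch: cover [0, crashk_res.end] so early boot finds low memory. */
	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);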
138 * get_crash_memory_ranges - Get crash memory ranges. This list includes
153 u64 size = end - base; in get_crash_memory_ranges()
159 size -= BACKUP_SRC_SIZE; in get_crash_memory_ranges()
169 if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) in get_crash_memory_ranges()
175 if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { in get_crash_memory_ranges()
182 ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); in get_crash_memory_ranges()
187 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL in get_crash_memory_ranges()
189 * crash, they should actually be backed up just like the in get_crash_memory_ranges()
213 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
241 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
243 * for the buffer. If found, sets kbuf->mem.
253 int ret = -EADDRNOTAVAIL; in __locate_mem_hole_top_down()
254 phys_addr_t start, end; in __locate_mem_hole_top_down() local
257 for_each_mem_range_rev(i, &start, &end) { in __locate_mem_hole_top_down()
259 * memblock uses [start, end) convention while it is in __locate_mem_hole_top_down()
260 * [start, end] here. Fix the off-by-one to have the in __locate_mem_hole_top_down()
263 end -= 1; in __locate_mem_hole_top_down()
265 if (start > buf_max) in __locate_mem_hole_top_down()
273 if (start < buf_min) in __locate_mem_hole_top_down()
274 start = buf_min; in __locate_mem_hole_top_down()
278 start = ALIGN(start, kbuf->buf_align); in __locate_mem_hole_top_down()
279 if (start < end && (end - start + 1) >= kbuf->memsz) { in __locate_mem_hole_top_down()
280 /* Suitable memory range found. Set kbuf->mem */ in __locate_mem_hole_top_down()
281 kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1, in __locate_mem_hole_top_down()
282 kbuf->buf_align); in __locate_mem_hole_top_down()
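The same scan can be exercised outside the kernel; a minimal, self-contained sketch (userspace C, a hypothetical range table standing in for memblock, power-of-two alignment assumed):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

struct range { uint64_t start, end; };	/* inclusive bounds, sorted */

/* Walk ranges from the top; place the buffer as high as it fits. */
static int find_hole_top_down(const struct range *r, int nr,
			      uint64_t buf_min, uint64_t buf_max,
			      uint64_t memsz, uint64_t align, uint64_t *mem)
{
	for (int i = nr - 1; i >= 0; i--) {
		uint64_t start = r[i].start, end = r[i].end;

		if (start > buf_max)
			continue;
		if (end < buf_min)	/* below the window: no hole */
			break;
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN_UP(start, align);
		if (start < end && (end - start + 1) >= memsz) {
			/* Highest aligned base that still fits. */
			*mem = ALIGN_DOWN(end - memsz + 1, align);
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct range mem[] = { { 0x0, 0xffffff }, { 0x2000000, 0x3ffffff } };
	uint64_t where;

	if (!find_hole_top_down(mem, 2, 0, 0x3ffffff, 0x10000, 0x1000, &where))
		printf("hole at 0x%llx\n", (unsigned long long)where);
	return 0;
}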
292 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
305 int i, ret = 0, err = -EADDRNOTAVAIL; in locate_mem_hole_top_down_ppc64()
306 u64 start, end, tmin, tmax; in locate_mem_hole_top_down_ppc64() local
309 for (i = (emem->nr_ranges - 1); i >= 0; i--) { in locate_mem_hole_top_down_ppc64()
310 start = emem->ranges[i].start; in locate_mem_hole_top_down_ppc64()
311 end = emem->ranges[i].end; in locate_mem_hole_top_down_ppc64()
313 if (start > tmax) in locate_mem_hole_top_down_ppc64()
323 tmax = start - 1; in locate_mem_hole_top_down_ppc64()
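Between these matches, the function carves candidate windows out of [buf_min, buf_max] by walking the sorted exclude list from the top; a condensed sketch of that interval logic, declarations as in the fragment:

	/* Sketch: [tmin, tmax] is the candidate window between excludes. */
	tmin = buf_min;
	tmax = buf_max;
	for (i = emem->nr_ranges - 1; i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)	/* exclude range above the window */
			continue;

		if (end < tmax) {	/* gap between this range and tmax */
			tmin = (end < buf_min) ? buf_min : end + 1;
			if (!__locate_mem_hole_top_down(kbuf, tmin, tmax))
				return 0;
		}

		tmax = start - 1;	/* resume search below this range */
		if (tmax < buf_min)
			return err;
	}
	/* Finally try the window below the lowest exclude range. */
	return __locate_mem_hole_top_down(kbuf, buf_min, tmax);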
340 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
342 * for the buffer. If found, sets kbuf->mem.
352 int ret = -EADDRNOTAVAIL; in __locate_mem_hole_bottom_up()
353 phys_addr_t start, end; in __locate_mem_hole_bottom_up() local
356 for_each_mem_range(i, &start, &end) { in __locate_mem_hole_bottom_up()
358 * memblock uses [start, end) convention while it is in __locate_mem_hole_bottom_up()
359 * [start, end] here. Fix the off-by-one to have the in __locate_mem_hole_bottom_up()
362 end -= 1; in __locate_mem_hole_bottom_up()
368 if (start > buf_max) in __locate_mem_hole_bottom_up()
372 if (start < buf_min) in __locate_mem_hole_bottom_up()
373 start = buf_min; in __locate_mem_hole_bottom_up()
377 start = ALIGN(start, kbuf->buf_align); in __locate_mem_hole_bottom_up()
378 if (start < end && (end - start + 1) >= kbuf->memsz) { in __locate_mem_hole_bottom_up()
379 /* Suitable memory range found. Set kbuf->mem */ in __locate_mem_hole_bottom_up()
380 kbuf->mem = start; in __locate_mem_hole_bottom_up()
390 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
391 * suitable buffer with a bottom-up approach.
403 int i, ret = 0, err = -EADDRNOTAVAIL; in locate_mem_hole_bottom_up_ppc64()
404 u64 start, end, tmin, tmax; in locate_mem_hole_bottom_up_ppc64() local
407 for (i = 0; i < emem->nr_ranges; i++) { in locate_mem_hole_bottom_up_ppc64()
408 start = emem->ranges[i].start; in locate_mem_hole_bottom_up_ppc64()
409 end = emem->ranges[i].end; in locate_mem_hole_bottom_up_ppc64()
414 if (start > tmin) { in locate_mem_hole_bottom_up_ppc64()
415 tmax = (start > buf_max ? buf_max : start - 1); in locate_mem_hole_bottom_up_ppc64()
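The bottom-up walk mirrors the top-down one; a condensed sketch:

	/* Sketch: grow tmin upward past each exclude range. */
	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)		/* exclude range below the window */
			continue;

		if (start > tmin) {	/* gap between tmin and this range */
			tmax = (start > buf_max) ? buf_max : start - 1;
			if (!__locate_mem_hole_bottom_up(kbuf, tmin, tmax))
				return 0;
		}

		tmin = end + 1;		/* resume search above this range */
		if (tmin > buf_max)
			return err;
	}
	/* Finally try the window above the highest exclude range. */
	return __locate_mem_hole_bottom_up(kbuf, tmin, buf_max);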
438 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
442 * Frees up the old buffer if memory reallocation fails.
451 if ((um_info->idx + cnt) <= um_info->max_entries) in check_realloc_usable_mem()
452 return um_info->buf; in check_realloc_usable_mem()
454 new_size = um_info->size + MEM_RANGE_CHUNK_SZ; in check_realloc_usable_mem()
455 tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL); in check_realloc_usable_mem()
457 um_info->buf = tbuf; in check_realloc_usable_mem()
458 um_info->size = new_size; in check_realloc_usable_mem()
459 um_info->max_entries = (um_info->size / sizeof(u64)); in check_realloc_usable_mem()
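The grow-by-chunk pattern here is generic; a minimal userspace analogue (realloc standing in for krealloc, chunk size assumed), which also frees the old buffer on failure as the kernel-doc above describes:

#include <stdint.h>
#include <stdlib.h>

#define MEM_RANGE_CHUNK_SZ 2048		/* growth step in bytes (assumed) */

struct umem_info {
	uint64_t *buf;			/* holds (base, size) u64 pairs */
	unsigned int idx;		/* next free u64 slot */
	unsigned int size;		/* buffer size in bytes */
	unsigned int max_entries;	/* u64 slots that fit in buf */
};

static uint64_t *check_realloc(struct umem_info *um, unsigned int cnt)
{
	if (um->idx + cnt <= um->max_entries)
		return um->buf;		/* enough room already */

	unsigned int new_size = um->size + MEM_RANGE_CHUNK_SZ;
	uint64_t *tbuf = realloc(um->buf, new_size);
	if (!tbuf) {
		free(um->buf);		/* realloc kept the old buffer */
		um->buf = NULL;
		return NULL;
	}
	um->buf = tbuf;
	um->size = new_size;
	um->max_entries = new_size / sizeof(uint64_t);
	return tbuf;
}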
466 * add_usable_mem - Add the usable memory ranges within the given memory range
480 for (i = 0; i < um_info->nr_ranges; i++) { in add_usable_mem()
482 loc_base = um_info->ranges[i].start; in add_usable_mem()
483 loc_end = um_info->ranges[i].end; in add_usable_mem()
496 return -ENOMEM; in add_usable_mem()
498 um_info->buf[um_info->idx++] = cpu_to_be64(loc_base); in add_usable_mem()
499 um_info->buf[um_info->idx++] = in add_usable_mem()
500 cpu_to_be64(loc_end - loc_base + 1); in add_usable_mem()
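Underneath this loop is a plain intersection of inclusive ranges; a self-contained sketch of the clamp that yields each (base, size) pair (the kernel then stores both through cpu_to_be64, since the device tree is big-endian):

#include <stdint.h>

/* Intersect [base, end] with [loc_base, loc_end], all bounds inclusive. */
static int clamp_range(uint64_t base, uint64_t end,
		       uint64_t loc_base, uint64_t loc_end,
		       uint64_t *out_base, uint64_t *out_size)
{
	if (loc_end < base || loc_base > end)
		return 0;			/* disjoint */
	if (loc_base < base)
		loc_base = base;
	if (loc_end > end)
		loc_end = end;
	*out_base = loc_base;
	*out_size = loc_end - loc_base + 1;	/* inclusive => +1 */
	return 1;
}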
508 * kdump_setup_usable_lmb - This is a callback function that gets called by
512 * @usm: linux,drconf-usable-memory property value.
526 * linux,drconf-usable-memory property. in kdump_setup_usable_lmb()
529 pr_err("linux,drconf-usable-memory property already exists!"); in kdump_setup_usable_lmb()
530 return -EINVAL; in kdump_setup_usable_lmb()
534 tmp_idx = um_info->idx; in kdump_setup_usable_lmb()
536 return -ENOMEM; in kdump_setup_usable_lmb()
538 um_info->idx++; in kdump_setup_usable_lmb()
539 base = lmb->base_addr; in kdump_setup_usable_lmb()
540 end = base + drmem_lmb_size() - 1; in kdump_setup_usable_lmb()
547 um_info->buf[tmp_idx] = in kdump_setup_usable_lmb()
548 cpu_to_be64((um_info->idx - tmp_idx - 1) / 2); in kdump_setup_usable_lmb()
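The index bookkeeping in this callback is a reserve-then-backfill pattern: one u64 slot is held back for the range count, the (base, size) pairs are appended, and the count is written last. A condensed sketch (endian conversion omitted):

	tmp_idx = um_info->idx;		/* remember the count slot */
	um_info->idx++;			/* pairs start after it */
	/* ... add_usable_mem() appends (base, size) u64 pairs ... */
	um_info->buf[tmp_idx] = (um_info->idx - tmp_idx - 1) / 2;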
556 * add_usable_mem_property - Add usable memory property for the given
575 if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) { in add_usable_mem_property()
578 return -EOVERFLOW; in add_usable_mem_property()
586 ret = -EINVAL; in add_usable_mem_property()
596 um_info->idx = 0; in add_usable_mem_property()
598 ret = -ENOMEM; in add_usable_mem_property()
617 end = base + of_read_number(prop, n_mem_size_cells) - 1; in add_usable_mem_property()
627 * Write (0,0) tuple in linux,usable-memory property for in add_usable_mem_property()
630 if (um_info->idx == 0) { in add_usable_mem_property()
631 um_info->buf[0] = 0; in add_usable_mem_property()
632 um_info->buf[1] = 0; in add_usable_mem_property()
633 um_info->idx = 2; in add_usable_mem_property()
636 ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf, in add_usable_mem_property()
637 (um_info->idx * sizeof(u64))); in add_usable_mem_property()
646 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
647 * and linux,drconf-usable-memory DT properties as
662 return -ENOENT; in update_usable_mem_fdt()
665 node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory"); in update_usable_mem_fdt()
666 if (node == -FDT_ERR_NOTFOUND) in update_usable_mem_fdt()
669 pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n"); in update_usable_mem_fdt()
670 return -EINVAL; in update_usable_mem_fdt()
677 /* Memory ranges to look up */ in update_usable_mem_fdt()
678 um_info.ranges = &(usable_mem->ranges[0]); in update_usable_mem_fdt()
679 um_info.nr_ranges = usable_mem->nr_ranges; in update_usable_mem_fdt()
681 dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); in update_usable_mem_fdt()
687 pr_err("Could not setup linux,drconf-usable-memory property for kdump\n"); in update_usable_mem_fdt()
691 ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory", in update_usable_mem_fdt()
694 pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s", in update_usable_mem_fdt()
701 * Walk through each memory node and set linux,usable-memory property in update_usable_mem_fdt()
707 pr_err("Failed to set linux,usable-memory property for %s node", in update_usable_mem_fdt()
708 dn->full_name); in update_usable_mem_fdt()
720 * load_backup_segment - Locate a memory hole to place the backup region.
741 return -ENOMEM; in load_backup_segment()
743 kbuf->buffer = buf; in load_backup_segment()
744 kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; in load_backup_segment()
745 kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE; in load_backup_segment()
746 kbuf->top_down = false; in load_backup_segment()
754 image->arch.backup_buf = buf; in load_backup_segment()
755 image->arch.backup_start = kbuf->mem; in load_backup_segment()
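This is the generic kexec_file handoff: fill a struct kexec_buf, let kexec_add_buffer() find and claim a hole, then record where it landed. A hedged sketch of the elided middle (kexec_add_buffer() is the real kernel API; error handling condensed):

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}
	/* kbuf->mem now holds the physical address the core picked. */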
760 * update_backup_region_phdr - Update backup region's offset for the core to
776 for (i = 0; i < ehdr->e_phnum; i++) { in update_backup_region_phdr()
777 if (phdr->p_paddr == BACKUP_SRC_START) { in update_backup_region_phdr()
778 phdr->p_offset = image->arch.backup_start; in update_backup_region_phdr()
780 image->arch.backup_start); in update_backup_region_phdr()
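The elided loop body walks the program headers that follow the ELF header; a hedged sketch of the full walk:

	phdr = (Elf64_Phdr *)(ehdr + 1);	/* phdrs follow the ehdr */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			/* Point the core file at the copied backup data. */
			phdr->p_offset = image->arch.backup_start;
			break;
		}
	}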
787 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
815 kbuf->buffer = headers; in load_elfcorehdr_segment()
816 kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; in load_elfcorehdr_segment()
817 kbuf->bufsz = kbuf->memsz = headers_sz; in load_elfcorehdr_segment()
818 kbuf->top_down = false; in load_elfcorehdr_segment()
826 image->elf_load_addr = kbuf->mem; in load_elfcorehdr_segment()
827 image->elf_headers_sz = headers_sz; in load_elfcorehdr_segment()
828 image->elf_headers = headers; in load_elfcorehdr_segment()
835 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
847 /* Load backup segment - first 64K bytes of the crashing kernel */ in load_crashdump_segments_ppc64()
853 kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem); in load_crashdump_segments_ppc64()
855 /* Load elfcorehdr segment - to export crashing kernel's vmcore */ in load_crashdump_segments_ppc64()
862 image->elf_load_addr, kbuf->bufsz, kbuf->memsz); in load_crashdump_segments_ppc64()
868 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
891 if (image->type == KEXEC_TYPE_CRASH) { in setup_purgatory_ppc64()
908 &image->arch.backup_start, in setup_purgatory_ppc64()
909 sizeof(image->arch.backup_start), in setup_purgatory_ppc64()
919 of_property_read_u64(dn, "opal-base-address", &val); in setup_purgatory_ppc64()
925 of_property_read_u64(dn, "opal-entry-address", &val); in setup_purgatory_ppc64()
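Purgatory receives its inputs by having global symbols in its ELF image patched; kexec_purgatory_get_set_symbol() is the generic helper for that (the final false argument means write into purgatory rather than read). A hedged sketch, with the purgatory symbol name assumed:

	if (!of_property_read_u64(dn, "opal-base-address", &val))
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);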
937 * cpu_node_size - Compute the size of a CPU node in the FDT.
965 size += strlen(dn->name) + 5; in cpu_node_size()
967 size += strlen(pp->name); in cpu_node_size()
968 size += pp->length; in cpu_node_size()
976 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
993 if (image->type != KEXEC_TYPE_CRASH) in kexec_extra_fdt_size_ppc64()
997 * For kdump kernel, account for linux,usable-memory and in kexec_extra_fdt_size_ppc64()
998 * linux,drconf-usable-memory properties. Get an approximate on the in kexec_extra_fdt_size_ppc64()
1017 extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size(); in kexec_extra_fdt_size_ppc64()
1023 * add_node_props - Reads node properties from device node structure and add
1037 return -EINVAL; in add_node_props()
1040 ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length); in add_node_props()
1042 pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret)); in add_node_props()
1050 * update_cpus_node - Update cpus node of flattened device tree using of_root
1062 if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) { in update_cpus_node()
1072 return -EINVAL; in update_cpus_node()
1080 return -EINVAL; in update_cpus_node()
1092 cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name); in update_cpus_node()
1094 pr_err("Unable to add %s subnode: %s\n", dn->full_name, in update_cpus_node()
1123 return -FDT_ERR_NOTFOUND; in copy_property()
1140 ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window"); in update_pci_dma_nodes()
1156 * setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
1175 * Restrict memory usage for the kdump kernel by setting up in setup_new_fdt_ppc64()
1178 if (image->type == KEXEC_TYPE_CRASH) { in setup_new_fdt_ppc64()
1185 pr_err("Error setting up usable-memory property for kdump kernel\n"); in setup_new_fdt_ppc64()
1191 * first 64K of RAM, which will be backed up. in setup_new_fdt_ppc64()
1194 crashk_res.start - BACKUP_SRC_SIZE); in setup_new_fdt_ppc64()
1202 ret = fdt_add_mem_rsv(fdt, image->arch.backup_start, in setup_new_fdt_ppc64()
1229 nr_ranges = rmem ? rmem->nr_ranges : 0; in setup_new_fdt_ppc64()
1233 base = rmem->ranges[i].start; in setup_new_fdt_ppc64()
1234 size = rmem->ranges[i].end - base + 1; in setup_new_fdt_ppc64()
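Each reserved range then goes into the FDT's memory reserve map via libfdt's fdt_add_mem_rsv(); the elided call is roughly (error path condensed):

	ret = fdt_add_mem_rsv(fdt, base, size);
	if (ret)
		pr_err("Error updating memory reserve map: %s\n",
		       fdt_strerror(ret));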
1254 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
1255 * tce-table, reserved-ranges & such (exclude
1257 * segment buffer. Sets kbuf->mem when a suitable
1261 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
1271 /* Look up the exclude ranges list while locating the memory hole */ in arch_kexec_locate_mem_hole()
1272 emem = &(kbuf->image->arch.exclude_ranges); in arch_kexec_locate_mem_hole()
1273 if (!(*emem) || ((*emem)->nr_ranges == 0)) { in arch_kexec_locate_mem_hole()
1278 buf_min = kbuf->buf_min; in arch_kexec_locate_mem_hole()
1279 buf_max = kbuf->buf_max; in arch_kexec_locate_mem_hole()
1281 if (kbuf->image->type == KEXEC_TYPE_CRASH) { in arch_kexec_locate_mem_hole()
1282 buf_min = (buf_min < crashk_res.start ? in arch_kexec_locate_mem_hole()
1283 crashk_res.start : buf_min); in arch_kexec_locate_mem_hole()
1290 return -EINVAL; in arch_kexec_locate_mem_hole()
1293 if (kbuf->top_down) in arch_kexec_locate_mem_hole()
1302 add_mem_range(emem, kbuf->mem, kbuf->memsz); in arch_kexec_locate_mem_hole()
1306 kbuf->memsz); in arch_kexec_locate_mem_hole()
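Pulled together, the function clamps the window to the crashkernel region for crash images, dispatches on kbuf->top_down, and records the claimed range so later segments avoid it. A condensed sketch (argument plumbing simplified):

	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		/* Keep the buffer inside the crashkernel window. */
		buf_min = max_t(u64, buf_min, crashk_res.start);
		buf_max = min_t(u64, buf_max, crashk_res.end);
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max, *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max, *emem);

	/* Claimed: later segments must not land on top of this one. */
	if (!ret)
		add_mem_range(emem, kbuf->mem, kbuf->memsz);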
1312 * arch_kexec_kernel_image_probe - Does the additional handling needed to set up
1325 /* Get exclude memory ranges needed for setting up kexec segments */ in arch_kexec_kernel_image_probe()
1326 ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges)); in arch_kexec_kernel_image_probe()
1336 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
1344 kfree(image->arch.exclude_ranges); in arch_kimage_file_post_load_cleanup()
1345 image->arch.exclude_ranges = NULL; in arch_kimage_file_post_load_cleanup()
1347 vfree(image->arch.backup_buf); in arch_kimage_file_post_load_cleanup()
1348 image->arch.backup_buf = NULL; in arch_kimage_file_post_load_cleanup()
1350 vfree(image->elf_headers); in arch_kimage_file_post_load_cleanup()
1351 image->elf_headers = NULL; in arch_kimage_file_post_load_cleanup()
1352 image->elf_headers_sz = 0; in arch_kimage_file_post_load_cleanup()
1354 kvfree(image->arch.fdt); in arch_kimage_file_post_load_cleanup()
1355 image->arch.fdt = NULL; in arch_kimage_file_post_load_cleanup()