// SPDX-License-Identifier: GPL-2.0-only

 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.

#include <asm/crashdump-ppc64.h>
u64 *buf;		/* data buffer for usable-memory property */

/* usable memory ranges to look up */
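/*
 * For context, a hedged reconstruction of struct umem_info, which the two
 * field fragments above belong to; every field beyond those two lines is
 * recalled for illustration, not quoted from this listing:
 */
struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct crash_mem_range *ranges;
};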
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab which should be avoided while
 *                             setting up kexec load segments.
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table,
 *                            that kdump kernel could use.

 * instead of [crashk_res.start, crashk_res.end] to work around it.
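/*
 * A minimal sketch of the workaround referenced above, assuming the
 * add_mem_range() helper from arch/powerpc/kexec/ranges.c: the usable
 * list is seeded with [0, crashk_res.end] rather than starting at
 * crashk_res.start (size is end + 1 since the range is inclusive):
 */
ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);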
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.

	u64 size = end - base;

	size -= BACKUP_SRC_SIZE;

	if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)

	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {

	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);

	/*
	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
	 *        regions are exported to save their context at the time of
	 *        crash, they should actually be backed up just like the
	 *        first 64K bytes of memory.
	 */
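/*
 * Context for the size adjustment above, as a hedged sketch: the first
 * 64K (the backup region) gets its own entry in the crash ranges, so a
 * range starting at BACKUP_SRC_START is trimmed past it first, roughly:
 */
if (base == BACKUP_SRC_START) {
	base = BACKUP_SRC_END + 1;
	size -= BACKUP_SRC_SIZE;
}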
 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.

	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)

		if (start < buf_min)
			start = buf_min;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
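/*
 * Worked example of the placement math above, with illustrative values:
 * for end = 0x4fffffff, kbuf->memsz = 0x100000 (1M) and
 * kbuf->buf_align = 0x200000 (2M), end - memsz + 1 = 0x4ff00000 and
 * ALIGN_DOWN(0x4ff00000, 0x200000) = 0x4fe00000, so the buffer occupies
 * [0x4fe00000, 0x4fefffff]: the highest 2M-aligned placement that still
 * fits at or below `end`.
 */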
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.

	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)

		tmax = start - 1;
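/*
 * Hedged reconstruction of the step elided between the two fragments
 * above: each exclude range splits the search window, the gap just above
 * the range ([tmin, tmax]) is tried first, and only then does the window
 * shrink to below the range (the `tmax = start - 1` fragment):
 */
if (end < tmax) {
	tmin = (end < buf_min ? buf_min : end + 1);
	ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	if (!ret)
		return 0;
}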
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.

	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)

		if (start < buf_min)
			start = buf_min;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.

	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 *
 * Frees up the old buffer if memory reallocation fails.

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}
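/*
 * Note on the growth math above: each reallocation grows the buffer by
 * MEM_RANGE_CHUNK_SZ bytes, i.e. MEM_RANGE_CHUNK_SZ / sizeof(u64) more
 * u64 slots, and max_entries is recomputed from the new size so the
 * index check at the top stays consistent with the allocation.
 */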
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer.

	for (i = 0; i < um_info->nr_ranges; i++) {
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;

		if (!check_realloc_usable_mem(um_info, 2))
			return -ENOMEM;

		um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
		um_info->buf[um_info->idx++] =
				cpu_to_be64(loc_end - loc_base + 1);
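/*
 * Hedged reconstruction of the clipping elided above: a looked-up range
 * is emitted only where it intersects [base, end], clamped to the
 * overlap before the (base, size) pair is written:
 */
if (base < loc_end && end > loc_base) {
	if (loc_base < base)
		loc_base = base;
	if (loc_end > end)
		loc_end = end;
}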
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @usm:                    linux,drconf-usable-memory property value.

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;

	um_info->buf[tmp_idx] =
			cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
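/*
 * Resulting per-LMB encoding in um_info->buf, matching the
 * linux,drconf-usable-memory layout: a be64 range count at tmp_idx,
 * followed by that many (base, size) be64 pairs, which is why the count
 * is (um_info->idx - tmp_idx - 1) / 2 above.
 */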
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {

		return -EOVERFLOW;
	}

	ret = -EINVAL;

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
	}

	end = base + of_read_number(prop, n_mem_size_cells) - 1;

	/*
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));
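/*
 * Illustration (hedged, not quoted from the source): for a memory node
 * where only the first 256M is usable by the kdump kernel, the property
 * set above would read, with 2-cell addresses and sizes:
 *
 *	linux,usable-memory = <0x0 0x0 0x0 0x10000000>;
 *
 * i.e. one (base = 0x0, size = 0x10000000) be64 pair.
 */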
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate.

		return -ENOENT;

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)

	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");

	pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");

	ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
			  um_info.buf, (um_info.idx * sizeof(u64)));
	if (ret) {
		pr_err("Failed to update fdt with linux,drconf-usable-memory property");

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */

	pr_err("Failed to set linux,usable-memory property for %s node",
	       dn->full_name);
 * load_backup_segment - Locate a memory hole to place the backup region.

		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
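/*
 * For reference: BACKUP_SRC_SIZE comes from asm/crashdump-ppc64.h
 * (included near the top of this listing) and covers the backup region,
 * the first 64K of the crashing kernel's memory, as the doc comments
 * later in this file also note.
 */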
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.

	for (i = 0; i < ehdr->e_phnum; i++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			pr_debug("Backup region offset updated to 0x%lx\n",
				 image->arch.backup_start);
 * load_elfcorehdr_segment - Set up crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = headers_sz;
	kbuf->top_down = false;

	image->arch.elfcorehdr_addr = kbuf->mem;
	image->arch.elf_headers_sz = headers_sz;
	image->arch.elf_headers = headers;
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.

	/* Load backup segment - first 64K bytes of the crashing kernel */

	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */

		 image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz);
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables as required for passing the inputs.

	if (image->type == KEXEC_TYPE_CRASH) {

		ret = kexec_purgatory_get_set_symbol(image, "backup_start",
						     &image->arch.backup_start,
						     sizeof(image->arch.backup_start),
						     false);

	of_property_read_u64(dn, "opal-base-address", &val);

	of_property_read_u64(dn, "opal-entry-address", &val);
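/*
 * Hedged sketch of how the OPAL values read above are handed to the
 * purgatory, mirroring the backup_start call; the symbol names
 * "opal_base"/"opal_entry" are assumptions for illustration:
 */
ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
				     sizeof(val), false);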
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       loaded by kexec.

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {

		pr_err("Error setting up usable-memory property for kdump kernel\n");

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_SIZE,
				      crashk_res.start - BACKUP_SRC_SIZE);

		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);

	nr_ranges = rmem ? rmem->nr_ranges : 0;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.

 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;

	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);

		return -EINVAL;

	if (kbuf->top_down)

	add_mem_range(emem, kbuf->mem, kbuf->memsz);

		kbuf->memsz);
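/*
 * Note on the add_mem_range() call above: once a hole is found, the
 * chosen region is appended to the image's exclude list, so subsequent
 * segment placements skip the memory just claimed.
 */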
 * arch_kexec_kernel_image_probe - Does additional handling needed to set up
 *                                 kexec segments.

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.

	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;
	image->arch.elf_headers_sz = 0;