Lines Matching full:region

157 			 * memmap=nn@ss specifies usable region, should in parse_memmap()
183 * system can use. Region above the limit should be avoided. in parse_memmap()
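
The two parse_memmap() hits come from the memmap= boot option handling: memmap=nn@ss describes memory that is already usable (so there is nothing for KASLR to avoid), while a bare memmap=nn behaves like mem=nn and caps the highest address the system may use. Below is a minimal userspace sketch of that logic, not the kernel code: memparse_simple() is a cut-down stand-in for the kernel's memparse(), and the real parser cooperates with separate mem_limit bookkeeping rather than updating the limit itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t mem_limit = ~0ULL;    /* highest usable address, sketch only */

/* Simplified stand-in for the kernel's memparse(): number + optional K/M/G. */
static uint64_t memparse_simple(const char *s, char **end)
{
    uint64_t val = strtoull(s, end, 0);

    switch (**end) {
    case 'G': case 'g': val <<= 30; (*end)++; break;
    case 'M': case 'm': val <<= 20; (*end)++; break;
    case 'K': case 'k': val <<= 10; (*end)++; break;
    }
    return val;
}

/*
 * Parse one memmap= argument into a (start, size) pair to avoid.
 * Returns 0 on success, -1 if the argument carries nothing to avoid.
 */
static int parse_memmap(const char *p, uint64_t *start, uint64_t *size)
{
    char *end;

    *size = memparse_simple(p, &end);
    if (end == p)
        return -1;

    switch (*end) {
    case '#':    /* ACPI data */
    case '$':    /* reserved */
    case '!':    /* persistent memory */
        *start = memparse_simple(end + 1, &end);
        return 0;
    case '@':
        /* memmap=nn@ss marks usable RAM: nothing to avoid. */
        return -1;
    default:
        /* Bare memmap=nn behaves like mem=nn: cap usable memory. */
        mem_limit = *size;
        return -1;
    }
}

int main(void)
{
    uint64_t start, size;

    if (!parse_memmap("16M$0x1000000", &start, &size))
        printf("avoid [%#llx, +%#llx)\n",
               (unsigned long long)start, (unsigned long long)size);
    parse_memmap("512M", &start, &size);
    printf("mem_limit = %#llx\n", (unsigned long long)mem_limit);
    return 0;
}
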
395 * Avoid the region that is unsafe to overlap during in mem_avoid_init()
434 * overlap region with the lowest address.
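
mem_avoid_init() fills a table of ranges the randomized kernel must not land on (the compressed image and its workspace, the initrd, the command line, boot_params, plus any memmap= regions), and mem_avoid_overlap() then reports, for a candidate region, the conflicting range with the lowest start address so the caller knows where to clip first. A self-contained sketch of that lookup, with a hypothetical avoid[] array standing in for the kernel's mem_avoid[] table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mem_vector {
    uint64_t start;
    uint64_t size;
};

static bool mem_overlaps(const struct mem_vector *a, const struct mem_vector *b)
{
    return a->start < b->start + b->size && b->start < a->start + a->size;
}

/*
 * If @img overlaps any entry of @avoid[], copy the overlapping entry with
 * the lowest start address into @overlap and return true.
 */
static bool mem_avoid_overlap(const struct mem_vector *img,
                              const struct mem_vector *avoid, int nr_avoid,
                              struct mem_vector *overlap)
{
    uint64_t earliest = img->start + img->size;
    bool hit = false;

    for (int i = 0; i < nr_avoid; i++) {
        if (mem_overlaps(img, &avoid[i]) && avoid[i].start < earliest) {
            *overlap = avoid[i];
            earliest = avoid[i].start;
            hit = true;
        }
    }
    return hit;
}

int main(void)
{
    struct mem_vector avoid[] = {
        { 0x300000, 0x10000 },    /* e.g. boot_params */
        { 0x100000, 0x80000 },    /* e.g. the initrd  */
    };
    struct mem_vector img = { 0x90000, 0x400000 }, overlap;

    if (mem_avoid_overlap(&img, avoid, 2, &overlap))
        printf("first conflict at %#llx\n",
               (unsigned long long)overlap.start);
    return 0;
}

Returning the lowest-address conflict is what lets the caller keep the head of the region and resume scanning right after that conflict.
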
496 static void store_slot_info(struct mem_vector *region, unsigned long image_size) in store_slot_info() argument
503 slot_area.addr = region->start; in store_slot_info()
504 slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN; in store_slot_info()
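
store_slot_info() turns one free region into a slot_area entry: candidate load addresses start at region->start and step by CONFIG_PHYSICAL_ALIGN, so a region that can still hold the image at its last slot contributes 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN slots. A small standalone sketch of that arithmetic; PHYSICAL_ALIGN is a made-up stand-in for CONFIG_PHYSICAL_ALIGN, and the real function appends to a global slot_areas[] array instead of returning the entry.

#include <stdint.h>
#include <stdio.h>

#define PHYSICAL_ALIGN 0x200000ULL    /* stand-in for CONFIG_PHYSICAL_ALIGN (2 MiB) */

struct mem_vector { uint64_t start, size; };
struct slot_area { uint64_t addr; unsigned long num; };

/* Count the aligned start addresses inside @region that can hold the image. */
static struct slot_area store_slot_info(const struct mem_vector *region,
                                        uint64_t image_size)
{
    struct slot_area slot_area = { .addr = region->start };

    /* Caller guarantees region->size >= image_size and an aligned start. */
    slot_area.num = 1 + (region->size - image_size) / PHYSICAL_ALIGN;
    return slot_area;
}

int main(void)
{
    struct mem_vector region = { .start = 0x1000000, .size = 0x4000000 };
    struct slot_area sa = store_slot_info(&region, 0x1000000);

    /* 64 MiB region, 16 MiB image: 1 + 48 MiB / 2 MiB = 25 slots. */
    printf("%lu slots starting at %#llx\n", sa.num,
           (unsigned long long)sa.addr);
    return 0;
}
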
511 * Skip as many 1GB huge pages as possible in the passed region
515 process_gb_huge_pages(struct mem_vector *region, unsigned long image_size) in process_gb_huge_pages() argument
522 store_slot_info(region, image_size); in process_gb_huge_pages()
526 /* Are there any 1GB pages in the region? */ in process_gb_huge_pages()
527 pud_start = ALIGN(region->start, PUD_SIZE); in process_gb_huge_pages()
528 pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE); in process_gb_huge_pages()
532 store_slot_info(region, image_size); in process_gb_huge_pages()
536 /* Check if the head part of the region is usable. */ in process_gb_huge_pages()
537 if (pud_start >= region->start + image_size) { in process_gb_huge_pages()
538 tmp.start = region->start; in process_gb_huge_pages()
539 tmp.size = pud_start - region->start; in process_gb_huge_pages()
552 /* Check if the tail part of the region is usable. */ in process_gb_huge_pages()
553 if (region->start + region->size >= pud_end + image_size) { in process_gb_huge_pages()
555 tmp.size = region->start + region->size - pud_end; in process_gb_huge_pages()
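
process_gb_huge_pages() tries to leave whole GB-aligned 1 GiB ranges free for huge pages: it rounds the region's start up and its end down to PUD_SIZE, and if that still leaves a full 1 GiB page inside, only the head (before pud_start) and the tail (after pud_end) are handed to store_slot_info(), each only when it can still hold the image. The simplified, self-contained version below omits the kernel's max_gb_huge_pages accounting and just prints the surviving pieces.

#include <stdint.h>
#include <stdio.h>

#define PUD_SIZE        (1ULL << 30)    /* 1 GiB */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

struct mem_vector { uint64_t start, size; };

static void store_slot_info(const struct mem_vector *region, uint64_t image_size)
{
    (void)image_size;    /* slot arithmetic omitted in this stub */
    printf("slots in [%#llx, +%#llx)\n",
           (unsigned long long)region->start,
           (unsigned long long)region->size);
}

/* Carve full 1 GiB pages out of @region, keeping only head and tail usable. */
static void process_gb_huge_pages(const struct mem_vector *region,
                                  uint64_t image_size)
{
    uint64_t pud_start = ALIGN_UP(region->start, PUD_SIZE);
    uint64_t pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);
    struct mem_vector tmp;

    /* No full 1 GiB page inside: use the whole region. */
    if (pud_start >= pud_end) {
        store_slot_info(region, image_size);
        return;
    }

    /* Head part, before the first full 1 GiB page. */
    if (pud_start >= region->start + image_size) {
        tmp.start = region->start;
        tmp.size = pud_start - region->start;
        store_slot_info(&tmp, image_size);
    }

    /* Tail part, after the last full 1 GiB page. */
    if (region->start + region->size >= pud_end + image_size) {
        tmp.start = pud_end;
        tmp.size = region->start + region->size - pud_end;
        store_slot_info(&tmp, image_size);
    }
}

int main(void)
{
    /* 2 GiB region starting at 0.5 GiB: the 1 GiB page at 1 GiB is carved out. */
    struct mem_vector region = { .start = PUD_SIZE / 2, .size = 2 * PUD_SIZE };

    process_gb_huge_pages(&region, 0x1000000);
    return 0;
}
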
588 struct mem_vector region, overlap; in __process_mem_region() local
592 region.start = max_t(u64, entry->start, minimum); in __process_mem_region()
598 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); in __process_mem_region()
601 if (region.start > region_end) in __process_mem_region()
605 region.size = region_end - region.start; in __process_mem_region()
607 /* Return if region can't contain decompressed kernel */ in __process_mem_region()
608 if (region.size < image_size) in __process_mem_region()
611 /* If nothing overlaps, store the region and return. */ in __process_mem_region()
612 if (!mem_avoid_overlap(&region, &overlap)) { in __process_mem_region()
613 process_gb_huge_pages(&region, image_size); in __process_mem_region()
617 /* Store beginning of region if holds at least image_size. */ in __process_mem_region()
618 if (overlap.start >= region.start + image_size) { in __process_mem_region()
619 region.size = overlap.start - region.start; in __process_mem_region()
620 process_gb_huge_pages(&region, image_size); in __process_mem_region()
623 /* Clip off the overlapping region and start over. */ in __process_mem_region()
624 region.start = overlap.start + overlap.size; in __process_mem_region()
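
__process_mem_region() is the clip-and-retry loop around the avoid list: clamp the entry's start to the minimum address, align it up to CONFIG_PHYSICAL_ALIGN, and then either record the whole remainder (when nothing avoided overlaps it) or record the part before the first overlap and continue after it. The sketch below mirrors that loop in userspace under simplifying assumptions: it drops the kernel's mem_limit clamp and slot-area-table-full check, uses a single hard-coded avoid range, and record_slots() stands in for the process_gb_huge_pages() call seen in the listing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHYSICAL_ALIGN  0x200000ULL    /* stand-in for CONFIG_PHYSICAL_ALIGN */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct mem_vector { uint64_t start, size; };

/* One avoided range for the demo; the kernel keeps a whole mem_avoid[] table. */
static const struct mem_vector avoid = { 0x2000000, 0x100000 };

static bool mem_avoid_overlap(const struct mem_vector *img, struct mem_vector *overlap)
{
    if (img->start < avoid.start + avoid.size &&
        avoid.start < img->start + img->size) {
        *overlap = avoid;
        return true;
    }
    return false;
}

static void record_slots(const struct mem_vector *region, uint64_t image_size)
{
    (void)image_size;
    printf("usable [%#llx, +%#llx)\n",
           (unsigned long long)region->start,
           (unsigned long long)region->size);
}

static void __process_mem_region(const struct mem_vector *entry,
                                 uint64_t minimum, uint64_t image_size)
{
    uint64_t region_end = entry->start + entry->size;
    struct mem_vector region, overlap;

    /* Enforce the minimum address before the first pass. */
    region.start = entry->start > minimum ? entry->start : minimum;

    while (1) {
        /* Raise the candidate start to meet alignment needs. */
        region.start = ALIGN_UP(region.start, PHYSICAL_ALIGN);
        if (region.start > region_end)
            return;
        region.size = region_end - region.start;

        /* Too small to hold the decompressed kernel. */
        if (region.size < image_size)
            return;

        /* Nothing avoided overlaps: record the region and stop. */
        if (!mem_avoid_overlap(&region, &overlap)) {
            record_slots(&region, image_size);
            return;
        }

        /* Keep the part before the overlap if it is big enough. */
        if (overlap.start >= region.start + image_size) {
            region.size = overlap.start - region.start;
            record_slots(&region, image_size);
        }

        /* Clip off the overlap and retry with what is left. */
        region.start = overlap.start + overlap.size;
    }
}

int main(void)
{
    struct mem_vector entry = { 0x1000000, 0x3000000 };    /* 16 MiB..64 MiB */

    __process_mem_region(&entry, 0x1000000, 0x800000);
    return 0;
}

Because the reported overlap is always the lowest-address one, each iteration either terminates or strictly advances region.start, so the loop cannot revisit the same conflict.
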
628 static bool process_mem_region(struct mem_vector *region, in process_mem_region() argument
635 * use @region directly. in process_mem_region()
638 __process_mem_region(region, minimum, image_size); in process_mem_region()
650 * immovable memory and @region. in process_mem_region()
656 if (!mem_overlaps(region, &immovable_mem[i])) in process_mem_region()
661 region_end = region->start + region->size; in process_mem_region()
663 entry.start = clamp(region->start, start, end); in process_mem_region()
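
process_mem_region() is the hotplug-aware wrapper: with no immovable-memory information the region is used directly, otherwise it is intersected with each immovable_mem[] range (the kernel fills this table from the ACPI SRAT) and every intersection is processed separately, which is what the clamp() in the listing implements. A sketch of that intersection with invented immovable ranges; clamp_u64() is a local helper standing in for the kernel's clamp():

#include <stdint.h>
#include <stdio.h>

struct mem_vector { uint64_t start, size; };

#define MAX_IMMOVABLE 4

/* Hypothetical stand-in for the kernel's immovable_mem[] table. */
static struct mem_vector immovable_mem[MAX_IMMOVABLE] = {
    { 0x00000000, 0x40000000 },    /* node 0: first 1 GiB   */
    { 0x80000000, 0x40000000 },    /* node 1: 2 GiB..3 GiB  */
};
static int num_immovable_mem = 2;

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static void __process_mem_region(const struct mem_vector *entry, uint64_t minimum,
                                 uint64_t image_size)
{
    (void)minimum; (void)image_size;
    printf("process [%#llx, +%#llx)\n",
           (unsigned long long)entry->start,
           (unsigned long long)entry->size);
}

/*
 * Only the pieces of @region that also lie in immovable (non-hotpluggable)
 * memory may host the kernel; intersect and process each such piece.
 */
static void process_mem_region(const struct mem_vector *region,
                               uint64_t minimum, uint64_t image_size)
{
    uint64_t region_end = region->start + region->size;

    if (!num_immovable_mem) {    /* no SRAT info: use @region as-is */
        __process_mem_region(region, minimum, image_size);
        return;
    }

    for (int i = 0; i < num_immovable_mem; i++) {
        uint64_t start = immovable_mem[i].start;
        uint64_t end = start + immovable_mem[i].size;
        struct mem_vector entry;

        /* Skip immovable ranges that do not intersect @region. */
        if (region->start >= end || start >= region_end)
            continue;

        entry.start = clamp_u64(region->start, start, end);
        entry.size = clamp_u64(region_end, start, end) - entry.start;
        __process_mem_region(&entry, minimum, image_size);
    }
}

int main(void)
{
    struct mem_vector region = { 0x20000000, 0x80000000 };    /* 0.5 GiB..2.5 GiB */

    process_mem_region(&region, 0x1000000, 0x1000000);
    return 0;
}
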
688 struct mem_vector region; in process_efi_entries() local
745 region.start = md->phys_addr; in process_efi_entries()
746 region.size = md->num_pages << EFI_PAGE_SHIFT; in process_efi_entries()
747 if (process_mem_region(&region, minimum, image_size)) in process_efi_entries()
764 struct mem_vector region; in process_e820_entries() local
773 region.start = entry->addr; in process_e820_entries()
774 region.size = entry->size; in process_e820_entries()
775 if (process_mem_region(&region, minimum, image_size)) in process_e820_entries()
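
process_efi_entries() and process_e820_entries() are the two suppliers of candidate regions: each walks the firmware memory map, copies one descriptor's base and size into a struct mem_vector and hands it to process_mem_region(), stopping once enough slot areas have been collected. A sketch over an invented e820-style table follows; the EFI walker in the listing does the same with efi_memory_desc entries, computing the size as md->num_pages << EFI_PAGE_SHIFT.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define E820_TYPE_RAM 1

struct boot_e820_entry { uint64_t addr, size; uint32_t type; };
struct mem_vector { uint64_t start, size; };

/* Pretend process_mem_region() reports whether the slot area table is full. */
static bool process_mem_region(const struct mem_vector *region,
                               uint64_t minimum, uint64_t image_size)
{
    (void)minimum; (void)image_size;
    printf("candidate [%#llx, +%#llx)\n",
           (unsigned long long)region->start,
           (unsigned long long)region->size);
    return false;    /* table not full: keep scanning */
}

static void process_e820_entries(uint64_t minimum, uint64_t image_size,
                                 const struct boot_e820_entry *table, int nr)
{
    struct mem_vector region;

    for (int i = 0; i < nr; i++) {
        const struct boot_e820_entry *entry = &table[i];

        /* Only plain RAM can host the decompressed kernel. */
        if (entry->type != E820_TYPE_RAM)
            continue;

        region.start = entry->addr;
        region.size = entry->size;
        if (process_mem_region(&region, minimum, image_size))
            break;    /* enough slot areas collected */
    }
}

int main(void)
{
    struct boot_e820_entry map[] = {
        { 0x00000000, 0x0009fc00, E820_TYPE_RAM },
        { 0x000f0000, 0x00010000, 2 },               /* reserved */
        { 0x00100000, 0x7ff00000, E820_TYPE_RAM },
    };

    process_e820_entries(0x1000000, 0x1000000, map, 3);
    return 0;
}
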
865 warn("Physical KASLR disabled: no suitable memory region!"); in choose_random_location()
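
The warning on the last line fires when, after all memory-map entries have been processed, no slot area was stored at all. Otherwise the final step is to pick one random slot out of the accumulated total and convert it back into a physical address. A sketch of that conversion, with a hand-filled slot_areas[] table and a fixed slot index standing in for the kernel's kaslr_get_random_long() % slot_max pick:

#include <stdint.h>
#include <stdio.h>

#define PHYSICAL_ALIGN 0x200000ULL    /* stand-in for CONFIG_PHYSICAL_ALIGN */
#define MAX_SLOT_AREA  100

struct slot_area { uint64_t addr; unsigned long num; };

static struct slot_area slot_areas[MAX_SLOT_AREA];
static int slot_area_index;
static unsigned long slot_max;

/* Map a flat slot index back to a physical load address. */
static uint64_t slots_fetch_random(unsigned long slot)
{
    for (int i = 0; i < slot_area_index; i++) {
        if (slot >= slot_areas[i].num) {
            slot -= slot_areas[i].num;
            continue;
        }
        return slot_areas[i].addr + slot * PHYSICAL_ALIGN;
    }
    return 0;    /* no slots: caller warns and keeps the default address */
}

int main(void)
{
    /* Two free areas, as store_slot_info() would have recorded them. */
    slot_areas[slot_area_index++] = (struct slot_area){ 0x1000000, 25 };
    slot_areas[slot_area_index++] = (struct slot_area){ 0x8000000, 10 };
    slot_max = 35;

    if (!slot_max) {
        printf("Physical KASLR disabled: no suitable memory region!\n");
        return 0;
    }

    /* The kernel draws a random slot modulo slot_max; use 30 here. */
    printf("random_addr = %#llx\n",
           (unsigned long long)slots_fetch_random(30 % slot_max));
    return 0;
}
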