Lines Matching +full:long +full:- +full:ram +full:- +full:code (matched lines from arch/x86/kernel/e820.c)

1 // SPDX-License-Identifier: GPL-2.0-only
10 * allocation code routines via a platform independent interface (memblock, etc.).
16 #include <linux/firmware-map.h>
26 * - 'e820_table_firmware': the original firmware version passed to us by the
27 * bootloader - not modified by the kernel. It is composed of two parts:
31 * - inform the user about the firmware's notion of memory layout
34 * - the hibernation code uses it to generate a kernel-independent CRC32
37 * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
38 * passed to us by the bootloader - the major difference between
45 * - kexec, which is a bootloader in disguise, uses the original E820
46 * layout to pass to the kexec-ed kernel. This way the original kernel
47 * can have a restricted E820 map while the kexec()-ed kexec-kernel
48 * can have access to full memory - etc.
50 * - 'e820_table': this is the main E820 table that is massaged by the
51 * low level x86 platform code, or modified by boot parameters, before
55 * information its role stops - modifying it has no effect and does not get
56 * re-propagated. So its main role is a temporary bootstrap storage of firmware
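
To keep the fragments below readable, here is a minimal sketch of the data layout their accessors assume. It is not the kernel's exact definition (that lives in arch/x86/include/asm/e820/types.h; the real table is larger and packed), but the field names match the code in this listing:

enum e820_type {
	E820_TYPE_RAM		= 1,
	E820_TYPE_RESERVED	= 2,
	E820_TYPE_ACPI		= 3,
	E820_TYPE_NVS		= 4,
	E820_TYPE_UNUSABLE	= 5,
	/* plus kernel-internal types: PMEM, PRAM, SOFT_RESERVED, RESERVED_KERN, ... */
};

struct e820_entry {
	unsigned long long	addr;	/* start of the range          */
	unsigned long long	size;	/* length of the range, bytes  */
	enum e820_type		type;
};

struct e820_table {
	unsigned int		nr_entries;
	struct e820_entry	entries[128];	/* E820_MAX_ENTRIES in the real code */
};

/* The three instances described in the header comment, least to most mutable: */
extern struct e820_table *e820_table_firmware;	/* pristine copy from the bootloader   */
extern struct e820_table *e820_table_kexec;	/* lightly adjusted, handed to kexec   */
extern struct e820_table *e820_table;		/* the working map the kernel massages */
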
67 /* For PCI or other memory-mapped resources */
68 unsigned long pci_mem_start = 0xaeedbabe;
82 for (i = 0; i < table->nr_entries; i++) { in _e820__mapped_any()
83 struct e820_entry *entry = &table->entries[i]; in _e820__mapped_any()
85 if (type && entry->type != type) in _e820__mapped_any()
87 if (entry->addr >= end || entry->addr + entry->size <= start) in _e820__mapped_any()
110 * not-overlapping (at least for the range specified), which is the case normally.
117 for (i = 0; i < e820_table->nr_entries; i++) { in __e820__mapped_all()
118 struct e820_entry *entry = &e820_table->entries[i]; in __e820__mapped_all()
120 if (type && entry->type != type) in __e820__mapped_all()
124 if (entry->addr >= end || entry->addr + entry->size <= start) in __e820__mapped_all()
131 if (entry->addr <= start) in __e820__mapped_all()
132 start = entry->addr + entry->size; in __e820__mapped_all()
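
Both helpers above use the same half-open interval test: [start, end) misses an entry exactly when the entry starts at or after end, or ends at or before start. A standalone sketch of the two predicates, using the simplified types from the sketch above (the real __e820__mapped_all() returns the matching entry rather than a flag):

/* True if [start, end) intersects [entry->addr, entry->addr + entry->size): */
static int entry_overlaps(const struct e820_entry *entry,
			  unsigned long long start, unsigned long long end)
{
	return !(entry->addr >= end || entry->addr + entry->size <= start);
}

/*
 * Coverage check in the spirit of __e820__mapped_all(): advance 'start' across
 * every matching entry that begins at or before it; if 'start' reaches 'end',
 * the whole range is covered. Relies on the table being sorted and
 * non-overlapping, as the comment above notes.
 */
static int range_fully_covered(const struct e820_table *table,
			       unsigned long long start, unsigned long long end,
			       enum e820_type type)
{
	unsigned int i;

	for (i = 0; i < table->nr_entries; i++) {
		const struct e820_entry *entry = &table->entries[i];

		if (type && entry->type != type)
			continue;
		if (!entry_overlaps(entry, start, end))
			continue;
		if (entry->addr <= start)
			start = entry->addr + entry->size;
		if (start >= end)
			return 1;
	}
	return 0;
}
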
160 return entry ? entry->type : -EINVAL; in e820__get_entry_type()
168 int x = table->nr_entries; in __e820__range_add()
170 if (x >= ARRAY_SIZE(table->entries)) { in __e820__range_add()
171 pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n", in __e820__range_add()
172 start, start + size - 1); in __e820__range_add()
176 table->entries[x].addr = start; in __e820__range_add()
177 table->entries[x].size = size; in __e820__range_add()
178 table->entries[x].type = type; in __e820__range_add()
179 table->nr_entries++; in __e820__range_add()
207 for (i = 0; i < e820_table->nr_entries; i++) { in e820__print_table()
208 pr_info("%s: [mem %#018Lx-%#018Lx] ", in e820__print_table()
210 e820_table->entries[i].addr, in e820__print_table()
211 e820_table->entries[i].addr + e820_table->entries[i].size - 1); in e820__print_table()
213 e820_print_type(e820_table->entries[i].type); in e820__print_table()
238 * successfully 'sanitized' the map entries passed in, and is -1
283 unsigned long long addr;
302 if (ap->addr != bp->addr) in cpcompare()
303 return ap->addr > bp->addr ? 1 : -1; in cpcompare()
305 return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr); in cpcompare()
324 struct e820_entry *entries = table->entries; in e820__update_table()
325 u32 max_nr_entries = ARRAY_SIZE(table->entries); in e820__update_table()
327 unsigned long long last_addr; in e820__update_table()
332 if (table->nr_entries < 2) in e820__update_table()
333 return -1; in e820__update_table()
335 BUG_ON(table->nr_entries > max_nr_entries); in e820__update_table()
338 for (i = 0; i < table->nr_entries; i++) { in e820__update_table()
340 return -1; in e820__update_table()
343 /* Create pointers for initial change-point information (for sorting): */ in e820__update_table()
344 for (i = 0; i < 2 * table->nr_entries; i++) in e820__update_table()
348 * Record all known change-points (starting and ending addresses), in e820__update_table()
352 for (i = 0; i < table->nr_entries; i++) { in e820__update_table()
354 change_point[chg_idx]->addr = entries[i].addr; in e820__update_table()
355 change_point[chg_idx++]->entry = &entries[i]; in e820__update_table()
356 change_point[chg_idx]->addr = entries[i].addr + entries[i].size; in e820__update_table()
357 change_point[chg_idx++]->entry = &entries[i]; in e820__update_table()
362 /* Sort change-point list by memory addresses (low -> high): */ in e820__update_table()
371 /* Loop through change-points, determining effect on the new map: */ in e820__update_table()
374 if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) { in e820__update_table()
376 overlap_list[overlap_entries++] = change_point[chg_idx]->entry; in e820__update_table()
380 if (overlap_list[i] == change_point[chg_idx]->entry) in e820__update_table()
381 overlap_list[i] = overlap_list[overlap_entries-1]; in e820__update_table()
383 overlap_entries--; in e820__update_table()
387 * "type" to use (larger value takes precedence -- in e820__update_table()
392 if (overlap_list[i]->type > current_type) in e820__update_table()
393 current_type = overlap_list[i]->type; in e820__update_table()
399 new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr; in e820__update_table()
400 /* Move forward only if the new size was non-zero: */ in e820__update_table()
407 new_entries[new_nr_entries].addr = change_point[chg_idx]->addr; in e820__update_table()
409 last_addr = change_point[chg_idx]->addr; in e820__update_table()
417 table->nr_entries = new_nr_entries; in e820__update_table()
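
The sanitizing pass in e820__update_table() is a sweep over "change points": every entry contributes a start and an end address, the points are sorted, and while sweeping, the set of currently-overlapping entries is tracked so the highest-valued type wins. A condensed, self-contained sketch of that algorithm using the simplified types above (insertion sort instead of the kernel's sort(), nomerge and overflow handling omitted):

struct change_member {
	struct e820_entry	*entry;	/* the entry this change point belongs to */
	unsigned long long	addr;	/* address of the change point            */
};

static void merge_table(struct e820_table *table)
{
	/* Static scratch space, in the spirit of the kernel's __initdata buffers: */
	static struct change_member change_point[2 * 128];
	static struct change_member *cp[2 * 128];
	static struct e820_entry *overlap[128];
	static struct e820_entry new_entries[128];
	unsigned int i, j, nr_cp = 0, nr_overlap = 0, nr_new = 0;
	enum e820_type current_type, last_type = 0;
	unsigned long long last_addr = 0;

	if (table->nr_entries < 2)
		return;

	/* 1) Each non-empty entry contributes a 'start' and an 'end' change point: */
	for (i = 0; i < table->nr_entries; i++) {
		if (!table->entries[i].size)
			continue;
		change_point[nr_cp].addr  = table->entries[i].addr;
		change_point[nr_cp].entry = &table->entries[i];
		cp[nr_cp] = &change_point[nr_cp];
		nr_cp++;
		change_point[nr_cp].addr  = table->entries[i].addr + table->entries[i].size;
		change_point[nr_cp].entry = &table->entries[i];
		cp[nr_cp] = &change_point[nr_cp];
		nr_cp++;
	}

	/* 2) Sort by address; at equal addresses starts sort before ends (as in cpcompare()): */
	for (i = 1; i < nr_cp; i++) {
		struct change_member *key = cp[i];

		for (j = i; j > 0 &&
			    (cp[j - 1]->addr > key->addr ||
			     (cp[j - 1]->addr == key->addr &&
			      cp[j - 1]->addr != cp[j - 1]->entry->addr &&	/* cp[j-1] is an end */
			      key->addr == key->entry->addr)); j--)		/* key is a start    */
			cp[j] = cp[j - 1];
		cp[j] = key;
	}

	/* 3) Sweep, maintaining the set of entries that overlap the current address: */
	for (i = 0; i < nr_cp; i++) {
		if (cp[i]->addr == cp[i]->entry->addr) {
			/* Start point: the entry joins the overlap set. */
			overlap[nr_overlap++] = cp[i]->entry;
		} else {
			/* End point: the entry leaves the overlap set. */
			for (j = 0; j < nr_overlap; j++) {
				if (overlap[j] == cp[i]->entry) {
					overlap[j] = overlap[--nr_overlap];
					break;
				}
			}
		}

		/* The largest type value among overlapping entries takes precedence: */
		current_type = 0;
		for (j = 0; j < nr_overlap; j++)
			if (overlap[j]->type > current_type)
				current_type = overlap[j]->type;

		/* Emit a new entry whenever the effective type changes: */
		if (current_type != last_type) {
			if (last_type) {
				new_entries[nr_new].size = cp[i]->addr - last_addr;
				if (new_entries[nr_new].size)
					nr_new++;	/* skip zero-sized pieces */
			}
			if (current_type) {
				new_entries[nr_new].addr = cp[i]->addr;
				new_entries[nr_new].type = current_type;
				last_addr = cp[i]->addr;
			}
			last_type = current_type;
		}
	}

	/* 4) Replace the old entries with the merged, non-overlapping ones: */
	for (i = 0; i < nr_new; i++)
		table->entries[i] = new_entries[i];
	table->nr_entries = nr_new;
}
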
427 u64 start = entry->addr; in __append_e820_table()
428 u64 size = entry->size; in __append_e820_table()
429 u64 end = start + size - 1; in __append_e820_table()
430 u32 type = entry->type; in __append_e820_table()
432 /* Ignore the entry on 64-bit overflow: */ in __append_e820_table()
434 return -1; in __append_e820_table()
439 nr_entries--; in __append_e820_table()
447 * Sanity-check it while we're at it..
449 * If we're lucky and live on a modern system, the setup code
457 return -1; in append_e820_table()
471 if (size > (ULLONG_MAX - start)) in __e820__range_update()
472 size = ULLONG_MAX - start; in __e820__range_update()
475 printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", start, end - 1); in __e820__range_update()
481 for (i = 0; i < table->nr_entries; i++) { in __e820__range_update()
482 struct e820_entry *entry = &table->entries[i]; in __e820__range_update()
486 if (entry->type != old_type) in __e820__range_update()
489 entry_end = entry->addr + entry->size; in __e820__range_update()
492 if (entry->addr >= start && entry_end <= end) { in __e820__range_update()
493 entry->type = new_type; in __e820__range_update()
494 real_updated_size += entry->size; in __e820__range_update()
499 if (entry->addr < start && entry_end > end) { in __e820__range_update()
501 __e820__range_add(table, end, entry_end - end, entry->type); in __e820__range_update()
502 entry->size = start - entry->addr; in __e820__range_update()
508 final_start = max(start, entry->addr); in __e820__range_update()
513 __e820__range_add(table, final_start, final_end - final_start, new_type); in __e820__range_update()
515 real_updated_size += final_end - final_start; in __e820__range_update()
521 entry->size -= final_end - final_start; in __e820__range_update()
522 if (entry->addr < final_start) in __e820__range_update()
525 entry->addr = final_end; in __e820__range_update()
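
The loop body above is a case analysis of how one entry relates to the update range [start, end): fully inside, fully covering, partially overlapping on one side, or disjoint. A condensed sketch of just that case analysis with the simplified types from the first sketch (the old_type filter, the size accounting and the bounds check of __e820__range_add() are left out):

/* Append helper in the spirit of __e820__range_add(), bounds check omitted: */
static void add_entry(struct e820_table *table, unsigned long long start,
		      unsigned long long size, enum e820_type type)
{
	unsigned int x = table->nr_entries++;

	table->entries[x].addr = start;
	table->entries[x].size = size;
	table->entries[x].type = type;
}

static void update_one_entry(struct e820_table *table, struct e820_entry *entry,
			     unsigned long long start, unsigned long long end,
			     enum e820_type new_type)
{
	unsigned long long entry_end = entry->addr + entry->size;
	unsigned long long final_start, final_end;

	/* Case 1: entry completely inside [start, end) - just retype it: */
	if (entry->addr >= start && entry_end <= end) {
		entry->type = new_type;
		return;
	}

	/* Case 2: [start, end) strictly inside the entry - punch a hole: */
	if (entry->addr < start && entry_end > end) {
		add_entry(table, start, end - start, new_type);	     /* middle, new type    */
		add_entry(table, end, entry_end - end, entry->type); /* tail keeps old type */
		entry->size = start - entry->addr;		     /* head keeps old type */
		return;
	}

	/* Cases 3 and 4: partial overlap on one side, or no overlap at all: */
	final_start = entry->addr > start ? entry->addr : start;
	final_end   = entry_end   < end   ? entry_end   : end;
	if (final_start >= final_end)
		return;	/* disjoint - nothing to do */

	add_entry(table, final_start, final_end - final_start, new_type);

	entry->size -= final_end - final_start;
	if (entry->addr >= final_start)
		entry->addr = final_end;  /* overlap was at the head: slide the start up */
	/* else: overlap was at the tail, shrinking the size was enough */
}
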
547 if (size > (ULLONG_MAX - start)) in e820__range_remove()
548 size = ULLONG_MAX - start; in e820__range_remove()
551 printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", start, end - 1); in e820__range_remove()
556 for (i = 0; i < e820_table->nr_entries; i++) { in e820__range_remove()
557 struct e820_entry *entry = &e820_table->entries[i]; in e820__range_remove()
561 if (check_type && entry->type != old_type) in e820__range_remove()
564 entry_end = entry->addr + entry->size; in e820__range_remove()
567 if (entry->addr >= start && entry_end <= end) { in e820__range_remove()
568 real_removed_size += entry->size; in e820__range_remove()
574 if (entry->addr < start && entry_end > end) { in e820__range_remove()
575 e820__range_add(end, entry_end - end, entry->type); in e820__range_remove()
576 entry->size = start - entry->addr; in e820__range_remove()
582 final_start = max(start, entry->addr); in e820__range_remove()
587 real_removed_size += final_end - final_start; in e820__range_remove()
593 entry->size -= final_end - final_start; in e820__range_remove()
594 if (entry->addr < final_start) in e820__range_remove()
597 entry->addr = final_end; in e820__range_remove()
607 pr_info("modified physical RAM map:\n"); in e820__update_table_print()
621 static int __init e820_search_gap(unsigned long *gapstart, unsigned long *gapsize) in e820_search_gap()
623 unsigned long long last = MAX_GAP_END; in e820_search_gap()
624 int i = e820_table->nr_entries; in e820_search_gap()
627 while (--i >= 0) { in e820_search_gap()
628 unsigned long long start = e820_table->entries[i].addr; in e820_search_gap()
629 unsigned long long end = start + e820_table->entries[i].size; in e820_search_gap()
636 unsigned long gap = last - end; in e820_search_gap()
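
e820_search_gap() walks the (sorted) table from the top down and remembers the largest hole below MAX_GAP_END, i.e. below 4 GB. A standalone sketch of that scan with the simplified types above:

#define MAX_GAP_END_SKETCH 0x100000000ULL	/* only consider holes below 4 GB */

/* Returns 1 if a hole of at least *gapsize bytes was found; updates both outputs. */
static int find_largest_gap(const struct e820_table *table,
			    unsigned long *gapstart, unsigned long *gapsize)
{
	unsigned long long last = MAX_GAP_END_SKETCH;
	int i = table->nr_entries;
	int found = 0;

	while (--i >= 0) {
		unsigned long long start = table->entries[i].addr;
		unsigned long long end = start + table->entries[i].size;

		/* Hole between the end of this entry and the start of the next one up: */
		if (last > end && last - end >= *gapsize) {
			*gapsize = last - end;
			*gapstart = end;
			found = 1;
		}
		if (start < last)
			last = start;
	}
	return found;
}
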
660 unsigned long gapstart, gapsize; in e820__setup_pci_gap()
669 pr_err("Cannot find an available gap in the 32-bit address range\n"); in e820__setup_pci_gap()
670 pr_err("PCI devices with unassigned 32-bit BARs may not work!\n"); in e820__setup_pci_gap()
677 * e820__reserve_resources_late() protects stolen RAM already: in e820__setup_pci_gap()
681 pr_info("[mem %#010lx-%#010lx] available for PCI devices\n", in e820__setup_pci_gap()
682 gapstart, gapstart + gapsize - 1); in e820__setup_pci_gap()
702 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries; in e820__reallocate_tables()
707 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries; in e820__reallocate_tables()
712 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries; in e820__reallocate_tables()
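
The three size computations above use the usual idiom for copying only the live part of a structure that ends in an array: header bytes via offsetof() plus nr_entries payload entries. A small illustration with the simplified types from the first sketch:

#include <stddef.h>	/* offsetof */
#include <string.h>	/* memcpy   */

/* Bytes actually occupied by a table holding 'nr_entries' valid entries: */
static size_t table_bytes(const struct e820_table *table)
{
	return offsetof(struct e820_table, entries) +
	       sizeof(struct e820_entry) * table->nr_entries;
}

/* Copy just the used part of a table into new storage of at least table_bytes(src): */
static void copy_used_part(struct e820_table *dst, const struct e820_table *src)
{
	memcpy(dst, src, table_bytes(src));
}
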
731 entries = sdata->len / sizeof(*extmap); in e820__memory_setup_extended()
732 extmap = (struct boot_e820_entry *)(sdata->data); in e820__memory_setup_extended()
741 pr_info("extended physical RAM map:\n"); in e820__memory_setup_extended()
747 * E820 RAM areas and register the corresponding pages as 'nosave' for
748 * hibernation (32-bit) or software suspend and suspend to RAM (64-bit).
753 void __init e820__register_nosave_regions(unsigned long limit_pfn) in e820__register_nosave_regions()
756 unsigned long pfn = 0; in e820__register_nosave_regions()
758 for (i = 0; i < e820_table->nr_entries; i++) { in e820__register_nosave_regions()
759 struct e820_entry *entry = &e820_table->entries[i]; in e820__register_nosave_regions()
761 if (pfn < PFN_UP(entry->addr)) in e820__register_nosave_regions()
762 register_nosave_region(pfn, PFN_UP(entry->addr)); in e820__register_nosave_regions()
764 pfn = PFN_DOWN(entry->addr + entry->size); in e820__register_nosave_regions()
766 if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) in e820__register_nosave_regions()
767 register_nosave_region(PFN_UP(entry->addr), pfn); in e820__register_nosave_regions()
783 for (i = 0; i < e820_table->nr_entries; i++) { in e820__register_nvs_regions()
784 struct e820_entry *entry = &e820_table->entries[i]; in e820__register_nvs_regions()
786 if (entry->type == E820_TYPE_NVS) in e820__register_nvs_regions()
787 acpi_nvs_register(entry->addr, entry->size); in e820__register_nvs_regions()
819 # define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
821 # define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
830 static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type) in e820_end_pfn()
833 unsigned long last_pfn = 0; in e820_end_pfn()
834 unsigned long max_arch_pfn = MAX_ARCH_PFN; in e820_end_pfn()
836 for (i = 0; i < e820_table->nr_entries; i++) { in e820_end_pfn()
837 struct e820_entry *entry = &e820_table->entries[i]; in e820_end_pfn()
838 unsigned long start_pfn; in e820_end_pfn()
839 unsigned long end_pfn; in e820_end_pfn()
841 if (entry->type != type) in e820_end_pfn()
844 start_pfn = entry->addr >> PAGE_SHIFT; in e820_end_pfn()
845 end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT; in e820_end_pfn()
865 unsigned long __init e820__end_of_ram_pfn(void) in e820__end_of_ram_pfn()
870 unsigned long __init e820__end_of_low_ram_pfn(void) in e820__end_of_low_ram_pfn()
872 return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_TYPE_RAM); in e820__end_of_low_ram_pfn()
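
Only a few lines of e820_end_pfn() match here; the unmatched remainder clips each entry's PFN range against limit_pfn and MAX_ARCH_PFN and keeps the maximum end. A sketch of that clipping with the simplified types above (PAGE_SHIFT hard-coded to 12 for the sketch; the kernel version also breaks out early and prints the result):

static unsigned long end_pfn_of_type(const struct e820_table *table,
				     unsigned long limit_pfn,
				     unsigned long max_arch_pfn,
				     enum e820_type type)
{
	unsigned long last_pfn = 0;
	unsigned int i;

	for (i = 0; i < table->nr_entries; i++) {
		const struct e820_entry *entry = &table->entries[i];
		unsigned long start_pfn = entry->addr >> 12;
		unsigned long end_pfn = (entry->addr + entry->size) >> 12;

		if (entry->type != type)
			continue;

		/* Clip the entry against the caller's limit: */
		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn)
			end_pfn = limit_pfn;

		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}

	/* Never report more than the architecture can address: */
	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;

	return last_pfn;
}
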
883 /* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */
889 return -EINVAL; in parse_memopt()
897 return -EINVAL; in parse_memopt()
906 return -EINVAL; in parse_memopt()
908 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); in parse_memopt()
924 return -EINVAL; in parse_memmap_one()
927 e820_table->nr_entries = 0; in parse_memmap_one()
935 return -EINVAL; in parse_memmap_one()
954 if (*p == '-') in parse_memmap_one()
959 return -EINVAL; in parse_memmap_one()
969 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); in parse_memmap_one()
972 return *p == '\0' ? 0 : -EINVAL; in parse_memmap_one()
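
For context, the memmap= items parsed above follow the documented <size>[KMG][@|#|$|!]<address>[KMG] shape. The sketch below is a condensed, kernel-context reconstruction of the suffix dispatch from memory of the documented syntax, not the exact parser: the real parse_memmap_one() also validates the cursor, handles the '%' re-type form that the '-'/'+' fragment above belongs to, and honours memmap=exactmap (the nr_entries = 0 reset above).

static void parse_one_region_sketch(char *p)
{
	unsigned long long start_at, mem_size;

	/* memparse() understands K/M/G suffixes and advances the cursor: */
	mem_size = memparse(p, &p);

	switch (*p) {
	case '@':	/* usable RAM at an explicit address */
		start_at = memparse(p + 1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_RAM);
		break;
	case '#':	/* ACPI data */
		start_at = memparse(p + 1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_ACPI);
		break;
	case '$':	/* reserved, hidden from the OS */
		start_at = memparse(p + 1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_RESERVED);
		break;
	case '!':	/* legacy persistent memory (PRAM) */
		start_at = memparse(p + 1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_PRAM);
		break;
	default:	/* bare size: behave like mem=, trim usable RAM above mem_size */
		e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
		break;
	}
}
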
1015 pa_next = data->next; in e820__reserve_setup_data()
1017 e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); in e820__reserve_setup_data()
1023 if (data->type != SETUP_EFI && in e820__reserve_setup_data()
1024 data->type != SETUP_IMA && in e820__reserve_setup_data()
1025 data->type != SETUP_RNG_SEED) in e820__reserve_setup_data()
1027 sizeof(*data) + data->len, in e820__reserve_setup_data()
1030 if (data->type == SETUP_INDIRECT) { in e820__reserve_setup_data()
1031 len += data->len; in e820__reserve_setup_data()
1039 indirect = (struct setup_indirect *)data->data; in e820__reserve_setup_data()
1041 if (indirect->type != SETUP_INDIRECT) { in e820__reserve_setup_data()
1042 e820__range_update(indirect->addr, indirect->len, in e820__reserve_setup_data()
1044 e820__range_update_kexec(indirect->addr, indirect->len, in e820__reserve_setup_data()
1056 pr_info("extended physical RAM map:\n"); in e820__reserve_setup_data()
1071 pr_info("user-defined physical RAM map:\n"); in e820__finish_early_params()
1078 switch (entry->type) { in e820_type_to_string()
1079 case E820_TYPE_RESERVED_KERN: /* Fall-through: */ in e820_type_to_string()
1080 case E820_TYPE_RAM: return "System RAM"; in e820_type_to_string()
1082 case E820_TYPE_NVS: return "ACPI Non-volatile Storage"; in e820_type_to_string()
1092 static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry) in e820_type_to_iomem_type()
1094 switch (entry->type) { in e820_type_to_iomem_type()
1095 case E820_TYPE_RESERVED_KERN: /* Fall-through: */ in e820_type_to_iomem_type()
1097 case E820_TYPE_ACPI: /* Fall-through: */ in e820_type_to_iomem_type()
1098 case E820_TYPE_NVS: /* Fall-through: */ in e820_type_to_iomem_type()
1099 case E820_TYPE_UNUSABLE: /* Fall-through: */ in e820_type_to_iomem_type()
1100 case E820_TYPE_PRAM: /* Fall-through: */ in e820_type_to_iomem_type()
1101 case E820_TYPE_PMEM: /* Fall-through: */ in e820_type_to_iomem_type()
1102 case E820_TYPE_RESERVED: /* Fall-through: */ in e820_type_to_iomem_type()
1103 case E820_TYPE_SOFT_RESERVED: /* Fall-through: */ in e820_type_to_iomem_type()
1108 static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry) in e820_type_to_iores_desc()
1110 switch (entry->type) { in e820_type_to_iores_desc()
1117 case E820_TYPE_RESERVED_KERN: /* Fall-through: */ in e820_type_to_iores_desc()
1118 case E820_TYPE_RAM: /* Fall-through: */ in e820_type_to_iores_desc()
1119 case E820_TYPE_UNUSABLE: /* Fall-through: */ in e820_type_to_iores_desc()
1126 /* this is the legacy bios/dos rom-shadow + mmio region */ in do_mark_busy()
1127 if (res->start < (1ULL<<20)) in do_mark_busy()
1162 res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, in e820__reserve_resources()
1166 sizeof(*res) * e820_table->nr_entries); in e820__reserve_resources()
1169 for (i = 0; i < e820_table->nr_entries; i++) { in e820__reserve_resources()
1170 struct e820_entry *entry = e820_table->entries + i; in e820__reserve_resources()
1172 end = entry->addr + entry->size - 1; in e820__reserve_resources()
1177 res->start = entry->addr; in e820__reserve_resources()
1178 res->end = end; in e820__reserve_resources()
1179 res->name = e820_type_to_string(entry); in e820__reserve_resources()
1180 res->flags = e820_type_to_iomem_type(entry); in e820__reserve_resources()
1181 res->desc = e820_type_to_iores_desc(entry); in e820__reserve_resources()
1188 if (do_mark_busy(entry->type, res)) { in e820__reserve_resources()
1189 res->flags |= IORESOURCE_BUSY; in e820__reserve_resources()
1195 /* Expose the bootloader-provided memory layout to the sysfs. */ in e820__reserve_resources()
1196 for (i = 0; i < e820_table_firmware->nr_entries; i++) { in e820__reserve_resources()
1197 struct e820_entry *entry = e820_table_firmware->entries + i; in e820__reserve_resources()
1199 firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry)); in e820__reserve_resources()
1204 * How much should we pad the end of RAM, depending on where it is?
1206 static unsigned long __init ram_alignment(resource_size_t pos) in ram_alignment()
1208 unsigned long mb = pos >> 20; in ram_alignment()
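
Only the first statement of ram_alignment() is matched; the rest picks a padding granularity that grows with the address, so the "RAM buffer" reservations below round small low-memory regions less aggressively than large high ones. A hedged sketch of that tiering (thresholds quoted from memory of the function, worth checking against the tree):

static unsigned long ram_alignment_sketch(unsigned long long pos)
{
	unsigned long mb = pos >> 20;

	/* Align up to 64 KiB within the first megabyte: */
	if (!mb)
		return 64 * 1024;

	/* Align up to 1 MiB within the first 16 MiB: */
	if (mb < 16)
		return 1024 * 1024;

	/* Align up to 64 MiB above that: */
	return 64 * 1024 * 1024;
}
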
1222 #define MAX_RESOURCE_SIZE ((resource_size_t)-1)
1230 for (i = 0; i < e820_table->nr_entries; i++) { in e820__reserve_resources_late()
1231 if (!res->parent && res->end) in e820__reserve_resources_late()
1237 * Try to bump up RAM regions to reasonable boundaries, to in e820__reserve_resources_late()
1238 * avoid stolen RAM: in e820__reserve_resources_late()
1240 for (i = 0; i < e820_table->nr_entries; i++) { in e820__reserve_resources_late()
1241 struct e820_entry *entry = &e820_table->entries[i]; in e820__reserve_resources_late()
1244 if (entry->type != E820_TYPE_RAM) in e820__reserve_resources_late()
1247 start = entry->addr + entry->size; in e820__reserve_resources_late()
1248 end = round_up(start, ram_alignment(start)) - 1; in e820__reserve_resources_late()
1254 printk(KERN_DEBUG "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", start, end); in e820__reserve_resources_late()
1255 reserve_region_with_split(&iomem_resource, start, end, "RAM buffer"); in e820__reserve_resources_late()
1264 char *who = "BIOS-e820"; in e820__memory_setup_default()
1267 * Try to copy the BIOS-supplied E820-map. in e820__memory_setup_default()
1269 * Otherwise fake a memory map; one section from 0k->640k, in e820__memory_setup_default()
1270 * the next section from 1mb->appropriate_mem_k in e820__memory_setup_default()
1275 /* Compare results from other methods and take the one that gives more RAM: */ in e820__memory_setup_default()
1278 who = "BIOS-88"; in e820__memory_setup_default()
1281 who = "BIOS-e801"; in e820__memory_setup_default()
1284 e820_table->nr_entries = 0; in e820__memory_setup_default()
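
When append_e820_table() rejects the BIOS-supplied map, the default setup falls back to the two legacy size probes and fabricates a two-entry map, which is what the nr_entries = 0 reset and the "BIOS-88"/"BIOS-e801" strings above belong to. A hedged, kernel-context sketch of that fallback; the boot_params field names and the LOWMEMSIZE()/HIGH_MEMORY constants are quoted from memory and should be checked against the source:

static const char *fake_memory_map_sketch(void)
{
	const char *who = "BIOS-e820";

	if (append_e820_table(boot_params.e820_table, boot_params.e820_entries) < 0) {
		u64 mem_size;

		/* Take whichever legacy probe reported more memory (values in KiB): */
		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";	/* INT 15h, AH=0x88 */
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";	/* INT 15h, AX=0xe801 */
		}

		/* Fake a two-entry map: conventional memory, then everything above 1 MiB: */
		e820_table->nr_entries = 0;
		e820__range_add(0, LOWMEMSIZE(), E820_TYPE_RAM);	/* 0 -> ~640 KiB */
		e820__range_add(HIGH_MEMORY, mem_size << 10, E820_TYPE_RAM);
	}

	return who;
}
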
1297 * E820 map - with an optional platform quirk available for virtual platforms
1304 /* This is a firmware interface ABI - make sure we don't break it: */ in e820__memory_setup()
1312 pr_info("BIOS-provided physical RAM map:\n"); in e820__memory_setup()
1324 * than that - so allow memblock resizing. in e820__memblock_setup()
1332 for (i = 0; i < e820_table->nr_entries; i++) { in e820__memblock_setup()
1333 struct e820_entry *entry = &e820_table->entries[i]; in e820__memblock_setup()
1335 end = entry->addr + entry->size; in e820__memblock_setup()
1339 if (entry->type == E820_TYPE_SOFT_RESERVED) in e820__memblock_setup()
1340 memblock_reserve(entry->addr, entry->size); in e820__memblock_setup()
1342 if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) in e820__memblock_setup()
1345 memblock_add(entry->addr, entry->size); in e820__memblock_setup()