1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Low level x86 E820 memory map handling functions.
4 *
5 * The firmware and the bootloader pass us the "E820 table", which is the primary
6 * physical memory layout description available for x86 systems.
7 *
8 * The kernel takes the E820 memory layout, optionally modifies it with
9 * quirks and other tweaks, and feeds it into the generic Linux memory
10 * allocation routines via a platform-independent interface (memblock, etc.).
11 */
12 #include <linux/memblock.h>
13 #include <linux/suspend.h>
14 #include <linux/acpi.h>
15 #include <linux/firmware-map.h>
16 #include <linux/sort.h>
17 #include <linux/kvm_types.h>
18
19 #include <asm/e820/api.h>
20 #include <asm/setup.h>
21
22 /*
23 * We organize the E820 table into three main data structures:
24 *
25 * - 'e820_table_firmware': the original firmware version passed to us by the
26 * bootloader - not modified by the kernel. It is composed of two parts:
27 * the first 128 E820 memory entries in boot_params.e820_table and the remaining
28 * (if any) entries of the SETUP_E820_EXT nodes. We use this so that:
29 *
30 * - the hibernation code can generate a kernel-independent CRC32
31 * checksum of the physical memory layout of a system.
32 *
33 * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
34 * passed to us by the bootloader - the major difference between
35 * e820_table_firmware[] and this one is that e820_table_kexec[]
36 * might be modified by the kexec itself to fake an mptable.
37 * We use this to:
38 *
39 * - kexec, which is a bootloader in disguise, uses the original E820
40 * layout to pass to the kexec-ed kernel. This way the original kernel
41 * can have a restricted E820 map while the kexec()-ed kexec-kernel
42 * can have access to full memory - etc.
43 *
44 * Export the memory layout via /sys/firmware/memmap. kexec-tools uses
45 * the entries to create an E820 table for the kexec kernel.
46 *
47 * kexec_file_load in-kernel code uses the table for the kexec kernel.
48 *
49 * - 'e820_table': this is the main E820 table that is massaged by the
50 * low level x86 platform code, or modified by boot parameters, before
51 * passed on to higher level MM layers.
52 *
53 * Once the E820 map has been converted to the standard Linux memory layout
54 * information, its role stops - modifying it has no effect and does not get
55 * re-propagated. So its main role is that of temporary bootstrap storage of
56 * firmware-specific memory layout data during early bootup.
57 */
58 __initdata static struct e820_table e820_table_init;
59 __initdata static struct e820_table e820_table_kexec_init;
60 __initdata static struct e820_table e820_table_firmware_init;
61
62 __refdata struct e820_table *e820_table = &e820_table_init;
63 __refdata struct e820_table *e820_table_kexec = &e820_table_kexec_init;
64 __refdata struct e820_table *e820_table_firmware = &e820_table_firmware_init;
65
66 /* For PCI or other memory-mapped resources */
67 unsigned long pci_mem_start = 0xaeedbabe;
68 #ifdef CONFIG_PCI
69 EXPORT_SYMBOL(pci_mem_start);
70 #endif
71
72 /*
73 * This function checks if any part of the range <start,end> is mapped
74 * with type.
75 */
76 static bool _e820__mapped_any(struct e820_table *table,
77 u64 start, u64 end, enum e820_type type)
78 {
79 u32 idx;
80
81 for (idx = 0; idx < table->nr_entries; idx++) {
82 struct e820_entry *entry = &table->entries[idx];
83
84 if (type && entry->type != type)
85 continue;
86 if (entry->addr >= end || entry->addr + entry->size <= start)
87 continue;
88 return true;
89 }
90 return false;
91 }
92
93 bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type)
94 {
95 return _e820__mapped_any(e820_table_firmware, start, end, type);
96 }
97 EXPORT_SYMBOL_FOR_KVM(e820__mapped_raw_any);
98
99 bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
100 {
101 return _e820__mapped_any(e820_table, start, end, type);
102 }
103 EXPORT_SYMBOL_GPL(e820__mapped_any);
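
/*
 * Illustrative sketch (not part of the original file): a quirk or driver can
 * use e820__mapped_any() to check whether the firmware reported any RAM inside
 * a candidate MMIO window, and e820__mapped_raw_any() to ask the same question
 * of the unmodified firmware map. The helper name below is hypothetical.
 */
static bool example_window_touches_ram(u64 start, u64 end)
{
	/* True if any byte of [start, end) is E820_TYPE_RAM: */
	return e820__mapped_any(start, end, E820_TYPE_RAM);
}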
104
105 /*
106 * This function checks if the entire <start,end> range is mapped with 'type'.
107 *
108 * Note: this function only works correctly once the E820 table is sorted and
109 * non-overlapping (at least for the range specified), which is normally the case.
110 */
111 static struct e820_entry *__e820__mapped_all(u64 start, u64 end,
112 enum e820_type type)
113 {
114 u32 idx;
115
116 for (idx = 0; idx < e820_table->nr_entries; idx++) {
117 struct e820_entry *entry = &e820_table->entries[idx];
118
119 if (type && entry->type != type)
120 continue;
121
122 /* Does the range (or part of it) overlap with the current entry? */
123 if (entry->addr >= end || entry->addr + entry->size <= start)
124 continue;
125
126 /*
127 * If the entry covers the beginning of <start,end>, move
128 * 'start' to the end of the entry, since the range is covered up to there:
129 */
130 if (entry->addr <= start)
131 start = entry->addr + entry->size;
132
133 /*
134 * If 'start' is now at or beyond 'end', we're done, full
135 * coverage of the desired range exists:
136 */
137 if (start >= end)
138 return entry;
139 }
140
141 return NULL;
142 }
143
144 /*
145 * This function checks if the entire range <start,end> is mapped with type.
146 */
147 __init bool e820__mapped_all(u64 start, u64 end, enum e820_type type)
148 {
149 return __e820__mapped_all(start, end, type);
150 }
151
152 /*
153 * This function returns the type associated with the range <start,end>.
154 */
155 int e820__get_entry_type(u64 start, u64 end)
156 {
157 struct e820_entry *entry = __e820__mapped_all(start, end, 0);
158
159 return entry ? entry->type : -EINVAL;
160 }
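
/*
 * Illustrative sketch (not part of the original file): e820__mapped_all()
 * answers "is this entire range of the given type?", while
 * e820__get_entry_type() reports the covering type or -EINVAL if the range
 * is not fully covered. The helper name below is hypothetical.
 */
static void __init example_classify_range(u64 start, u64 end)
{
	if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
		pr_info("e820: [mem %#010llx-%#010llx] is entirely reserved\n", start, end - 1);
	else
		pr_info("e820: [mem %#010llx-%#010llx] type: %d\n", start, end - 1, e820__get_entry_type(start, end));
}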
161
162 /*
163 * Add a memory region to the kernel E820 map.
164 */
165 __init static void __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type)
166 {
167 u32 idx = table->nr_entries;
168 struct e820_entry *entry_new;
169
170 if (idx >= ARRAY_SIZE(table->entries)) {
171 pr_err("E820 table full; ignoring [mem %#010llx-%#010llx]\n",
172 start, start + size-1);
173 return;
174 }
175
176 entry_new = table->entries + idx;
177
178 entry_new->addr = start;
179 entry_new->size = size;
180 entry_new->type = type;
181
182 table->nr_entries++;
183 }
184
185 __init void e820__range_add(u64 start, u64 size, enum e820_type type)
186 {
187 __e820__range_add(e820_table, start, size, type);
188 }
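
/*
 * Illustrative sketch (not part of the original file): early platform code
 * can hide a firmware-owned range from the allocator by adding a reserved
 * entry and re-sanitizing the table. Address and size are examples only.
 */
static void __init example_reserve_firmware_range(void)
{
	e820__range_add(0x7f000000, SZ_1M, E820_TYPE_RESERVED);
	e820__update_table(e820_table);
}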
189
190 __init static void e820_print_type(enum e820_type type)
191 {
192 switch (type) {
193 case E820_TYPE_RAM: pr_cont(" System RAM"); break;
194 case E820_TYPE_RESERVED: pr_cont(" device reserved"); break;
195 case E820_TYPE_SOFT_RESERVED: pr_cont(" soft reserved"); break;
196 case E820_TYPE_ACPI: pr_cont(" ACPI data"); break;
197 case E820_TYPE_NVS: pr_cont(" ACPI NVS"); break;
198 case E820_TYPE_UNUSABLE: pr_cont(" unusable"); break;
199 case E820_TYPE_PMEM: /* Fall through: */
200 case E820_TYPE_PRAM: pr_cont(" persistent RAM (type %u)", type); break;
201 default: pr_cont(" type %u", type); break;
202 }
203 }
204
205 __init static void e820__print_table(const char *who)
206 {
207 u64 range_end_prev = 0;
208 u32 idx;
209
210 for (idx = 0; idx < e820_table->nr_entries; idx++) {
211 struct e820_entry *entry = e820_table->entries + idx;
212 u64 range_start, range_end;
213
214 range_start = entry->addr;
215 range_end = entry->addr + entry->size;
216
217 /* Out of order E820 maps should not happen: */
218 if (range_start < range_end_prev)
219 pr_info(FW_BUG "out of order E820 entry!\n");
220
221 if (range_start > range_end_prev) {
222 pr_info("%s: [gap %#018Lx-%#018Lx]\n",
223 who,
224 range_end_prev,
225 range_start-1);
226 }
227
228 pr_info("%s: [mem %#018Lx-%#018Lx] ", who, range_start, range_end-1);
229 e820_print_type(entry->type);
230 pr_cont("\n");
231
232 range_end_prev = range_end;
233 }
234 }
235
236 /*
237 * Sanitize an E820 map.
238 *
239 * Some E820 layouts include overlapping entries. The following
240 * replaces the original E820 map with a new one, removing overlaps,
241 * and resolving conflicting memory types in favor of highest
242 * numbered type.
243 *
244 * The input parameter 'entries' points to an array of 'struct
245 * e820_entry' which on entry has elements in the range [0, *nr_entries)
246 * valid, and which has space for up to max_nr_entries entries.
247 * On return, the resulting sanitized E820 map entries will have been
248 * written back in the same location, starting at 'entries'.
249 *
250 * The integer pointed to by nr_entries must be valid on entry (the
251 * current number of valid entries located at 'entries'). If the
252 * sanitizing succeeds, *nr_entries will be updated with the new
253 * number of valid entries (which is no more than max_nr_entries).
254 *
255 * The return value from e820__update_table() is zero if it
256 * successfully 'sanitized' the map entries passed in, and is -1
257 * if it did nothing, which can happen if (1) it was
258 * only passed one map entry, or (2) any of the input map entries
259 * were invalid (start + size < start, meaning that the size was
260 * so big that the described memory range wrapped around through zero).
261 *
262 * Visually we're performing the following
263 * (1,2,3,4 = memory types)...
264 *
265 * Sample memory map (w/overlaps):
266 * ____22__________________
267 * ______________________4_
268 * ____1111________________
269 * _44_____________________
270 * 11111111________________
271 * ____________________33__
272 * ___________44___________
273 * __________33333_________
274 * ______________22________
275 * ___________________2222_
276 * _________111111111______
277 * _____________________11_
278 * _________________4______
279 *
280 * Sanitized equivalent (no overlap):
281 * 1_______________________
282 * _44_____________________
283 * ___1____________________
284 * ____22__________________
285 * ______11________________
286 * _________1______________
287 * __________3_____________
288 * ___________44___________
289 * _____________33_________
290 * _______________2________
291 * ________________1_______
292 * _________________4______
293 * ___________________2____
294 * ____________________33__
295 * ______________________4_
296 */
297 struct change_member {
298 /* Pointer to the original entry: */
299 struct e820_entry *entry;
300 /* Address for this change point: */
301 u64 addr;
302 };
303
304 __initdata static struct change_member change_point_list[2*E820_MAX_ENTRIES];
305 __initdata static struct change_member *change_point[2*E820_MAX_ENTRIES];
306 __initdata static struct e820_entry *overlap_list[E820_MAX_ENTRIES];
307 __initdata static struct e820_entry new_entries[E820_MAX_ENTRIES];
308
309 __init static int cpcompare(const void *a, const void *b)
310 {
311 struct change_member * const *app = a, * const *bpp = b;
312 const struct change_member *ap = *app, *bp = *bpp;
313
314 /*
315 * Inputs are pointers to two elements of change_point[]. If their
316 * addresses are not equal, their difference dominates. If the addresses
317 * are equal, then consider one that represents the end of its region
318 * to be greater than one that does not.
319 */
320 if (ap->addr != bp->addr)
321 return ap->addr > bp->addr ? 1 : -1;
322
323 return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
324 }
325
326 /*
327 * Can two consecutive E820 entries of the same E820 type be merged?
328 */
329 static bool e820_type_mergeable(enum e820_type type)
330 {
331 /*
332 * These types may indicate distinct platform ranges aligned to
333 * NUMA node, protection domain, performance domain, or other
334 * boundaries. Do not merge them.
335 */
336 if (type == E820_TYPE_PRAM)
337 return false;
338 if (type == E820_TYPE_SOFT_RESERVED)
339 return false;
340
341 return true;
342 }
343
344 __init int e820__update_table(struct e820_table *table)
345 {
346 struct e820_entry *entries = table->entries;
347 u32 max_nr_entries = ARRAY_SIZE(table->entries);
348 enum e820_type current_type, last_type;
349 u64 last_addr;
350 u32 new_nr_entries, overlap_entries;
351 u32 idx, chg_idx, chg_nr;
352
353 /* If there's only one memory region, don't bother: */
354 if (table->nr_entries < 2)
355 return -1;
356
357 BUG_ON(table->nr_entries > max_nr_entries);
358
359 /* Bail out if we find any unreasonable addresses in the map: */
360 for (idx = 0; idx < table->nr_entries; idx++) {
361 if (entries[idx].addr + entries[idx].size < entries[idx].addr)
362 return -1;
363 }
364
365 /* Create pointers for initial change-point information (for sorting): */
366 for (idx = 0; idx < 2 * table->nr_entries; idx++)
367 change_point[idx] = &change_point_list[idx];
368
369 /*
370 * Record all known change-points (starting and ending addresses),
371 * omitting empty memory regions:
372 */
373 chg_idx = 0;
374 for (idx = 0; idx < table->nr_entries; idx++) {
375 if (entries[idx].size != 0) {
376 change_point[chg_idx]->addr = entries[idx].addr;
377 change_point[chg_idx++]->entry = &entries[idx];
378 change_point[chg_idx]->addr = entries[idx].addr + entries[idx].size;
379 change_point[chg_idx++]->entry = &entries[idx];
380 }
381 }
382 chg_nr = chg_idx;
383
384 /* Sort change-point list by memory addresses (low -> high): */
385 sort(change_point, chg_nr, sizeof(*change_point), cpcompare, NULL);
386
387 /* Create a new memory map, removing overlaps: */
388 overlap_entries = 0; /* Number of entries in the overlap table */
389 new_nr_entries = 0; /* Index for creating new map entries */
390 last_type = 0; /* Start with undefined memory type */
391 last_addr = 0; /* Start with 0 as last starting address */
392
393 /* Loop through change-points, determining effect on the new map: */
394 for (chg_idx = 0; chg_idx < chg_nr; chg_idx++) {
395 /* Keep track of all overlapping entries */
396 if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) {
397 /* Add map entry to overlap list (> 1 entry implies an overlap) */
398 overlap_list[overlap_entries++] = change_point[chg_idx]->entry;
399 } else {
400 /* Remove entry from list (order independent, so swap with last): */
401 for (idx = 0; idx < overlap_entries; idx++) {
402 if (overlap_list[idx] == change_point[chg_idx]->entry)
403 overlap_list[idx] = overlap_list[overlap_entries-1];
404 }
405 overlap_entries--;
406 }
407 /*
408 * If there are overlapping entries, decide which
409 * "type" to use (larger value takes precedence --
410 * 1=usable, 2,3,4,4+=unusable)
411 */
412 current_type = 0;
413 for (idx = 0; idx < overlap_entries; idx++) {
414 if (overlap_list[idx]->type > current_type)
415 current_type = overlap_list[idx]->type;
416 }
417
418 /* Continue building up new map based on this information: */
419 if (current_type != last_type || !e820_type_mergeable(current_type)) {
420 if (last_type) {
421 new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
422 /* Move forward only if the new size was non-zero: */
423 if (new_entries[new_nr_entries].size != 0)
424 /* No more space left for new entries? */
425 if (++new_nr_entries >= max_nr_entries)
426 break;
427 }
428 if (current_type) {
429 new_entries[new_nr_entries].addr = change_point[chg_idx]->addr;
430 new_entries[new_nr_entries].type = current_type;
431 last_addr = change_point[chg_idx]->addr;
432 }
433 last_type = current_type;
434 }
435 }
436
437 /* Copy the new entries into the original location: */
438 memcpy(entries, new_entries, new_nr_entries*sizeof(*entries));
439 table->nr_entries = new_nr_entries;
440
441 return 0;
442 }
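
/*
 * Illustrative sketch (not part of the original file): two overlapping
 * entries and their sanitized result. RAM is type 1 and reserved is type 2,
 * so the reserved entry wins where they overlap:
 */
static void __init example_sanitize_overlap(void)
{
	static struct e820_table t __initdata;

	__e820__range_add(&t, 0x00000000, 0x000a0000, E820_TYPE_RAM);
	__e820__range_add(&t, 0x0009f000, 0x00001000, E820_TYPE_RESERVED);

	e820__update_table(&t);

	/*
	 * Result:
	 *   [0x00000000-0x0009efff] System RAM
	 *   [0x0009f000-0x0009ffff] reserved
	 */
}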
443
444 /*
445 * Copy the BIOS E820 map into the kernel's e820_table.
446 *
447 * Sanity-check it while we're at it.
448 */
449 __init static int append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
450 {
451 struct boot_e820_entry *entry = entries;
452
453 while (nr_entries) {
454 u64 start = entry->addr;
455 u64 size = entry->size;
456 u64 end = start + size-1;
457 u32 type = entry->type;
458
459 /* Ignore the remaining entries on 64-bit overflow: */
460 if (start > end && likely(size))
461 return -1;
462
463 e820__range_add(start, size, type);
464
465 entry++;
466 nr_entries--;
467 }
468 return 0;
469 }
470
471 __init static u64
472 __e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
473 {
474 u64 end;
475 u32 idx;
476 u64 real_updated_size = 0;
477
478 BUG_ON(old_type == new_type);
479
480 if (size > (ULLONG_MAX - start))
481 size = ULLONG_MAX - start;
482
483 end = start + size;
484 printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx]", start, end - 1);
485 e820_print_type(old_type);
486 pr_cont(" ==>");
487 e820_print_type(new_type);
488 pr_cont("\n");
489
490 for (idx = 0; idx < table->nr_entries; idx++) {
491 struct e820_entry *entry = &table->entries[idx];
492 u64 final_start, final_end;
493 u64 entry_end;
494
495 if (entry->type != old_type)
496 continue;
497
498 entry_end = entry->addr + entry->size;
499
500 /* Completely covered by new range? */
501 if (entry->addr >= start && entry_end <= end) {
502 entry->type = new_type;
503 real_updated_size += entry->size;
504 continue;
505 }
506
507 /* New range is completely covered? */
508 if (entry->addr < start && entry_end > end) {
509 __e820__range_add(table, start, size, new_type);
510 __e820__range_add(table, end, entry_end - end, entry->type);
511 entry->size = start - entry->addr;
512 real_updated_size += size;
513 continue;
514 }
515
516 /* Partially covered: */
517 final_start = max(start, entry->addr);
518 final_end = min(end, entry_end);
519 if (final_start >= final_end)
520 continue;
521
522 __e820__range_add(table, final_start, final_end - final_start, new_type);
523
524 real_updated_size += final_end - final_start;
525
526 /*
527 * Left range could be head or tail, so need to update
528 * its size first:
529 */
530 entry->size -= final_end - final_start;
531 if (entry->addr < final_start)
532 continue;
533
534 entry->addr = final_end;
535 }
536 return real_updated_size;
537 }
538
539 __init u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
540 {
541 return __e820__range_update(e820_table, start, size, old_type, new_type);
542 }
543
544 __init u64 e820__range_update_table(struct e820_table *t, u64 start, u64 size,
545 enum e820_type old_type, enum e820_type new_type)
546 {
547 return __e820__range_update(t, start, size, old_type, new_type);
548 }
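
/*
 * Illustrative sketch (not part of the original file): memory quirks
 * typically convert a known-problematic RAM range into a reserved one and
 * then re-sanitize the map; the helper name and arguments are examples.
 */
static void __init example_steal_ram(u64 start, u64 size)
{
	e820__range_update(start, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
	e820__update_table(e820_table);
}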
549
550 /* Remove a range of memory from the E820 table: */
551 __init void e820__range_remove(u64 start, u64 size, enum e820_type filter_type)
552 {
553 u32 idx;
554 u64 end;
555
556 if (size > (ULLONG_MAX - start))
557 size = ULLONG_MAX - start;
558
559 end = start + size;
560 printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx]", start, end - 1);
561 if (filter_type)
562 e820_print_type(filter_type);
563 pr_cont("\n");
564
565 for (idx = 0; idx < e820_table->nr_entries; idx++) {
566 struct e820_entry *entry = &e820_table->entries[idx];
567 u64 final_start, final_end;
568 u64 entry_end;
569
570 if (filter_type && entry->type != filter_type)
571 continue;
572
573 entry_end = entry->addr + entry->size;
574
575 /* Completely covered? */
576 if (entry->addr >= start && entry_end <= end) {
577 memset(entry, 0, sizeof(*entry));
578 continue;
579 }
580
581 /* Is the new range completely covered? */
582 if (entry->addr < start && entry_end > end) {
583 e820__range_add(end, entry_end - end, entry->type);
584 entry->size = start - entry->addr;
585 continue;
586 }
587
588 /* Partially covered: */
589 final_start = max(start, entry->addr);
590 final_end = min(end, entry_end);
591 if (final_start >= final_end)
592 continue;
593
594 /*
595 * Left range could be head or tail, so need to update
596 * the size first:
597 */
598 entry->size -= final_end - final_start;
599 if (entry->addr < final_start)
600 continue;
601
602 entry->addr = final_end;
603 }
604 }
605
606 __init void e820__update_table_print(void)
607 {
608 if (e820__update_table(e820_table))
609 return;
610
611 pr_info("modified physical RAM map:\n");
612 e820__print_table("modified");
613 }
614
615 __init static void e820__update_table_kexec(void)
616 {
617 e820__update_table(e820_table_kexec);
618 }
619
620 #define MAX_GAP_END SZ_4G
621
622 /*
623 * Search for a gap in the E820 memory space from 0 to MAX_GAP_END (4GB).
624 */
625 __init static int e820_search_gap(unsigned long *max_gap_start, unsigned long *max_gap_size)
626 {
627 struct e820_entry *entry;
628 u64 range_end_prev = 0;
629 int found = 0;
630 u32 idx;
631
632 for (idx = 0; idx < e820_table->nr_entries; idx++) {
633 u64 range_start, range_end;
634
635 entry = e820_table->entries + idx;
636 range_start = entry->addr;
637 range_end = entry->addr + entry->size;
638
639 /* Process any gap before this entry: */
640 if (range_start > range_end_prev) {
641 u64 gap_start = range_end_prev;
642 u64 gap_end = range_start;
643 u64 gap_size;
644
645 if (gap_start < MAX_GAP_END) {
646 /* Make sure the entirety of the gap is below MAX_GAP_END: */
647 gap_end = min(gap_end, MAX_GAP_END);
648 gap_size = gap_end-gap_start;
649
650 if (gap_size >= *max_gap_size) {
651 *max_gap_start = gap_start;
652 *max_gap_size = gap_size;
653 found = 1;
654 }
655 }
656 }
657
658 range_end_prev = range_end;
659 }
660
661 /* Is there a usable gap beyond the last entry? */
662 if (entry->addr + entry->size < MAX_GAP_END) {
663 u64 gap_start = entry->addr + entry->size;
664 u64 gap_size = MAX_GAP_END-gap_start;
665
666 if (gap_size >= *max_gap_size) {
667 *max_gap_start = gap_start;
668 *max_gap_size = gap_size;
669 found = 1;
670 }
671 }
672
673 return found;
674 }
675
676 /*
677 * Search for the biggest gap in the low 32 bits of the E820
678 * memory space. We pass this space to the PCI subsystem, so
679 * that it can assign MMIO resources to hotplug or
680 * unconfigured devices.
681 *
682 * Hopefully the BIOS left enough space for this.
683 */
684 __init void e820__setup_pci_gap(void)
685 {
686 unsigned long max_gap_start, max_gap_size;
687 int found;
688
689 /* The minimum eligible gap size is 4MB: */
690 max_gap_size = SZ_4M;
691 found = e820_search_gap(&max_gap_start, &max_gap_size);
692
693 if (!found) {
694 #ifdef CONFIG_X86_64
695 max_gap_start = (max_pfn << PAGE_SHIFT) + SZ_1M;
696 pr_err("Cannot find an available gap in the 32-bit address range\n");
697 pr_err("PCI devices with unassigned 32-bit BARs may not work!\n");
698 #else
699 max_gap_start = SZ_256M;
700 #endif
701 }
702
703 /*
704 * e820__reserve_resources_late() protects stolen RAM already:
705 */
706 pci_mem_start = max_gap_start;
707
708 pr_info("[gap %#010lx-%#010lx] available for PCI devices\n",
709 max_gap_start, max_gap_start + max_gap_size-1);
710 }
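
/*
 * Example (illustrative numbers): if the last E820 entry below 4 GB ends at
 * 0xcfe00000 and nothing else is mapped between that and 4 GB, the search
 * finds the gap [0xcfe00000, 0x100000000), so pci_mem_start becomes
 * 0xcfe00000 and roughly 770 MB of 32-bit address space is left for PCI BARs.
 */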
711
712 /*
713 * Called late during init, in free_initmem().
714 *
715 * Initial e820_table and e820_table_kexec are largish __initdata arrays.
716 *
717 * Copy them to a (usually much smaller) dynamically allocated area that is
718 * sized precisely after the number of e820 entries.
719 *
720 * This is done after we've performed all the fixes and tweaks to the tables.
721 * All functions which modify them are __init functions, which won't exist
722 * after free_initmem().
723 */
724 __init void e820__reallocate_tables(void)
725 {
726 struct e820_table *n;
727 int size;
728
729 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
730 n = kmemdup(e820_table, size, GFP_KERNEL);
731 BUG_ON(!n);
732 e820_table = n;
733
734 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
735 n = kmemdup(e820_table_kexec, size, GFP_KERNEL);
736 BUG_ON(!n);
737 e820_table_kexec = n;
738
739 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
740 n = kmemdup(e820_table_firmware, size, GFP_KERNEL);
741 BUG_ON(!n);
742 e820_table_firmware = n;
743 }
744
745 /*
746 * Because of the small fixed size of struct boot_params, only the first
747 * 128 E820 memory entries are passed to the kernel via boot_params.e820_table;
748 * the remaining (if any) entries are passed via the SETUP_E820_EXT node of
749 * struct setup_data, which is parsed here.
750 */
751 __init void e820__memory_setup_extended(u64 phys_addr, u32 data_len)
752 {
753 int entries;
754 struct boot_e820_entry *extmap;
755 struct setup_data *sdata;
756
757 sdata = early_memremap(phys_addr, data_len);
758 entries = sdata->len / sizeof(*extmap);
759 extmap = (struct boot_e820_entry *)(sdata->data);
760
761 append_e820_table(extmap, entries);
762 e820__update_table(e820_table);
763
764 memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
765 memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
766
767 early_memunmap(sdata, data_len);
768 pr_info("extended physical RAM map:\n");
769 e820__print_table("extended");
770 }
771
772 /*
773 * Find the ranges of physical addresses that do not correspond to
774 * E820 RAM areas and register the corresponding pages as 'nosave' for
775 * hibernation (32-bit) or software suspend and suspend to RAM (64-bit).
776 *
777 * This function requires the E820 map to be sorted and without any
778 * overlapping entries.
779 */
780 __init void e820__register_nosave_regions(unsigned long limit_pfn)
781 {
782 u32 idx;
783 u64 last_addr = 0;
784
785 for (idx = 0; idx < e820_table->nr_entries; idx++) {
786 struct e820_entry *entry = &e820_table->entries[idx];
787
788 if (entry->type != E820_TYPE_RAM)
789 continue;
790
791 if (last_addr < entry->addr)
792 register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
793
794 last_addr = entry->addr + entry->size;
795 }
796
797 register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
798 }
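
/*
 * Example (illustrative numbers): with RAM entries [0, 0x9e000) and
 * [0x100000, limit), the loop above registers the non-RAM hole
 * [0x9e000, 0x100000) as nosave, and the final call marks everything from
 * the end of the last RAM entry up to 'limit_pfn'.
 */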
799
800 #ifdef CONFIG_ACPI
801 /*
802 * Register ACPI NVS memory regions, so that we can save/restore them during
803 * hibernation and the subsequent resume:
804 */
805 __init static int e820__register_nvs_regions(void)
806 {
807 u32 idx;
808
809 for (idx = 0; idx < e820_table->nr_entries; idx++) {
810 struct e820_entry *entry = &e820_table->entries[idx];
811
812 if (entry->type == E820_TYPE_NVS)
813 acpi_nvs_register(entry->addr, entry->size);
814 }
815
816 return 0;
817 }
818 core_initcall(e820__register_nvs_regions);
819 #endif
820
821 /*
822 * Allocate the requested number of bytes with the requested alignment
823 * and return the physical address to the caller. Also register this
824 * range in the 'kexec' E820 table as a reserved range.
825 *
826 * This allows kexec to fake a new mptable, as if it came from the real
827 * system.
828 */
829 __init u64 e820__memblock_alloc_reserved(u64 size, u64 align)
830 {
831 u64 addr;
832
833 addr = memblock_phys_alloc(size, align);
834 if (addr) {
835 e820__range_update_table(e820_table_kexec, addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
836 pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
837 e820__update_table_kexec();
838 }
839
840 return addr;
841 }
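
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * one page which is normal RAM for this kernel but shows up as reserved in
 * the kexec E820 table. The helper name is hypothetical.
 */
static u64 __init example_alloc_kexec_reserved_page(void)
{
	/* Returns 0 if the allocation failed: */
	return e820__memblock_alloc_reserved(PAGE_SIZE, PAGE_SIZE);
}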
842
843 #ifdef CONFIG_X86_32
844 # ifdef CONFIG_X86_PAE
845 # define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
846 # else
847 # define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
848 # endif
849 #else /* CONFIG_X86_32 */
850 # define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
851 #endif
852
853 /*
854 * Find the highest page frame number we have available
855 */
856 __init static unsigned long e820__end_ram_pfn(unsigned long limit_pfn)
857 {
858 u32 idx;
859 unsigned long last_pfn = 0;
860 unsigned long max_arch_pfn = MAX_ARCH_PFN;
861
862 for (idx = 0; idx < e820_table->nr_entries; idx++) {
863 struct e820_entry *entry = &e820_table->entries[idx];
864 unsigned long start_pfn;
865 unsigned long end_pfn;
866
867 if (entry->type != E820_TYPE_RAM &&
868 entry->type != E820_TYPE_ACPI)
869 continue;
870
871 start_pfn = entry->addr >> PAGE_SHIFT;
872 end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT;
873
874 if (start_pfn >= limit_pfn)
875 continue;
876 if (end_pfn > limit_pfn) {
877 last_pfn = limit_pfn;
878 break;
879 }
880 if (end_pfn > last_pfn)
881 last_pfn = end_pfn;
882 }
883
884 if (last_pfn > max_arch_pfn)
885 last_pfn = max_arch_pfn;
886
887 pr_info("last_pfn = %#lx max_arch_pfn = %#lx\n",
888 last_pfn, max_arch_pfn);
889 return last_pfn;
890 }
891
892 __init unsigned long e820__end_of_ram_pfn(void)
893 {
894 return e820__end_ram_pfn(MAX_ARCH_PFN);
895 }
896
897 __init unsigned long e820__end_of_low_ram_pfn(void)
898 {
899 return e820__end_ram_pfn(1UL << (32 - PAGE_SHIFT));
900 }
901
902 __initdata static int userdef;
903
904 /* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */
905 __init static int parse_memopt(char *p)
906 {
907 u64 mem_size;
908
909 if (!p)
910 return -EINVAL;
911
912 if (!strcmp(p, "nopentium")) {
913 #ifdef CONFIG_X86_32
914 setup_clear_cpu_cap(X86_FEATURE_PSE);
915 return 0;
916 #else
917 pr_warn("mem=nopentium ignored! (only supported on x86_32)\n");
918 return -EINVAL;
919 #endif
920 }
921
922 userdef = 1;
923 mem_size = memparse(p, &p);
924
925 /* Don't remove all memory when getting "mem={invalid}" parameter: */
926 if (mem_size == 0)
927 return -EINVAL;
928
929 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM);
930
931 #ifdef CONFIG_MEMORY_HOTPLUG
932 max_mem_size = mem_size;
933 #endif
934
935 return 0;
936 }
937 early_param("mem", parse_memopt);
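
/*
 * Example: booting with "mem=4G" makes the handler above call
 * e820__range_remove(SZ_4G, ULLONG_MAX - SZ_4G, E820_TYPE_RAM), i.e. all
 * RAM above the 4 GB physical address is dropped from the E820 map, while
 * non-RAM entries (MMIO, ACPI, etc.) above that boundary are left alone.
 */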
938
939 __init static int parse_memmap_one(char *p)
940 {
941 char *oldp;
942 u64 start_at, mem_size;
943
944 if (!p)
945 return -EINVAL;
946
947 if (!strncmp(p, "exactmap", 8)) {
948 e820_table->nr_entries = 0;
949 userdef = 1;
950 return 0;
951 }
952
953 oldp = p;
954 mem_size = memparse(p, &p);
955 if (p == oldp)
956 return -EINVAL;
957
958 userdef = 1;
959 if (*p == '@') {
960 start_at = memparse(p+1, &p);
961 e820__range_add(start_at, mem_size, E820_TYPE_RAM);
962 } else if (*p == '#') {
963 start_at = memparse(p+1, &p);
964 e820__range_add(start_at, mem_size, E820_TYPE_ACPI);
965 } else if (*p == '$') {
966 start_at = memparse(p+1, &p);
967 e820__range_add(start_at, mem_size, E820_TYPE_RESERVED);
968 } else if (*p == '!') {
969 start_at = memparse(p+1, &p);
970 e820__range_add(start_at, mem_size, E820_TYPE_PRAM);
971 } else if (*p == '%') {
972 enum e820_type from = 0, to = 0;
973
974 start_at = memparse(p + 1, &p);
975 if (*p == '-')
976 from = simple_strtoull(p + 1, &p, 0);
977 if (*p == '+')
978 to = simple_strtoull(p + 1, &p, 0);
979 if (*p != '\0')
980 return -EINVAL;
981 if (from && to)
982 e820__range_update(start_at, mem_size, from, to);
983 else if (to)
984 e820__range_add(start_at, mem_size, to);
985 else
986 e820__range_remove(start_at, mem_size, from);
987 } else {
988 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM);
989 }
990
991 return *p == '\0' ? 0 : -EINVAL;
992 }
993
994 __init static int parse_memmap_opt(char *str)
995 {
996 while (str) {
997 char *k = strchr(str, ',');
998
999 if (k)
1000 *k++ = 0;
1001
1002 parse_memmap_one(str);
1003 str = k;
1004 }
1005
1006 return 0;
1007 }
1008 early_param("memmap", parse_memmap_opt);
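
/*
 * Examples of the syntax accepted above (addresses and sizes are
 * illustrative only):
 *
 *   memmap=exactmap           start from an empty map instead of the firmware's
 *   memmap=512M@64M           add a RAM entry at 64M
 *   memmap=64K#0xfffc0000     add an ACPI data entry
 *   memmap=16M$0x30000000     add a reserved entry (note: '$' may need escaping)
 *   memmap=4G!0x100000000     add a legacy persistent-memory (PRAM) entry
 *   memmap=1M%0x40000000-1+2  re-type 1M at 1G from type 1 (RAM) to type 2 (reserved)
 *   memmap=2G                 like mem=2G: drop RAM above 2 GB
 *
 * Multiple options can be given, separated by commas.
 */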
1009
1010 /*
1011 * Called after parse_early_param(), after early parameters (such as mem=)
1012 * have been processed, in which case we already have an E820 table filled in
1013 * via the parameter callback function(s), but it's not sorted and printed yet:
1014 */
1015 __init void e820__finish_early_params(void)
1016 {
1017 if (userdef) {
1018 if (e820__update_table(e820_table) < 0)
1019 panic("Invalid user supplied memory map");
1020
1021 pr_info("user-defined physical RAM map:\n");
1022 e820__print_table("user");
1023 }
1024 }
1025
1026 __init static const char * e820_type_to_string(struct e820_entry *entry)
1027 {
1028 switch (entry->type) {
1029 case E820_TYPE_RAM: return "System RAM";
1030 case E820_TYPE_ACPI: return "ACPI Tables";
1031 case E820_TYPE_NVS: return "ACPI Non-volatile Storage";
1032 case E820_TYPE_UNUSABLE: return "Unusable memory";
1033 case E820_TYPE_PRAM: return "Persistent Memory (legacy)";
1034 case E820_TYPE_PMEM: return "Persistent Memory";
1035 case E820_TYPE_RESERVED: return "Reserved";
1036 case E820_TYPE_SOFT_RESERVED: return "Soft Reserved";
1037 default: return "Unknown E820 type";
1038 }
1039 }
1040
1041 __init static unsigned long e820_type_to_iomem_type(struct e820_entry *entry)
1042 {
1043 switch (entry->type) {
1044 case E820_TYPE_RAM: return IORESOURCE_SYSTEM_RAM;
1045 case E820_TYPE_ACPI: /* Fall-through: */
1046 case E820_TYPE_NVS: /* Fall-through: */
1047 case E820_TYPE_UNUSABLE: /* Fall-through: */
1048 case E820_TYPE_PRAM: /* Fall-through: */
1049 case E820_TYPE_PMEM: /* Fall-through: */
1050 case E820_TYPE_RESERVED: /* Fall-through: */
1051 case E820_TYPE_SOFT_RESERVED: /* Fall-through: */
1052 default: return IORESOURCE_MEM;
1053 }
1054 }
1055
1056 __init static unsigned long e820_type_to_iores_desc(struct e820_entry *entry)
1057 {
1058 switch (entry->type) {
1059 case E820_TYPE_ACPI: return IORES_DESC_ACPI_TABLES;
1060 case E820_TYPE_NVS: return IORES_DESC_ACPI_NV_STORAGE;
1061 case E820_TYPE_PMEM: return IORES_DESC_PERSISTENT_MEMORY;
1062 case E820_TYPE_PRAM: return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
1063 case E820_TYPE_RESERVED: return IORES_DESC_RESERVED;
1064 case E820_TYPE_SOFT_RESERVED: return IORES_DESC_SOFT_RESERVED;
1065 case E820_TYPE_RAM: /* Fall-through: */
1066 case E820_TYPE_UNUSABLE: /* Fall-through: */
1067 default: return IORES_DESC_NONE;
1068 }
1069 }
1070
1071 /*
1072 * We assign one resource entry for each E820 map entry:
1073 */
1074 __initdata static struct resource *e820_res;
1075
1076 /*
1077 * Is this a device address region that should not be marked busy?
1078 * (Versus system address regions that we register & lock early.)
1079 */
1080 __init static bool e820_device_region(enum e820_type type, struct resource *res)
1081 {
1082 /* This is the legacy BIOS/DOS ROM-shadow + MMIO region: */
1083 if (res->start < SZ_1M)
1084 return false;
1085
1086 /*
1087 * Treat persistent memory and other special memory ranges like
1088 * device memory, i.e. keep it available for exclusive use of a
1089 * driver:
1090 */
1091 switch (type) {
1092 case E820_TYPE_RESERVED:
1093 case E820_TYPE_SOFT_RESERVED:
1094 case E820_TYPE_PRAM:
1095 case E820_TYPE_PMEM:
1096 return true;
1097 case E820_TYPE_RAM:
1098 case E820_TYPE_ACPI:
1099 case E820_TYPE_NVS:
1100 case E820_TYPE_UNUSABLE:
1101 default:
1102 return false;
1103 }
1104 }
1105
1106 /*
1107 * Mark E820 system regions as busy for the resource manager:
1108 */
1109 __init void e820__reserve_resources(void)
1110 {
1111 u32 idx;
1112 struct resource *res;
1113 u64 end;
1114
1115 res = memblock_alloc_or_panic(sizeof(*res) * e820_table->nr_entries,
1116 SMP_CACHE_BYTES);
1117 e820_res = res;
1118
1119 for (idx = 0; idx < e820_table->nr_entries; idx++) {
1120 struct e820_entry *entry = e820_table->entries + idx;
1121
1122 end = entry->addr + entry->size - 1;
1123 if (end != (resource_size_t)end) {
1124 res++;
1125 continue;
1126 }
1127 res->start = entry->addr;
1128 res->end = end;
1129 res->name = e820_type_to_string(entry);
1130 res->flags = e820_type_to_iomem_type(entry);
1131 res->desc = e820_type_to_iores_desc(entry);
1132
1133 /*
1134 * Skip and don't register device regions that could conflict
1135 * with PCI device BAR resources. They get inserted later in
1136 * pcibios_resource_survey() -> e820__reserve_resources_late():
1137 */
1138 if (!e820_device_region(entry->type, res)) {
1139 res->flags |= IORESOURCE_BUSY;
1140 insert_resource(&iomem_resource, res);
1141 }
1142 res++;
1143 }
1144
1145 /* Expose the kexec e820 table to sysfs: */
1146 for (idx = 0; idx < e820_table_kexec->nr_entries; idx++) {
1147 struct e820_entry *entry = e820_table_kexec->entries + idx;
1148
1149 firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry));
1150 }
1151 }
1152
1153 /*
1154 * How much should we pad the end of RAM, depending on where it is?
1155 */
1156 __init static unsigned long ram_alignment(resource_size_t pos)
1157 {
1158 unsigned long mb = pos >> 20;
1159
1160 /* To 64kB in the first megabyte */
1161 if (!mb)
1162 return 64*1024;
1163
1164 /* To 1MB in the first 16MB */
1165 if (mb < 16)
1166 return 1024*1024;
1167
1168 /* To 64MB for anything above that */
1169 return 64*1024*1024;
1170 }
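
/*
 * Example: a RAM entry ending at 0x7ff80000 (just under 2 GB) yields
 * ram_alignment() == 64 MB, so e820__reserve_resources_late() below pads it
 * with a "RAM buffer" resource up to round_up(0x7ff80000, 64M) - 1 =
 * 0x7fffffff, keeping drivers away from RAM the firmware may have stolen.
 */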
1171
1172 #define MAX_RESOURCE_SIZE ((resource_size_t)-1)
1173
1174 __init void e820__reserve_resources_late(void)
1175 {
1176 /*
1177 * Register device address regions listed in the E820 map,
1178 * these can be claimed by device drivers later on:
1179 */
1180 for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
1181 struct resource *res = e820_res + idx;
1182
1183 /* skip added or uninitialized resources */
1184 if (res->parent || !res->end)
1185 continue;
1186
1187 /* set aside soft-reserved resources for driver consideration */
1188 if (res->desc == IORES_DESC_SOFT_RESERVED) {
1189 insert_resource_expand_to_fit(&soft_reserve_resource, res);
1190 } else {
1191 /* publish the rest immediately */
1192 insert_resource_expand_to_fit(&iomem_resource, res);
1193 }
1194 }
1195
1196 /*
1197 * Create additional 'gaps' at the end of RAM regions,
1198 * rounding them up to 64k/1MB/64MB boundaries, should
1199 * they be weirdly sized, and register extra, locked
1200 * resource regions for them, to make sure drivers
1201 * won't claim those addresses.
1202 *
1203 * These are basically blind guesses and heuristics to
1204 * avoid resource conflicts with broken firmware that
1205 * doesn't properly list 'stolen RAM' as a system region
1206 * in the E820 map.
1207 */
1208 for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
1209 struct e820_entry *entry = &e820_table->entries[idx];
1210 u64 start, end;
1211
1212 if (entry->type != E820_TYPE_RAM)
1213 continue;
1214
1215 start = entry->addr + entry->size;
1216 end = round_up(start, ram_alignment(start)) - 1;
1217 if (end > MAX_RESOURCE_SIZE)
1218 end = MAX_RESOURCE_SIZE;
1219 if (start >= end)
1220 continue;
1221
1222 pr_info("e820: register RAM buffer resource [mem %#010llx-%#010llx]\n", start, end);
1223 reserve_region_with_split(&iomem_resource, start, end, "RAM buffer");
1224 }
1225 }
1226
1227 /*
1228 * Pass the firmware (bootloader) E820 map to the kernel and process it:
1229 */
1230 __init char * e820__memory_setup_default(void)
1231 {
1232 char *who = "BIOS-e820";
1233
1234 /*
1235 * Try to copy the BIOS-supplied E820-map.
1236 *
1237 * Otherwise fake a memory map; one section from 0k->640k,
1238 * the next section from 1mb->appropriate_mem_k
1239 */
1240 if (append_e820_table(boot_params.e820_table, boot_params.e820_entries) < 0) {
1241 u64 mem_size;
1242
1243 /* Compare results from other methods and take the one that gives more RAM: */
1244 if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
1245 mem_size = boot_params.screen_info.ext_mem_k;
1246 who = "BIOS-88";
1247 } else {
1248 mem_size = boot_params.alt_mem_k;
1249 who = "BIOS-e801";
1250 }
1251
1252 e820_table->nr_entries = 0;
1253 e820__range_add(0, LOWMEMSIZE(), E820_TYPE_RAM);
1254 e820__range_add(HIGH_MEMORY, mem_size << 10, E820_TYPE_RAM);
1255 }
1256
1257 /* We just appended a lot of ranges, sanitize the table: */
1258 e820__update_table(e820_table);
1259
1260 return who;
1261 }
1262
1263 /*
1264 * Calls e820__memory_setup_default() in essence to pick up the firmware/bootloader
1265 * E820 map - with an optional platform quirk available for virtual platforms
1266 * to override this method of boot environment processing:
1267 */
1268 __init void e820__memory_setup(void)
1269 {
1270 char *who;
1271
1272 /* This is a firmware interface ABI - make sure we don't break it: */
1273 BUILD_BUG_ON(sizeof(struct boot_e820_entry) != 20);
1274
1275 who = x86_init.resources.memory_setup();
1276
1277 memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
1278 memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));
1279
1280 pr_info("BIOS-provided physical RAM map:\n");
1281 e820__print_table(who);
1282 }
1283
1284 __init void e820__memblock_setup(void)
1285 {
1286 u32 idx;
1287 u64 end;
1288
1289 #ifdef CONFIG_MEMORY_HOTPLUG
1290 /*
1291 * Memory used by the kernel cannot be hot-removed because Linux
1292 * cannot migrate the kernel pages. When memory hotplug is
1293 * enabled, we should prevent memblock from allocating memory
1294 * for the kernel.
1295 *
1296 * ACPI SRAT records all hotpluggable memory ranges. But before
1297 * SRAT is parsed, we don't know about it.
1298 *
1299 * The kernel image is loaded into memory at very early time. We
1300 * cannot prevent this anyway. So on NUMA systems, we mark any
1301 * node the kernel resides in as un-hotpluggable.
1302 *
1303 * Since on modern servers, one node could have double-digit
1304 * gigabytes memory, we can assume the memory around the kernel
1305 * image is also un-hotpluggable. So before SRAT is parsed, just
1306 * allocate memory near the kernel image, to do our best to keep
1307 * the kernel away from hotpluggable memory.
1308 */
1309 if (movable_node_is_enabled())
1310 memblock_set_bottom_up(true);
1311 #endif
1312
1313 /*
1314 * At this point only the first megabyte is mapped for sure; the
1315 * rest of the memory cannot be used for memblock resizing.
1316 */
1317 memblock_set_current_limit(ISA_END_ADDRESS);
1318
1319 /*
1320 * The bootstrap memblock region count maximum is 128 entries
1321 * (INIT_MEMBLOCK_REGIONS), but EFI might pass us more E820 entries
1322 * than that - so allow memblock resizing.
1323 *
1324 * This is safe, because this call happens pretty late during x86 setup,
1325 * so we know about reserved memory regions already. (This is important
1326 * so that memblock resizing does not stomp over reserved areas.)
1327 */
1328 memblock_allow_resize();
1329
1330 for (idx = 0; idx < e820_table->nr_entries; idx++) {
1331 struct e820_entry *entry = &e820_table->entries[idx];
1332
1333 end = entry->addr + entry->size;
1334 if (end != (resource_size_t)end)
1335 continue;
1336
1337 if (entry->type == E820_TYPE_SOFT_RESERVED)
1338 memblock_reserve(entry->addr, entry->size);
1339
1340 if (entry->type != E820_TYPE_RAM)
1341 continue;
1342
1343 memblock_add(entry->addr, entry->size);
1344 }
1345
1346 /*
1347 * At this point memblock is only allowed to allocate from memory
1348 * below 1M (aka ISA_END_ADDRESS) up until direct map is completely set
1349 * up in init_mem_mapping().
1350 *
1351 * KHO kernels are special and use only scratch memory for memblock
1352 * allocations, but memory below 1M is ignored by kernel after early
1353 * boot and cannot be naturally marked as scratch.
1354 *
1355 * To allow allocation of the real-mode trampoline and a few (if any)
1356 * other very early allocations from below 1M forcibly mark the memory
1357 * below 1M as scratch.
1358 *
1359 * After real mode trampoline is allocated, we clear that scratch
1360 * marking.
1361 */
1362 memblock_mark_kho_scratch(0, SZ_1M);
1363
1364 /*
1365 * 32-bit systems are limited to 4GB of memory even with HIGHMEM and
1366 * to even less without it.
1367 * Discard memory after max_pfn - the actual limit detected at runtime.
1368 */
1369 if (IS_ENABLED(CONFIG_X86_32))
1370 memblock_remove(PFN_PHYS(max_pfn), -1);
1371
1372 /* Throw away partial pages: */
1373 memblock_trim_memory(PAGE_SIZE);
1374
1375 memblock_dump_all();
1376 }
1377