// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/mutex.h>

#ifdef CONFIG_KEXEC_HANDOVER
#include <linux/libfdt.h>
#include <linux/kexec_handover.h>
#include <linux/kho/abi/memblock.h>
#endif /* CONFIG_KEXEC_HANDOVER */

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
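
/*
 * Illustrative sketch (not part of the kernel build): an early
 * architecture setup could drive the API described above roughly like
 * this; the addresses, sizes and the initrd_* variables are
 * hypothetical and exist only for the example.
 *
 *	memblock_add(0x80000000, SZ_1G);	     // register a bank of RAM
 *	memblock_reserve(initrd_start, initrd_size); // protect the initrd
 *	memblock_allow_resize();	// safe once reservations are known
 *
 *	// later, an early allocation that returns a virtual address:
 *	void *table = memblock_alloc(SZ_16K, SZ_16K);
 */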

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
/* When set to true, only allocate from MEMBLOCK_KHO_SCRATCH ranges */
static bool kho_scratch_only;
#else
#define kho_scratch_only false
#endif

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	/* skip non-scratch memory for kho early boot allocations */
	if (kho_scratch_only)
		return MEMBLOCK_KHO_SCRATCH;

	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
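
/*
 * Worked example (hypothetical values): with base = PHYS_ADDR_MAX - 0xfff
 * and *size = SZ_16K, the size is capped to 0xfff so that base + *size
 * no longer wraps past PHYS_ADDR_MAX.
 */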

/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
		       phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			return true;
	return false;
}
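
/*
 * For instance (hypothetical addresses), [0x1000, 0x3000) and
 * [0x2000, 0x4000) overlap, while [0x1000, 0x2000) and [0x2000, 0x3000)
 * merely touch and are not considered overlapping.
 */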

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
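
/*
 * Sketch of the two search directions, assuming a single free range
 * [0x1000, 0x9000) and a page-sized, page-aligned request: the
 * bottom-up search returns 0x1000 (the lowest fit) while the top-down
 * search returns 0x8000 (the highest fit).  The numbers are
 * hypothetical; top-down is the default.
 */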

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		panic("memblock: cannot resize %s array\n", type->name);

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		if (addr) {
			/* The memory may not have been accepted, yet. */
			accept_memory(addr, new_alloc_size);

			new_array = __va(addr);
		} else {
			new_array = NULL;
		}
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve_kern(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
		end_rgn--;
	}
}
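
/*
 * Worked example (hypothetical regions): [0x1000, 0x2000) followed by
 * [0x2000, 0x3000) with identical node ids and flags collapses into a
 * single [0x1000, 0x3000) region; had the flags differed, both regions
 * would be left untouched.
 */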

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 0 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		type->cnt = 1;
		return 0;
	}

	/*
	 * The worst case is when new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there are enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}
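
/*
 * Worked example (hypothetical addresses): if @type already holds
 * [0x1000, 0x2000) and [0x1800, 0x3000) is added with the same nid and
 * flags, only the non-overlapping tail [0x2000, 0x3000) is inserted,
 * and the merge pass then leaves a single [0x1000, 0x3000) region.
 */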

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_validate_numa_coverage - check if amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal memory size that can have unassigned node
 * ID (in bytes).
 *
 * Buggy firmware may report memory that does not belong to any node.
 * Check if the amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* calculate lost pages */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (!numa_valid_node(nid))
			nr_pages += end_pfn - start_pfn;
	}

	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
		return false;
	}

	return true;
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range is returned in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
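
/*
 * Worked example (hypothetical addresses): isolating [0x1800, 0x2800)
 * within a single region [0x1000, 0x3000) splits it into
 * [0x1000, 0x1800), [0x1800, 0x2800) and [0x2800, 0x3000), with
 * *@start_rgn and *@end_rgn bracketing the middle piece.
 */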

static int __init_memblock memblock_remove_range(struct memblock_type *type,
						 phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
				       int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, nid, flags);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
__init void memblock_set_kho_scratch_only(void)
{
	kho_scratch_only = true;
}

__init void memblock_clear_kho_scratch_only(void)
{
	kho_scratch_only = false;
}

__init void memmap_init_kho_scratch_pages(void)
{
	phys_addr_t start, end;
	unsigned long pfn;
	int nid;
	u64 i;

	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
		return;

	/*
	 * Initialize struct pages for free scratch memory.
	 * The struct pages for reserved scratch memory will be set up in
	 * reserve_bootmem_region()
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
			init_deferred_page(pfn, nid);
	}
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @type: memblock type to set/clear flag for
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * @flag on it.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int set, int flag)
{
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved()
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
 * MEMBLOCK_RSRV_NOINIT
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
 * not be fully initialized, to allow the caller to optimize their
 * initialization.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
 * completely bypasses the initialization of struct pages for such region.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
 * region will be initialized with default values but won't be marked as
 * reserved.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
				    MEMBLOCK_RSRV_NOINIT);
}

/**
 * memblock_mark_kho_scratch - Mark a memory region as MEMBLOCK_KHO_SCRATCH.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
 * for allocations during early boot with kexec handover.
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
				    MEMBLOCK_KHO_SCRATCH);
}

/**
 * memblock_clear_kho_scratch - Clear MEMBLOCK_KHO_SCRATCH flag for a
 * specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
				    MEMBLOCK_KHO_SCRATCH);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (numa_valid_node(nid) && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	/*
	 * In early alloc during kexec handover, we can only consider
	 * MEMBLOCK_KHO_SCRATCH regions for the allocations
	 */
	if ((flags & MEMBLOCK_KHO_SCRATCH) && !memblock_is_kho_scratch(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
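
/*
 * Typical use is via the iteration helpers; e.g. a sketch that prints
 * every free range (the pr_info() formatting is illustrative):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa)\n", &start, &end);
 */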

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (!numa_valid_node(nid) || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
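
/*
 * Matching usage sketch: walk all PFN ranges known to memblock together
 * with their node ids (the loop body is illustrative):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */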

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available())) {
		void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);

		return vaddr ? virt_to_phys(vaddr) : 0;
	}

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
		goto done;

	if (numa_valid_node(nid) && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve_kern(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, size);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
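
/*
 * Usage sketch (the size, alignment and panic message are hypothetical):
 *
 *	phys_addr_t pa = memblock_phys_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, nid);
 *
 *	if (!pa)
 *		panic("Failed to allocate %lu bytes\n", PAGE_SIZE);
 */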

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       true);
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}
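/*
 * Illustrative sketch (example values, not part of this file): callers
 * of the _raw variants own initialization of the returned memory, for
 * instance when the buffer will be fully overwritten anyway. A
 * hypothetical caller might do:
 *
 *	void *table = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, 0,
 *						 MEMBLOCK_ALLOC_ACCESSIBLE,
 *						 NUMA_NO_NODE);
 *	if (table)
 *		memset(table, 0xff, PAGE_SIZE);
 */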

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @func: caller func name
 *
 * This function attempts to allocate memory using memblock_alloc,
 * and in case of failure, it calls panic with the formatted message.
 * This function should not be used directly, please use the macro memblock_alloc_or_panic.
 */
void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
				       const char *func)
{
	void *addr = memblock_alloc(size, align);

	if (unlikely(!addr))
		panic("%s: Failed to allocate %pap bytes\n", func, &size);
	return addr;
}
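/*
 * Illustrative sketch (not part of this file): early-boot code that
 * cannot make progress without the allocation uses the wrapper macro
 * rather than this helper directly, e.g.
 *
 *	ptr = memblock_alloc_or_panic(SZ_4K, SMP_CACHE_BYTES);
 *
 * so that the panic message automatically names the calling function.
 */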

/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator.
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(cursor, 0);
		totalram_pages_inc();
	}
}
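/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * subsystem that reserved a region early and is finished with it only
 * after memblock has been torn down hands the pages to the buddy
 * allocator like this:
 *
 *	memblock_free_late(init_table_phys, init_table_size);
 *
 * where init_table_phys/init_table_size describe a region reserved
 * earlier in boot.
 */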

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int nid)
{
	struct memblock_region *r;
	phys_addr_t total = 0;

	for_each_reserved_mem_region(r) {
		phys_addr_t size = r->size;

		if (r->base > limit)
			break;

		if (r->base + r->size > limit)
			size = limit - r->base;

		if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))
			if (r->flags & MEMBLOCK_RSRV_KERN)
				total += size;
	}

	return total;
}

/**
 * memblock_estimated_nr_free_pages - return estimated number of free pages
 * from memblock point of view
 *
 * During bootup, subsystems might need a rough estimate of the number of free
 * pages in the whole system, before precise numbers are available from the
 * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
 * obtained from the buddy might be very imprecise during bootup.
 *
 * Return:
 * An estimated number of free pages from memblock point of view.
 */
unsigned long __init memblock_estimated_nr_free_pages(void)
{
	return PHYS_PFN(memblock_phys_mem_size() -
			memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE, NUMA_NO_NODE));
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
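/*
 * Illustrative sketch (not part of this file): architecture setup code
 * commonly derives the physical memory span from these helpers, e.g.
 *
 *	phys_addr_t dram_span = memblock_end_of_DRAM() -
 *				memblock_start_of_DRAM();
 *
 * Note that this span may include holes between memory regions.
 */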

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions; if @limit exceeds the total size of
	 * those regions, max_addr will keep its original value, PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return NUMA_NO_NODE;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * True if the region is a subset of a memory block, false if not.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
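/*
 * Illustrative sketch (not part of this file): before claiming a fixed
 * physical window, early code typically checks that the window is
 * backed by memory and not already reserved:
 *
 *	if (memblock_is_region_memory(base, size) &&
 *	    !memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */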

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (numa_valid_node(memblock_get_region_node(rgn)))
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
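/*
 * Illustrative usage (not part of this file): booting with
 *
 *	memblock=debug
 *
 * on the kernel command line sets memblock_debug, which enables the
 * memblock_dbg() traces above and the dump from memblock_dump_all().
 */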

static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = PAGE_ALIGN_DOWN(__pa(end_pg));

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
		 * the case.
		 */
		if (start)
			order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
		else
			order = MAX_PAGE_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(start, order);

		start += (1UL << order);
	}
}
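/*
 * Worked example (illustrative): with start == 0x3400 and a distant
 * end, __ffs(0x3400) == 10, so the first chunk is freed at order
 * min(MAX_PAGE_ORDER, 10); start then advances to 0x3800, where
 * __ffs() == 11, and so on, always using the largest
 * alignment-compatible order that still fits before end.
 */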

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;
	unsigned long max_reserved;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
repeat:
	max_reserved = memblock.reserved.max;
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, region->size, &memblock.reserved, nid);
	}
	/*
	 * If 'max' changed, memblock.reserved has doubled its array, which
	 * may have created a new reserved region before the current 'start'.
	 * Repeat the procedure to set its node id.
	 */
	if (max_reserved != memblock.reserved.max)
		goto repeat;

	/*
	 * initialize struct pages for reserved regions that don't have
	 * the MEMBLOCK_RSRV_NOINIT flag set
	 */
	for_each_reserved_mem_region(region) {
		if (!memblock_is_reserved_noinit(region)) {
			nid = memblock_get_region_node(region);
			start = region->base;
			end = start + region->size;

			if (!numa_valid_node(nid))
				nid = early_pfn_to_nid(PFN_DOWN(start));

			reserve_bootmem_region(start, end, nid);
		}
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when Node0 has no RAM installed,
	 * low RAM will be on Node1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	memblock_clear_kho_scratch_only();
	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}

/* Keep a table to reserve named memory */
#define RESERVE_MEM_MAX_ENTRIES	8
#define RESERVE_MEM_NAME_SIZE	16
struct reserve_mem_table {
	char			name[RESERVE_MEM_NAME_SIZE];
	phys_addr_t		start;
	phys_addr_t		size;
};
static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
static int reserved_mem_count;
static DEFINE_MUTEX(reserve_mem_lock);

/* Add wildcard region with a lookup name */
static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
				    const char *name)
{
	struct reserve_mem_table *map;

	map = &reserved_mem_table[reserved_mem_count++];
	map->start = start;
	map->size = size;
	strscpy(map->name, name);
}

static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
{
	struct reserve_mem_table *map;
	int i;

	for (i = 0; i < reserved_mem_count; i++) {
		map = &reserved_mem_table[i];
		if (!map->size)
			continue;
		if (strcmp(name, map->name) == 0)
			return map;
	}
	return NULL;
}

/**
 * reserve_mem_find_by_name - Find reserved memory region with a given name
 * @name: The name that is attached to a reserved memory region
 * @start: If found, holds the start address
 * @size: If found, holds the size of the region
 *
 * @start and @size are only updated if @name is found.
 *
 * Returns: 1 if found or 0 if not found.
 */
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
{
	struct reserve_mem_table *map;

	guard(mutex)(&reserve_mem_lock);
	map = reserve_mem_find_by_name_nolock(name);
	if (!map)
		return 0;

	*start = map->start;
	*size = map->size;
	return 1;
}
EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
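/*
 * Illustrative sketch (example values, not part of this file): a module
 * looking up a region created with reserve_mem=12M:4096:oops would do
 *
 *	phys_addr_t start, size;
 *
 *	if (reserve_mem_find_by_name("oops", &start, &size))
 *		pr_info("found %pa bytes at %pa\n", &size, &start);
 */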

/**
 * reserve_mem_release_by_name - Release reserved memory region with a given name
 * @name: The name that is attached to a reserved memory region
 *
 * Forcibly release the pages in the reserved memory region so that the memory
 * can be used as free memory. After release, the reserved region size becomes 0.
 *
 * Returns: 1 if released or 0 if not found.
 */
int reserve_mem_release_by_name(const char *name)
{
	char buf[RESERVE_MEM_NAME_SIZE + 12];
	struct reserve_mem_table *map;
	void *start, *end;

	guard(mutex)(&reserve_mem_lock);
	map = reserve_mem_find_by_name_nolock(name);
	if (!map)
		return 0;

	start = phys_to_virt(map->start);
	end = start + map->size - 1;
	snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
	free_reserved_area(start, end, 0, buf);
	map->size = 0;

	return 1;
}

#ifdef CONFIG_KEXEC_HANDOVER

static int __init reserved_mem_preserve(void)
{
	unsigned int nr_preserved = 0;
	int err;

	for (unsigned int i = 0; i < reserved_mem_count; i++, nr_preserved++) {
		struct reserve_mem_table *map = &reserved_mem_table[i];
		struct page *page = phys_to_page(map->start);
		unsigned int nr_pages = map->size >> PAGE_SHIFT;

		err = kho_preserve_pages(page, nr_pages);
		if (err)
			goto err_unpreserve;
	}

	return 0;

err_unpreserve:
	for (unsigned int i = 0; i < nr_preserved; i++) {
		struct reserve_mem_table *map = &reserved_mem_table[i];
		struct page *page = phys_to_page(map->start);
		unsigned int nr_pages = map->size >> PAGE_SHIFT;

		kho_unpreserve_pages(page, nr_pages);
	}

	return err;
}

static int __init prepare_kho_fdt(void)
{
	struct page *fdt_page;
	void *fdt;
	int err;

	fdt_page = alloc_page(GFP_KERNEL);
	if (!fdt_page) {
		err = -ENOMEM;
		goto err_report;
	}

	fdt = page_to_virt(fdt_page);
	err = kho_preserve_pages(fdt_page, 1);
	if (err)
		goto err_free_fdt;

	err |= fdt_create(fdt, PAGE_SIZE);
	err |= fdt_finish_reservemap(fdt);
	err |= fdt_begin_node(fdt, "");
	err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);

	for (unsigned int i = 0; !err && i < reserved_mem_count; i++) {
		struct reserve_mem_table *map = &reserved_mem_table[i];

		err |= fdt_begin_node(fdt, map->name);
		err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
		err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
		err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
		err |= fdt_end_node(fdt);
	}
	err |= fdt_end_node(fdt);
	err |= fdt_finish(fdt);

	if (err)
		goto err_unpreserve_fdt;

	err = kho_add_subtree(MEMBLOCK_KHO_FDT, fdt);
	if (err)
		goto err_unpreserve_fdt;

	err = reserved_mem_preserve();
	if (err)
		goto err_remove_subtree;

	return 0;

err_remove_subtree:
	kho_remove_subtree(fdt);
err_unpreserve_fdt:
	kho_unpreserve_pages(fdt_page, 1);
err_free_fdt:
	put_page(fdt_page);
err_report:
	pr_err("failed to prepare memblock FDT for KHO: %d\n", err);

	return err;
}

static int __init reserve_mem_init(void)
{
	if (!kho_is_enabled() || !reserved_mem_count)
		return 0;

	return prepare_kho_fdt();
}
late_initcall(reserve_mem_init);

static void *__init reserve_mem_kho_retrieve_fdt(void)
{
	phys_addr_t fdt_phys;
	static void *fdt;
	int err;

	if (fdt)
		return fdt;

	err = kho_retrieve_subtree(MEMBLOCK_KHO_FDT, &fdt_phys);
	if (err) {
		if (err != -ENOENT)
			pr_warn("failed to retrieve FDT '%s' from KHO: %d\n",
				MEMBLOCK_KHO_FDT, err);
		return NULL;
	}

	fdt = phys_to_virt(fdt_phys);

	err = fdt_node_check_compatible(fdt, 0, MEMBLOCK_KHO_NODE_COMPATIBLE);
	if (err) {
		pr_warn("FDT '%s' is incompatible with '%s': %d\n",
			MEMBLOCK_KHO_FDT, MEMBLOCK_KHO_NODE_COMPATIBLE, err);
		fdt = NULL;
	}

	return fdt;
}

static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
					  phys_addr_t align)
{
	int err, len_start, len_size, offset;
	const phys_addr_t *p_start, *p_size;
	const void *fdt;

	fdt = reserve_mem_kho_retrieve_fdt();
	if (!fdt)
		return false;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0) {
		pr_warn("FDT '%s' has no child '%s': %d\n",
			MEMBLOCK_KHO_FDT, name, offset);
		return false;
	}
	err = fdt_node_check_compatible(fdt, offset, RESERVE_MEM_KHO_NODE_COMPATIBLE);
	if (err) {
		pr_warn("Node '%s' is incompatible with '%s': %d\n",
			name, RESERVE_MEM_KHO_NODE_COMPATIBLE, err);
		return false;
	}

	p_start = fdt_getprop(fdt, offset, "start", &len_start);
	p_size = fdt_getprop(fdt, offset, "size", &len_size);
	if (!p_start || len_start != sizeof(*p_start) || !p_size ||
	    len_size != sizeof(*p_size)) {
		return false;
	}

	if (*p_start & (align - 1)) {
		pr_warn("KHO reserve-mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
			name, (long)align, (long)*p_start);
		return false;
	}

	if (*p_size != size) {
		pr_warn("KHO reserve-mem '%s' has wrong size (0x%lx != 0x%lx)\n",
			name, (long)*p_size, (long)size);
		return false;
	}

	reserved_mem_add(*p_start, size, name);
	pr_info("Revived memory reservation '%s' from KHO\n", name);

	return true;
}
#else
static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
					  phys_addr_t align)
{
	return false;
}
#endif /* CONFIG_KEXEC_HANDOVER */

/*
 * Parse reserve_mem=nn:align:name
 */
static int __init reserve_mem(char *p)
{
	phys_addr_t start, size, align, tmp;
	char *name;
	char *oldp;
	int len;

	if (!p)
		return -EINVAL;

	/* Check if there's room for more reserved memory */
	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES)
		return -EBUSY;

	oldp = p;
	size = memparse(p, &p);
	if (!size || p == oldp)
		return -EINVAL;

	if (*p != ':')
		return -EINVAL;

	align = memparse(p+1, &p);
	if (*p != ':')
		return -EINVAL;

	/*
	 * memblock_phys_alloc() doesn't like a zero size align,
	 * but it is OK for this command to have it.
	 */
	if (align < SMP_CACHE_BYTES)
		align = SMP_CACHE_BYTES;

	name = p + 1;
	len = strlen(name);

	/* name needs to have length but not too big */
	if (!len || len >= RESERVE_MEM_NAME_SIZE)
		return -EINVAL;

	/* Make sure that name has text */
	for (p = name; *p; p++) {
		if (!isspace(*p))
			break;
	}
	if (!*p)
		return -EINVAL;

	/* Make sure the name is not already used */
	if (reserve_mem_find_by_name(name, &start, &tmp))
		return -EBUSY;

	/* Pick previous allocations up from KHO if available */
	if (reserve_mem_kho_revive(name, size, align))
		return 1;

	/* TODO: Allocation must be outside of scratch region */
	start = memblock_phys_alloc(size, align);
	if (!start)
		return -ENOMEM;

	reserved_mem_add(start, size, name);

	return 1;
}
__setup("reserve_mem=", reserve_mem);
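/*
 * Illustrative usage (example values, not part of this file): booting
 * with
 *
 *	reserve_mem=2M:4096:oops
 *
 * reserves a 2 MB region aligned to 4096 bytes under the name "oops",
 * which a consumer can later locate via reserve_mem_find_by_name().
 */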

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
static const char * const flagname[] = {
	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
	[ilog2(MEMBLOCK_RSRV_KERN)] = "RSV_KERN",
	[ilog2(MEMBLOCK_KHO_SCRATCH)] = "KHO_SCRATCH",
};

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i, j, nid;
	unsigned int count = ARRAY_SIZE(flagname);
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (numa_valid_node(nid))
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);
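/*
 * Illustrative output sketch (format per memblock_debug_show() above;
 * the addresses below are made-up example values):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000bfffffff    0 NONE
 *	   1: 0x0000000100000000..0x000000013fffffff    0 NONE
 */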

#endif /* CONFIG_DEBUG_FS */