Lines Matching refs:regions
45 * Memblock is a method of managing memory regions during the early
50 * regions. There are several types of these collections:
56 * * ``reserved`` - describes the regions that were allocated
64 * which contains an array of memory regions along with
72 * arrays during addition of new regions. This feature should be used
129 .memory.regions = memblock_memory_init_regions,
133 .reserved.regions = memblock_reserved_init_regions,
143 .regions = memblock_physmem_init_regions,
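Lines 45-72 above are the top-of-file overview, and lines 129-143 wire the static bootstrap arrays into the global structures. For orientation, here is a simplified userspace rendition of those structures (field names follow include/linux/memblock.h; the phys_addr_t typedef and the unconditional nid are simplifications, and the sketches further down reuse these types under shortened, non-kernel helper names):

    #include <stdbool.h>
    #include <string.h>

    typedef unsigned long long phys_addr_t;    /* arch-dependent in the kernel */

    struct memblock_region {
        phys_addr_t base;       /* physical start of the region */
        phys_addr_t size;       /* size in bytes */
        unsigned long flags;    /* enum memblock_flags in the kernel */
        int nid;                /* NUMA node; CONFIG_NUMA-only in the kernel */
    };

    struct memblock_type {
        unsigned long cnt;      /* number of regions in use */
        unsigned long max;      /* capacity of the regions array */
        phys_addr_t total_size; /* sum of all region sizes */
        struct memblock_region *regions;    /* sorted by base, non-overlapping */
        char *name;
    };

    struct memblock {
        bool bottom_up;                 /* allocation direction */
        phys_addr_t current_limit;
        struct memblock_type memory;    /* wired up at lines 129-133 */
        struct memblock_type reserved;
    };

``physmem``, where configured, is a standalone struct memblock_type of its own, which is why line 143 initializes its .regions directly.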
158 for (i = 0, rgn = &memblock_type->regions[0]; \
160 i++, rgn = &memblock_type->regions[i])
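Lines 158-160 are the body of the for_each_memblock_type() iterator macro. A minimal, purely illustrative use of the same pattern on the types above (dump_type() is not a kernel function):

    #include <stdio.h>

    #define for_each_memblock_type(i, memblock_type, rgn)        \
        for (i = 0, rgn = &memblock_type->regions[0];             \
             i < memblock_type->cnt;                              \
             i++, rgn = &memblock_type->regions[i])

    /* print every region of a type, e.g. dump_type(&memblock.memory) */
    static void dump_type(struct memblock_type *type)
    {
        struct memblock_region *rgn;
        unsigned long i;

        for_each_memblock_type(i, type, rgn)
            printf("[%#llx-%#llx) flags %#lx\n",
                   (unsigned long long)rgn->base,
                   (unsigned long long)(rgn->base + rgn->size),
                   rgn->flags);
    }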
212 if (memblock_addrs_overlap(base, size, type->regions[i].base,
213 type->regions[i].size))
365 type->total_size -= type->regions[r].size;
366 memmove(&type->regions[r], &type->regions[r + 1],
367 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
373 type->regions[0].base = 0;
374 type->regions[0].size = 0;
375 type->regions[0].flags = 0;
376 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
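Lines 365-376 are memblock_remove_region(): shift the tail of the array down over the removed slot, and when the last region disappears keep a single zeroed sentinel so regions[0] is always valid to inspect. Condensed onto the simplified types (remove_region() is a shortened stand-in name):

    static void remove_region(struct memblock_type *type, unsigned long r)
    {
        type->total_size -= type->regions[r].size;
        /* close the gap left by entry r */
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* special case: keep one empty sentinel region (lines 373-376) */
        if (type->cnt == 0) {
            type->cnt = 1;
            type->regions[0].base = 0;
            type->regions[0].size = 0;
            type->regions[0].flags = 0;
            type->regions[0].nid = -1;    /* kernel: MAX_NUMNODES */
        }
    }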
388 if (memblock.reserved.regions != memblock_reserved_init_regions) {
389 addr = __pa(memblock.reserved.regions);
393 kfree(memblock.reserved.regions);
398 if (memblock.memory.regions != memblock_memory_init_regions) {
399 addr = __pa(memblock.memory.regions);
403 kfree(memblock.memory.regions);
413 * memblock_double_array - double the size of the memblock regions array
414 * @type: memblock type of the regions array being doubled
418 * Double the size of the @type regions array. If memblock is being used to
419 * allocate memory for a new reserved regions array and there is a previously
437 /* We don't allow resizing until we know about the reserved regions
464 /* only exclude range when trying to double reserved.regions */
500 memcpy(new_array, type->regions, old_size);
502 old_array = type->regions;
503 type->regions = new_array;
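Lines 413-503 are memblock_double_array(): allocate an array of twice the current capacity, copy the live entries, zero the new half, and swap the pointer; the old array is freed unless it is one of the static bootstrap arrays, which is exactly what the pointer comparisons at lines 388-403 distinguish. A userspace sketch where malloc() stands in for the memblock-or-slab allocation the kernel chooses between, assuming the initial array was itself heap-allocated:

    #include <stdlib.h>

    static int double_array(struct memblock_type *type)
    {
        size_t old_size = type->max * sizeof(struct memblock_region);
        struct memblock_region *new_array, *old_array;

        new_array = malloc(old_size * 2);
        if (!new_array)
            return -1;

        memcpy(new_array, type->regions, old_size);    /* cf. line 500 */
        memset(new_array + type->max, 0, old_size);    /* zero the new half */

        old_array = type->regions;                     /* cf. lines 502-503 */
        type->regions = new_array;
        type->max *= 2;

        /* the kernel frees old_array only if it isn't a static init array */
        free(old_array);
        return 0;
    }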
527 * memblock_merge_regions - merge neighboring compatible regions
531 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
542 struct memblock_region *this = &type->regions[i];
543 struct memblock_region *next = &type->regions[i + 1];
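Lines 527-543 are the core of memblock_merge_regions(): walk adjacent pairs and coalesce any two regions that touch and carry the same node id and flags, which is what "compatible" means here. Condensed (merge_regions() below is a simplified stand-in; the kernel version only rescans [@start_rgn - 1, @end_rgn)):

    static void merge_regions(struct memblock_type *type)
    {
        unsigned long i = 0;

        while (i + 1 < type->cnt) {
            struct memblock_region *this = &type->regions[i];
            struct memblock_region *next = &type->regions[i + 1];

            /* compatible = physically contiguous, same nid, same flags */
            if (this->base + this->size != next->base ||
                this->nid != next->nid ||
                this->flags != next->flags) {
                i++;
                continue;
            }

            this->size += next->size;
            /* shift the rest of the array down over the absorbed entry */
            memmove(next, next + 1,
                    (type->cnt - (i + 2)) * sizeof(*next));
            type->cnt--;
        }
    }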
580 struct memblock_region *rgn = &type->regions[idx];
602 * existing regions. @type is guaranteed to be minimal (all neighbouring
603 * compatible regions are merged) after the addition.
622 if (type->regions[0].size == 0) {
624 type->regions[0].base = base;
625 type->regions[0].size = size;
626 type->regions[0].flags = flags;
627 memblock_set_region_node(&type->regions[0], nid);
634 * The worst case is when new range overlaps all existing regions,
635 * then we'll need type->cnt + 1 empty regions in @type. So if
637 * that there are enough empty regions in @type, and we can insert
638 * regions directly.
646 * then with %true. The first counts the number of regions needed
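Lines 602-646 describe memblock_add_range(). An empty type takes the fast path at lines 622-627; otherwise the main loop runs twice, as the comment around line 646 says: first with insert == false, only counting how many entries the uncovered pieces of the new range will need (at worst type->cnt + 1), growing the array as needed, then again with insert == true to actually place them, followed by a merge pass. A condensed sketch that reuses double_array() and merge_regions() from above and zeroes nid/flags instead of threading them through:

    static void insert_at(struct memblock_type *type, int idx,
                          phys_addr_t base, phys_addr_t size)
    {
        struct memblock_region *rgn = &type->regions[idx];

        /* shift entries idx.. up by one and fill the hole */
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = 0;    /* the kernel threads nid/flags through */
        rgn->nid = 0;
        type->cnt++;
        type->total_size += size;
    }

    static int add_range(struct memblock_type *type,
                         phys_addr_t base, phys_addr_t size)
    {
        phys_addr_t obase = base, end = base + size;
        bool insert = false;
        int idx, nr_new;

        if (type->regions[0].size == 0) {    /* fast path: empty type */
            type->regions[0].base = base;
            type->regions[0].size = size;
            type->total_size = size;
            return 0;
        }
    repeat:
        /* pass 1 counts the uncovered pieces, pass 2 inserts them */
        base = obase;
        nr_new = 0;
        for (idx = 0; idx < type->cnt; idx++) {
            phys_addr_t rbase = type->regions[idx].base;
            phys_addr_t rend = rbase + type->regions[idx].size;

            if (rbase >= end)
                break;
            if (rend <= base)
                continue;
            if (rbase > base) {    /* uncovered gap below this region */
                nr_new++;
                if (insert)
                    insert_at(type, idx++, base, rbase - base);
            }
            base = rend < end ? rend : end;    /* area below rend is done */
        }
        if (base < end) {    /* uncovered tail above the last region */
            nr_new++;
            if (insert)
                insert_at(type, idx, base, end - base);
        }
        if (!nr_new)
            return 0;
        if (!insert) {    /* make room, then repeat for real */
            while (type->cnt + nr_new > type->max)
                if (double_array(type) < 0)
                    return -1;
            insert = true;
            goto repeat;
        }
        merge_regions(type);
        return 0;
    }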
801 * Walk @type and ensure that regions don't cross the boundaries defined by
802 * [@base, @base + @size). Crossing regions are split at the boundaries,
803 * which may create at most two more regions. The index of the first
823 /* we'll create at most two more regions */
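Lines 801-823 document memblock_isolate_range(): any region crossing @base or @base + @size is split at the boundary so that later code can operate on whole regions, and the fully contained entries are reported back as the index range [start_rgn, end_rgn). Since at most two extra regions can appear (line 823), the array is grown up front. A sketch reusing insert_at() and double_array() from above (the kernel preserves nid/flags on the split halves; insert_at() here zeroes them):

    static int isolate_range(struct memblock_type *type,
                             phys_addr_t base, phys_addr_t size,
                             int *start_rgn, int *end_rgn)
    {
        phys_addr_t end = base + size;
        int idx;

        *start_rgn = *end_rgn = 0;

        /* we'll create at most two more regions (line 823) */
        while (type->cnt + 2 > type->max)
            if (double_array(type) < 0)
                return -1;

        for (idx = 0; idx < type->cnt; idx++) {
            struct memblock_region *rgn = &type->regions[idx];
            phys_addr_t rbase = rgn->base;
            phys_addr_t rend = rbase + rgn->size;

            if (rbase >= end)
                break;
            if (rend <= base)
                continue;

            if (rbase < base) {
                /* straddles @base: keep the top half here, re-insert
                 * the bottom half below it, then revisit the top half */
                rgn->base = base;
                rgn->size -= base - rbase;
                type->total_size -= base - rbase;
                insert_at(type, idx, rbase, base - rbase);
            } else if (rend > end) {
                /* straddles @end: shrink this entry to the top half,
                 * re-insert the bottom half and revisit it */
                rgn->base = end;
                rgn->size -= end - rbase;
                type->total_size -= end - rbase;
                insert_at(type, idx--, rbase, end - rbase);
            } else {
                /* fully inside [base, end): record the index range */
                if (!*end_rgn)
                    *start_rgn = idx;
                *end_rgn = idx + 1;
            }
        }
        return 0;
    }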
1007 struct memblock_region *r = &type->regions[i];
1065 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
1066 * direct mapping of the physical memory. These regions will still be
1099 * struct pages will not be initialized for reserved memory regions marked with
1115 * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
1146 /* we never skip regions when iterating memblock.reserved or physmem */
1150 /* only memory regions are associated with nodes, check it */
1154 /* skip hotpluggable memory regions if needed */
1159 /* if we want mirror memory skip non-mirror memory regions */
1173 * MEMBLOCK_KHO_SCRATCH regions for the allocations
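Lines 1146-1173 are the filter the range iterators apply: nothing is ever skipped when walking memblock.reserved or physmem (line 1146), but memory regions are skipped when they don't match the requested node or flags. A paraphrase of the main checks from should_skip_region() (the flag values match the kernel's enum memblock_flags; movable_node_enabled is a stand-in for the kernel's movable_node_is_enabled(), and the DRIVER_MANAGED and KHO_SCRATCH cases are left out):

    #define NUMA_NO_NODE    (-1)

    enum memblock_flags {
        MEMBLOCK_NONE     = 0x0,
        MEMBLOCK_HOTPLUG  = 0x1,    /* hotpluggable region */
        MEMBLOCK_MIRROR   = 0x2,    /* mirrored region */
        MEMBLOCK_NOMAP    = 0x4,    /* don't add to the direct mapping */
    };

    static bool movable_node_enabled;    /* movable_node_is_enabled() stub */

    static bool should_skip(const struct memblock_region *m, int nid, int flags)
    {
        /* only memory regions are associated with nodes, check it (1150) */
        if (nid != NUMA_NO_NODE && nid != m->nid)
            return true;

        /* skip hotpluggable memory regions if needed (1154) */
        if (movable_node_enabled && (m->flags & MEMBLOCK_HOTPLUG) &&
            !(flags & MEMBLOCK_HOTPLUG))
            return true;

        /* if we want mirror memory skip non-mirror memory regions (1159) */
        if ((flags & MEMBLOCK_MIRROR) && !(m->flags & MEMBLOCK_MIRROR))
            return true;

        /* skip nomap memory unless it was asked for explicitly */
        if (!(flags & MEMBLOCK_NOMAP) && (m->flags & MEMBLOCK_NOMAP))
            return true;

        return false;
    }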
1195 * areas before each region in type_b. For example, if type_b regions
1200 * The upper 32bit indexes the following regions.
1216 struct memblock_region *m = &type_a->regions[idx_a];
1243 r = &type_b->regions[idx_b];
1254 /* if the two regions intersect, we're done */
1317 struct memblock_region *m = &type_a->regions[idx_a];
1344 r = &type_b->regions[idx_b];
1355 /* if the two regions intersect, we're done */
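Lines 1195-1254 belong to __next_mem_range(), which yields the parts of type_a (usually memory) not covered by type_b (usually reserved), i.e. the clean gaps; lines 1317-1355 are the reverse-direction twin. The whole iterator state is one u64: per the comment at line 1200, the lower 32 bits index type_a and the upper 32 bits index type_b. The encoding, spelled out:

    #include <stdint.h>

    /* decode/encode the packed iterator cursor (cf. line 1200) */
    static inline uint32_t idx_a(uint64_t idx)  { return idx & 0xffffffff; }
    static inline uint32_t idx_b(uint64_t idx)  { return idx >> 32; }

    static inline uint64_t pack_idx(uint32_t a, uint32_t b)
    {
        return (uint64_t)b << 32 | a;
    }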
1388 r = &type->regions[*idx];
1410 * memblock_set_node - set node ID on memblock regions
1416 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1434 memblock_set_region_node(&type->regions[i], nid);
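Lines 1410-1434 summarize memblock_set_node(): isolate [@base, @base + @size) so its boundaries fall between regions, stamp the node id on every region in the isolated index range (line 1434), then merge, since neighbours may have become compatible again. A sketch reusing isolate_range() and merge_regions() from above:

    static int set_node(struct memblock_type *type, phys_addr_t base,
                        phys_addr_t size, int nid)
    {
        int start_rgn, end_rgn, i;

        if (isolate_range(type, base, size, &start_rgn, &end_rgn) < 0)
            return -1;

        for (i = start_rgn; i < end_rgn; i++)
            type->regions[i].nid = nid;    /* memblock_set_region_node() */

        merge_regions(type);    /* neighbours may now be compatible */
        return 0;
    }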
1521 * from the regions with mirroring enabled and then retried from any
1892 return memblock.memory.regions[0].base;
1899 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1909 * the memory memblock regions; if the @limit exceeds the total size
1910 * of those regions, max_addr will keep its original value PHYS_ADDR_MAX
1936 /* truncate both memory and reserved regions */
1961 /* remove all the MAP regions */
1963 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1967 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1970 /* truncate the reserved regions */
1999 if (addr < type->regions[mid].base)
2001 else if (addr >= (type->regions[mid].base +
2002 type->regions[mid].size))
2026 return !memblock_is_nomap(&memblock.memory.regions[i]);
2038 *start_pfn = PFN_DOWN(type->regions[mid].base);
2039 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
2041 return memblock_get_region_node(&type->regions[mid]);
2061 return (memblock.memory.regions[idx].base +
2062 memblock.memory.regions[idx].size) >= end;
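Lines 1999-2002, 2038-2041 and 2061-2062 all rest on the same invariant: the regions array is sorted by base and non-overlapping, so point lookup is a plain binary search (memblock_search() in the kernel), and a range lies entirely in memory iff the region containing its start also covers its end (lines 2061-2062). An equivalent sketch:

    /* return the index of the region containing addr, or -1 (cf. 1999-2002) */
    static int search(struct memblock_type *type, phys_addr_t addr)
    {
        int lo = 0, hi = (int)type->cnt - 1;

        while (lo <= hi) {
            int mid = lo + (hi - lo) / 2;

            if (addr < type->regions[mid].base)
                hi = mid - 1;
            else if (addr >= type->regions[mid].base +
                             type->regions[mid].size)
                lo = mid + 1;
            else
                return mid;    /* addr falls inside regions[mid] */
        }
        return -1;
    }

    /* is [base, base + size) fully covered by one region? (cf. 2061-2062) */
    static bool is_region_memory(struct memblock *mb,
                                 phys_addr_t base, phys_addr_t size)
    {
        int idx = search(&mb->memory, base);

        if (idx < 0)
            return false;
        return mb->memory.regions[idx].base +
               mb->memory.regions[idx].size >= base + size;
    }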
2100 r - memblock.memory.regions);
2307 * pages for the NOMAP regions as PageReserved
2330 * initialize struct pages for reserved regions that don't have
2773 reg = &type->regions[i];