Lines matching "zone" (whole-word matches), listed as: source line number, matched line, enclosing function
49 struct zone *zone; in mminit_verify_zonelist() local
57 /* Identify the zone and nodelist */ in mminit_verify_zonelist()
61 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
62 if (!populated_zone(zone)) in mminit_verify_zonelist()
68 zone->name); in mminit_verify_zonelist()
71 for_each_zone_zonelist(zone, z, zonelist, zoneid) in mminit_verify_zonelist()
72 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
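The matches above come from mminit_verify_zonelist(), a boot-time debug helper that prints, for each populated zone, the zones reachable through its node's zonelists. A hedged sketch of the same iteration pattern: for_each_zone_zonelist(), node_zonelist(), zone_to_nid() and pr_cont() are the real kernel APIs seen in the matches, while the wrapper function name and the GFP_KERNEL/ZONE_NORMAL choices below are illustrative assumptions.

    /* Illustrative: walk the zonelist a GFP_KERNEL allocation on node 'nid'
     * would use, capped at ZONE_NORMAL, and print each candidate zone. */
    static void __init dump_fallback_zones(int nid)
    {
            struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL)
                    pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
            pr_cont("\n");
    }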
87 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", in mminit_verify_pageflags_layout()
97 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", in mminit_verify_pageflags_layout()
104 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", in mminit_verify_pageflags_layout()
111 "Node/Zone ID: %lu -> %lu\n", in mminit_verify_pageflags_layout()
294 * Sum pages in active regions for movable zone.
314 * This finds a zone that can be used for ZONE_MOVABLE pages. The
316 * increasing memory addresses so that the "highest" populated zone is used
335 * Find the PFN the Movable zone begins in each node. Kernel memory
565 unsigned long zone, int nid) in __init_single_page() argument
568 set_page_links(page, zone, nid, pfn); in __init_single_page()
577 if (!is_highmem_idx(zone)) in __init_single_page()
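__init_single_page() is the per-struct-page initializer used throughout this file; the matches show it stamping the zone/node/section links and, on WANT_PAGE_VIRTUAL configs, caching the virtual address for non-highmem zones. A condensed, hedged sketch of the typical sequence (helper names are the real kernel ones; ordering and config guards are simplified):

    static void __meminit sketch_init_single_page(struct page *page, unsigned long pfn,
                                                  unsigned long zone, int nid)
    {
            mm_zero_struct_page(page);              /* clear the struct page              */
            set_page_links(page, zone, nid, pfn);   /* pack zone/node/section into flags  */
            init_page_count(page);                  /* refcount = 1                       */
            page_mapcount_reset(page);
            page_cpupid_reset_last(page);
            page_kasan_tag_reset(page);
            INIT_LIST_HEAD(&page->lru);
            /* WANT_PAGE_VIRTUAL configs also cache the kernel virtual address: */
            if (!is_highmem_idx(zone))
                    set_page_address(page, __va(pfn << PAGE_SHIFT));
    }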
679 * prev_end_pfn static that contains the end of previous zone in defer_init()
717 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
719 if (zone_spans_pfn(zone, pfn)) in init_reserved_page()
773 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
775 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
779 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
811 * - zone and node links point to zone and node that span the page if the
812 * hole is in the middle of a zone
813 * - zone and node links point to adjacent zone/node if the hole falls on
814 * the zone boundary; the pages in such holes will be prepended to the
815 * zone/node above the hole except for the trailing pages in the last
816 * section that will be appended to the zone/node below.
820 int zone, int node) in init_unavailable_range() argument
830 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
836 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n", in init_unavailable_range()
837 node, zone_names[zone], pgcnt); in init_unavailable_range()
847 * zone stats (e.g., nr_isolate_pageblock) are touched.
849 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, in memmap_init_range() argument
868 if (zone == ZONE_DEVICE) { in memmap_init_range()
884 if (overlap_memmap_init(zone, &pfn)) in memmap_init_range()
893 __init_single_page(page, pfn, zone, nid); in memmap_init_range()
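memmap_init_range() walks a PFN range and initializes every struct page for the given zone and node; the matches show the ZONE_DEVICE early-return, the mirrored-kernelcore overlap skip, and the call into __init_single_page(). A condensed sketch of the core loop (the helpers are the real ones from this file; ZONE_DEVICE, hotplug and deferred-init handling are omitted here):

    static void sketch_memmap_init_range(unsigned long start_pfn, unsigned long end_pfn,
                                         unsigned long zone, int nid, int migratetype)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    struct page *page;

                    /* Skip PFNs already initialised as mirrored-kernelcore overlap. */
                    if (overlap_memmap_init(zone, &pfn))
                            continue;

                    page = pfn_to_page(pfn);
                    __init_single_page(page, pfn, zone, nid);

                    /* The first page of each pageblock carries the block's migratetype. */
                    if (pageblock_aligned(pfn)) {
                            set_pageblock_migratetype(page, migratetype);
                            cond_resched();
                    }
            }
    }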
910 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
915 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
916 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
917 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
944 struct zone *zone = node->node_zones + j; in memmap_init() local
946 if (!populated_zone(zone)) in memmap_init()
949 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
959 * Append the pages in this hole to the highest zone in the last in memmap_init()
981 * phase for it to be fully associated with a zone. in __init_zone_device_page()
1064 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
1070 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
1073 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
1108 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1112 * is distributed. This helper function adjusts the zone ranges
1114 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1176 /* Return the number of page frames in holes in a zone on a node */
1184 /* zone is empty, we don't have any absent pages */ in zone_absent_pages_in_node()
1219 * Return the number of pages a zone spans in a node, including holes
1232 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
1238 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
1242 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
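The comments at lines 1219-1242 describe the clamping step: the architectural zone range is intersected with the node's PFN range, and whatever is left (possibly nothing) is the zone's span on that node. A standalone model of the arithmetic, with made-up helper and parameter names, not the kernel function:

    /* Return how many PFNs of [zone_low, zone_high) fall inside
     * [node_start, node_end), writing the clamped bounds back. */
    static unsigned long model_zone_span(unsigned long zone_low, unsigned long zone_high,
                                         unsigned long node_start, unsigned long node_end,
                                         unsigned long *zone_start, unsigned long *zone_end)
    {
            *zone_start = zone_low  > node_start ? zone_low  : node_start;
            *zone_end   = zone_high < node_end   ? zone_high : node_end;

            /* No overlap: this node has no pages in this zone. */
            if (*zone_end <= *zone_start)
                    return 0;
            return *zone_end - *zone_start;
    }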
1252 struct zone *z; in reset_memoryless_node_totalpages()
1276 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
1293 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
1295 zone->zone_start_pfn = 0; in calculate_node_totalpages()
1296 zone->spanned_pages = spanned; in calculate_node_totalpages()
1297 zone->present_pages = real_size; in calculate_node_totalpages()
1299 zone->present_early_pages = real_size; in calculate_node_totalpages()
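calculate_node_totalpages() fills in two of the per-zone size counters seen throughout this file; roughly, the relation between them (and managed_pages, which zone_init_internals() sets later at line 1376) is:

    spanned_pages = zone_end_pfn - zone_start_pfn    /* PFN distance, holes included       */
    present_pages = spanned_pages - absent_pages     /* physical pages that actually exist */
    managed_pages = present_pages - reserved pages   /* what the buddy allocator and       */
                                                     /* watermarks get to work with        */

present_early_pages (line 1299, CONFIG_MEMORY_HOTPLUG) snapshots present_pages at boot so that later hotplug accounting can tell boot-time pages apart from hot-added ones.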
1318 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
1319 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
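calc_memmap_size() estimates how many of the zone's own pages are eaten by its struct page array (the memmap), which free_area_init_core() then subtracts from freesize (lines 1571-1582). A worked example with typical, configuration-dependent numbers (4 KiB base pages, 64-byte struct page):

    /* Hypothetical 4 GiB zone: */
    unsigned long spanned      = (4UL << 30) >> 12;   /* 1,048,576 PFNs         */
    unsigned long memmap_bytes = spanned * 64;        /* 64 MiB of struct pages */
    unsigned long memmap_pages = memmap_bytes >> 12;  /* 16,384 pages, ~1.6%    */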
1373 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
1376 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
1377 zone_set_nid(zone, nid); in zone_init_internals()
1378 zone->name = zone_names[idx]; in zone_init_internals()
1379 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
1380 spin_lock_init(&zone->lock); in zone_init_internals()
1381 zone_seqlock_init(zone); in zone_init_internals()
1382 zone_pcp_init(zone); in zone_init_internals()
1385 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
1389 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
1390 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
1394 INIT_LIST_HEAD(&zone->unaccepted_pages); in zone_init_free_lists()
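zone_init_free_lists() resets the buddy allocator's per-order free lists: one list head per (order, migratetype) pair plus an nr_free counter per order, and, when unaccepted memory is supported, the zone-wide unaccepted_pages list (line 1394). A minimal model of the layout being initialized (field names mirror the kernel's struct free_area; the model struct itself is a simplification):

    struct model_free_area {
            struct list_head free_list[MIGRATE_TYPES];  /* one list per migratetype      */
            unsigned long    nr_free;                   /* free pages of this order      */
    };

    /* A zone carries one entry per order, 0 .. MAX_PAGE_ORDER. */
    struct model_free_area free_area[MAX_PAGE_ORDER + 1];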
1398 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
1402 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
1403 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
1408 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
1411 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
1413 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
1416 zone_init_free_lists(zone); in init_currently_empty_zone()
1417 zone->initialized = 1; in init_currently_empty_zone()
1422 * Calculate the size of the zone->blockflags rounded to an unsigned long
1441 static void __ref setup_usemap(struct zone *zone) in setup_usemap() argument
1443 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
1444 zone->spanned_pages); in setup_usemap()
1445 zone->pageblock_flags = NULL; in setup_usemap()
1447 zone->pageblock_flags = in setup_usemap()
1449 zone_to_nid(zone)); in setup_usemap()
1450 if (!zone->pageblock_flags) in setup_usemap()
1451 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
1452 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
1456 static inline void setup_usemap(struct zone *zone) {} in setup_usemap() argument
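usemap_size() sizes zone->pageblock_flags, which stores NR_PAGEBLOCK_BITS per pageblock (currently four: three migratetype bits plus the compaction skip bit), rounded up to whole unsigned longs. A worked example, assuming 2 MiB pageblocks (order 9, the common x86-64 hugetlb case):

    /* 1 GiB zone: 1 GiB / 2 MiB          =  512 pageblocks              */
    /*             512 pageblocks * 4 bit = 2048 bits = 256 bytes        */
    /* already a multiple of 8 * sizeof(unsigned long), so the memblock  */
    /* allocation at line 1447 is 256 bytes for this zone.               */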
1495 * Set up the zone data structures
1537 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug() local
1539 zone->present_pages = 0; in free_area_init_core_hotplug()
1540 zone_init_internals(zone, z, nid, 0); in free_area_init_core_hotplug()
1546 * Set up the zone data structures:
1563 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
1566 size = zone->spanned_pages; in free_area_init_core()
1567 freesize = zone->present_pages; in free_area_init_core()
1571 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
1579 pr_debug(" %s zone: %lu pages used for memmap\n", in free_area_init_core()
1582 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", in free_area_init_core()
1589 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); in free_area_init_core()
1604 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
1609 setup_usemap(zone); in free_area_init_core()
1610 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
1647 * The zone's endpoints aren't required to be MAX_PAGE_ORDER in alloc_node_mem_map()
1743 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
1744 if (populated_zone(zone)) { in check_for_memory()
1777 * free_area_init - Initialise all pg_data_t and zone data
1778 * @max_zone_pfn: an array of max PFNs for each zone
1782 * zone in each node and their holes is calculated. If the maximum PFN
1783 * between two adjacent zones match, it is assumed that the zone is empty.
1785 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1792 int i, nid, zone; in free_area_init() local
1795 /* Record where the zone boundaries are */ in free_area_init()
1806 zone = MAX_NR_ZONES - i - 1; in free_area_init()
1808 zone = i; in free_area_init()
1810 if (zone == ZONE_MOVABLE) in free_area_init()
1813 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
1814 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
1815 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
1824 /* Print out the zone ranges */ in free_area_init()
1825 pr_info("Zone ranges:\n"); in free_area_init()
1842 pr_info("Movable zone start for each node\n"); in free_area_init()
2039 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
2043 int nid = zone_to_nid(zone); in deferred_init_pages()
2045 int zid = zone_idx(zone); in deferred_init_pages()
2064 * This function is meant to pre-load the iterator for the zone init.
2070 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
2077 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
2081 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
2104 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
2113 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
2120 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
2131 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
2152 struct zone *zone = arg; in deferred_init_memmap_chunk() local
2155 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
2162 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
2182 struct zone *zone; in deferred_init_memmap() local
2204 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
2205 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
2210 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
2212 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2213 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
2217 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
2218 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2228 .fn_arg = zone, in deferred_init_memmap()
2237 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2241 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
2242 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
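deferred_init_memmap() hands the bulk of the highest zone to padata so several CPUs on the node can initialize struct pages in parallel; line 2228 shows the zone being passed as the job argument. A hedged sketch of the job description it builds (field names follow struct padata_mt_job; the size and thread-count values here are illustrative, the real code aligns everything to PAGES_PER_SECTION and derives max_threads from the node's CPUs):

    struct padata_mt_job job = {
            .thread_fn   = deferred_init_memmap_chunk,  /* the worker matched at line 2152 */
            .fn_arg      = zone,
            .start       = spfn,
            .size        = epfn - spfn,
            .align       = PAGES_PER_SECTION,
            .min_chunk   = PAGES_PER_SECTION,
            .max_threads = max_threads,                 /* roughly: CPUs local to the node */
    };
    padata_do_multithreaded(&job);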
2252 * If this zone has deferred pages, try to grow it by initializing enough
2258 * Return true when zone was grown, otherwise return false. We return true even
2266 bool __init deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2269 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2275 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2276 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2282 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2290 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2291 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2308 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2348 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
2350 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
2354 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
2358 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
2361 block_end_pfn, zone)) in set_zone_contiguous()
2367 zone->contiguous = true; in set_zone_contiguous()
2372 struct zone *zone; in page_alloc_init_late() local
2404 for_each_populated_zone(zone) in page_alloc_init_late()
2405 set_zone_contiguous(zone); in page_alloc_init_late()
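set_zone_contiguous() walks the zone one pageblock at a time and only sets zone->contiguous when every block passes __pageblock_pfn_to_page(), i.e. when both ends of the block map to valid, online struct pages in this same zone; consumers such as compaction can then skip per-page validity checks. A rough stand-in for the per-block test (the real helper compares page_zone() at both ends rather than raw PFN bounds):

    /* Simplified model of the per-pageblock containment check. */
    static bool model_block_within_zone(unsigned long block_start_pfn,
                                        unsigned long block_end_pfn,
                                        unsigned long zone_start_pfn,
                                        unsigned long zone_end_pfn)
    {
            return block_start_pfn >= zone_start_pfn &&
                   block_end_pfn   <= zone_end_pfn;
    }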
2550 * set_dma_reserve - set the specified number of pages reserved in the first zone
2553 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2554 * In the DMA zone, a significant percentage may be consumed by kernel image
2557 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
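The kernel-doc at lines 2550-2557 explains set_dma_reserve(): pages of the first zone that the kernel image and early allocations have already consumed should not inflate that zone's managed page count, otherwise its watermarks would be computed against memory that can never be allocated. A worked example with hypothetical numbers:

    /* Hypothetical: ZONE_DMA spans 16 MiB = 4096 pages, of which the kernel
     * image plus boot-time allocations pin 1024. */
    set_dma_reserve(1024);
    /* free_area_init_core() then subtracts those 1024 pages from the first
     * zone's freesize (line 1589), so watermarks and per-cpu batch sizes are
     * derived from the ~3072 pages that are actually allocatable. */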