Lines Matching +full:speed +full:- +full:bins
1 /* SPDX-License-Identifier: GPL-2.0 */
19 #include <linux/pageblock-flags.h>
20 #include <linux/page-flags-layout.h>
23 #include <linux/page-flags.h>
28 /* Free memory management - zoned buddy allocator. */
80 get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
109 #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
115 get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
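The MIGRATETYPE_MASK definition above is the usual low-bit-mask idiom: (1UL << nbits) - 1 keeps the lowest nbits bits, and ANDing a pageblock's flag word with it isolates the migratetype field. A standalone sketch of that idiom with a made-up bit count (the real width is PB_migratetype_bits from pageblock-flags.h):

#include <stdio.h>

/* Illustrative only: the real width is PB_migratetype_bits. */
#define DEMO_MIGRATETYPE_BITS 3
#define DEMO_MIGRATETYPE_MASK ((1UL << DEMO_MIGRATETYPE_BITS) - 1)

int main(void)
{
    unsigned long block_flags = 0xa5;   /* pretend pageblock flag word */

    printf("mask = %#lx, migratetype = %lu\n",
           DEMO_MIGRATETYPE_MASK, block_flags & DEMO_MIGRATETYPE_MASK);
    return 0;
}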
201 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
257 * Global and per-node slab counters track slab pages. in vmstat_item_in_bytes()
261 * Per-memcg and per-lruvec counters track memory, consumed in vmstat_item_in_bytes()
263 * byte-precise. in vmstat_item_in_bytes()
320 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
322 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
342 * corresponding generation. The gen counter in folio->flags stores gen+1 while
343 * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
354 * PG_active is always cleared while a folio is on one of lrugen->folios[] so
356 * a folio considered active is isolated for non-reclaiming purposes, e.g.,
359 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
362 * in folio->flags, masked by LRU_GEN_MASK.
371 * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
373 * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
376 * across tiers only involves atomic operations on folio->flags and therefore
382 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
384 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
385 * folio->flags, masked by LRU_REFS_MASK.
391 #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
392 #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
396 * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
451 * MAX_NR_GENS-MIN_NR_GENS-1.
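A standalone sketch of the gen+1 encoding described above: a generation derived from a sequence counter is stored in a small bitfield of the flags word, with 0 reserved for "not on any lrugen->folios[] list". The widths, offset and generation count below are made up for illustration; the kernel derives them from LRU_GEN_WIDTH, LRU_GEN_PGOFF, LRU_GEN_MASK and MAX_NR_GENS and stores the field in folio->flags.

#include <stdio.h>

/* Made-up layout, purely illustrative. */
#define DEMO_GEN_WIDTH   3
#define DEMO_GEN_PGOFF   8
#define DEMO_GEN_MASK    (((1UL << DEMO_GEN_WIDTH) - 1) << DEMO_GEN_PGOFF)
#define DEMO_MAX_NR_GENS 4

/* Store gen+1 so that 0 can mean "not on any list". */
static unsigned long set_gen(unsigned long flags, unsigned long seq)
{
    unsigned long gen = (seq % DEMO_MAX_NR_GENS) + 1;

    return (flags & ~DEMO_GEN_MASK) | (gen << DEMO_GEN_PGOFF);
}

/* Returns -1 when the flags word says "off the multi-gen LRU". */
static long get_gen(unsigned long flags)
{
    return (long)((flags & DEMO_GEN_MASK) >> DEMO_GEN_PGOFF) - 1;
}

int main(void)
{
    unsigned long flags = 0;

    printf("off list: gen = %ld\n", get_gen(flags));   /* -1 */
    flags = set_gen(flags, 6);                         /* seq 6 -> gen 2 */
    printf("on list:  gen = %ld\n", get_gen(flags));   /* 2 */
    return 0;
}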
463 /* the multi-gen LRU lists, lazily sorted on eviction */
465 /* the multi-gen LRU sizes, eventually consistent */
476 /* whether the multi-gen LRU is enabled */
482 /* per-node lru_gen_folio list for global reclaim */
489 MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
490 MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
494 /* double-buffering Bloom filters */
529 * young. For each generation, memcgs are randomly sharded into multiple bins
535 * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
536 * the old generation, is incremented when all its bins become empty.
562 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
567 * locklessly, a stale value (seq-1) does not wrap around to young.
573 /* the per-node memcg generation counter */
577 /* per-node lru_gen_folio list for global reclaim */
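The "remainder (mod MEMCG_NR_GENS) indexes the old generation" rule above is plain modular indexing; a toy userspace sketch with an assumed generation count, for illustration only:

#include <stdio.h>

#define DEMO_NR_GENS 3  /* assumed count, not the kernel's MEMCG_NR_GENS */

int main(void)
{
    unsigned long seq;

    /* old generation slot = seq % DEMO_NR_GENS; bumping seq once all of
     * that slot's bins are empty rotates which slot is "old". */
    for (seq = 0; seq < 6; seq++)
        printf("seq=%lu -> old generation slot %lu\n",
               seq, seq % DEMO_NR_GENS);
    return 0;
}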
640 * These track the cost of reclaiming one LRU - file or anon -
646 /* Non-resident age, driven by LRU movement */
696 * Flags used in pcp->flags field.
698 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page is freed in the
700 * high-order page freeing.
702 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in PCP before
703 * draining PCP for consecutive high-order pages freeing without
705 * zone lock contention and keep reusing cache-hot pages.
717 u8 flags; /* protected by pcp->lock */
724 /* Lists of pages, one per migrate type stored on the pcp-lists */
757 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
788 * likely to succeed, and to locally limit unmovable allocations - e.g.,
791 * 1. Pinned pages: (long-term) pinning of movable pages might
793 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
798 * to a different zone. When migration fails - pinning fails.
810 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
812 * some cases (virtio-mem), such pages can be skipped during
816 * of memory unplug in virtio-mem).
821 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
824 * self-stored in the range, but they are treated as movable when
846 /* Read-mostly fields */
882 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
894 * spanned_pages = zone_end_pfn - zone_start_pfn;
898 * present_pages = spanned_pages - absent_pages(pages in holes);
907 * managed_pages = present_pages - reserved_pages;
914 * (present_pages - managed_pages). And managed_pages should be used
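The accounting above reduces to two subtractions; a throwaway calculation with made-up numbers, purely to show the relationship between spanned_pages, present_pages and managed_pages:

#include <stdio.h>

int main(void)
{
    /* Made-up values, all in pages. */
    unsigned long zone_start_pfn = 0x100000;
    unsigned long zone_end_pfn   = 0x140000;
    unsigned long absent_pages   = 0x2000;  /* holes */
    unsigned long reserved_pages = 0x800;   /* memmap, memblock reservations, ... */

    unsigned long spanned = zone_end_pfn - zone_start_pfn;
    unsigned long present = spanned - absent_pages;
    unsigned long managed = present - reserved_pages;

    printf("spanned=%lu present=%lu managed=%lu\n", spanned, present, managed);
    return 0;
}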
921 * It is a seqlock because it has to be read outside of zone->lock,
925 * The span_seq lock is declared along with zone->lock because it is
926 * frequently read in proximity to zone->lock. It's good to
949 * of pageblock. Protected by zone->lock.
961 /* Write-intensive fields used from the page allocator */
984 /* Write-intensive fields used by compaction and vmstats. */
989 * when reading the number of free pages to avoid per-cpu counter
1050 return z->_watermark[w] + z->watermark_boost; in wmark_pages()
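A minimal kernel-style sketch of how wmark_pages() is typically combined with the free-page counter; zone_above_low_wmark() is a hypothetical helper written only to show the pattern, assuming the usual zone_page_state()/NR_FREE_PAGES accessors from vmstat:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical: true if the zone's free pages are above its (possibly
 * boosted) low watermark. */
static bool zone_above_low_wmark(struct zone *zone)
{
    return zone_page_state(zone, NR_FREE_PAGES) >
           wmark_pages(zone, WMARK_LOW);
}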
1075 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
1081 return zone->cma_pages; in zone_cma_pages()
1089 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
1094 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
1099 return zone->initialized; in zone_is_initialized()
1104 return zone->spanned_pages == 0; in zone_is_empty()
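A kernel-style sketch of walking a zone's PFN span with the helpers above; count_valid_pfns() is hypothetical and only illustrates the zone_start_pfn/zone_end_pfn() pattern:

#include <linux/mmzone.h>

/* Hypothetical: count PFNs inside the zone's span that have a memory map
 * entry at all. */
static unsigned long count_valid_pfns(struct zone *zone)
{
    unsigned long pfn, nr = 0;

    if (!zone_is_initialized(zone) || zone_is_empty(zone))
        return 0;

    for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++)
        if (pfn_valid(pfn))
            nr++;

    return nr;
}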
1114 #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
1115 #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
1116 #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
1117 #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
1118 #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
1119 #define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
1120 #define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
1123 * Define the bit shifts to access each section. For non-existent
1146 #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
1147 #define NODES_MASK ((1UL << NODES_WIDTH) - 1)
1148 #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
1149 #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
1150 #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
1151 #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
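The PGOFF definitions above carve page->flags from the top of the word downward: each field's offset is the previous offset minus this field's width, and each mask is the usual (1UL << width) - 1. A standalone sketch with made-up widths and a fixed 64-bit word (the real widths come from page-flags-layout.h):

#include <stdio.h>

/* Made-up widths, illustrative only. */
#define DEMO_WORD_BITS   64
#define DEMO_SECTIONS_W  0
#define DEMO_NODES_W     6
#define DEMO_ZONES_W     3

#define DEMO_SECTIONS_OFF (DEMO_WORD_BITS - DEMO_SECTIONS_W)
#define DEMO_NODES_OFF    (DEMO_SECTIONS_OFF - DEMO_NODES_W)
#define DEMO_ZONES_OFF    (DEMO_NODES_OFF - DEMO_ZONES_W)
#define DEMO_ZONES_MASK   ((1ULL << DEMO_ZONES_W) - 1)

int main(void)
{
    unsigned long long flags = 0;
    unsigned long long zone = 2;    /* pretend zone id */

    flags |= zone << DEMO_ZONES_OFF;                       /* pack */
    printf("zone id read back: %llu\n",
           (flags >> DEMO_ZONES_OFF) & DEMO_ZONES_MASK);   /* unpack */
    return 0;
}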
1155 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); in page_zonenum()
1156 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; in page_zonenum()
1161 return page_zonenum(&folio->page); in folio_zonenum()
1173 return page_folio(page)->pgmap; in page_pgmap()
1214 return is_zone_device_page(&folio->page); in folio_is_zone_device()
1229 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
1238 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
1272 int zone_idx; /* zone_idx(zoneref->zone) */
1281 * To speed the reading of the zonelist, the zonerefs contain the zone index
1285 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
1286 * zonelist_zone_idx() - Return the index of the zone for an entry
1287 * zonelist_node_idx() - Return the index of the node for an entry
1316 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
1339 * per-zone basis.
1367 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1374 * Nests above zone->lock and zone->span_seqlock
1389 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
1409 * This is a per-node reserve of pages that are not available
1422 /* Write-intensive fields used by page reclaim */
1472 /* Per-node vmstats */
1483 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
1484 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
1486 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
1491 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
1522 return lruvec->pgdat; in lruvec_pgdat()
1537 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
1565 return zone->present_pages; in populated_zone()
1571 return zone->node; in zone_to_nid()
1576 zone->node = nid; in zone_set_nid()
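A kernel-style sketch combining several of the accessors above; print_node_zones() is hypothetical and exists only to show the pattern:

#include <linux/mmzone.h>
#include <linux/printk.h>

/* Hypothetical: log every populated zone of one node. */
static void print_node_zones(pg_data_t *pgdat)
{
    int i;

    for (i = 0; i < MAX_NR_ZONES; i++) {
        struct zone *zone = &pgdat->node_zones[i];

        if (!populated_zone(zone))
            continue;
        pr_info("node %d zone %d: %lu managed pages\n",
                zone_to_nid(zone), (int)zone_idx(zone),
                zone_managed_pages(zone));
    }
}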
1600 * is_highmem - helper function to quickly check if a struct zone is a
1640 * for_each_online_pgdat - helper macro to iterate over all online nodes
1648 * for_each_zone - helper macro to iterate over all memory zones
1655 for (zone = (first_online_pgdat())->node_zones; \
1660 for (zone = (first_online_pgdat())->node_zones; \
1669 return zoneref->zone; in zonelist_zone()
1674 return zoneref->zone_idx; in zonelist_zone_idx()
1679 return zone_to_nid(zoneref->zone); in zonelist_node_idx()
1687 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1711 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1721 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1731 return next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
1736 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1738 * @z: The current pointer within zonelist->_zonerefs being iterated
1760 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1762 * @z: The current pointer within zonelist->zones being iterated
1787 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; in movable_only_nodes()
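A kernel-style sketch of the iteration these zonelist helpers support; first_normal_zone_nid() is hypothetical:

#include <linux/mmzone.h>
#include <linux/numa.h>

/* Hypothetical: nid of the first populated zone at or below ZONE_NORMAL in
 * a node's fallback zonelist, or NUMA_NO_NODE if there is none. */
static int first_normal_zone_nid(int nid)
{
    struct zonelist *zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
    struct zoneref *z;
    struct zone *zone;

    for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL) {
        if (populated_zone(zone))
            return zone_to_nid(zone);
    }
    return NUMA_NO_NODE;
}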
1808 #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1813 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1816 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1831 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1837 #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1839 #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1844 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
1888 * WARNING: mem_section must be a power-of-2 in size for the
1901 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
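The section arithmetic above is a pair of shifts plus a root/offset split; a standalone demo with assumed, architecture-dependent values for SECTION_SIZE_BITS, PAGE_SHIFT and SECTIONS_PER_ROOT:

#include <stdio.h>

/* Assumed values, illustrative only. */
#define DEMO_SECTION_SIZE_BITS  27
#define DEMO_PAGE_SHIFT         12

#define DEMO_PFN_SECTION_SHIFT  (DEMO_SECTION_SIZE_BITS - DEMO_PAGE_SHIFT)
#define DEMO_SECTIONS_PER_ROOT  256 /* really PAGE_SIZE / sizeof(struct mem_section) */
#define DEMO_SECTION_ROOT_MASK  (DEMO_SECTIONS_PER_ROOT - 1)

int main(void)
{
    unsigned long pfn = 0x123456;
    unsigned long sec = pfn >> DEMO_PFN_SECTION_SHIFT; /* pfn_to_section_nr() */

    printf("pfn %#lx -> section %lu, which starts at pfn %#lx\n",
           pfn, sec, sec << DEMO_PFN_SECTION_SHIFT);
    printf("root %lu, index within root %lu\n",
           sec / DEMO_SECTIONS_PER_ROOT, sec & DEMO_SECTION_ROOT_MASK);
    return 0;
}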
1911 return ms->usage->pageblock_flags; in section_to_usemap()
1932 * as mem_map - section_nr_to_pfn(pnum). The result is
1934 * 1. All mem_map arrays are page-aligned.
1936 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1937 * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
1970 #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
1975 unsigned long map = section->section_mem_map; in __section_mem_map_addr()
1982 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); in present_section()
1992 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); in valid_section()
1997 return (section && (section->section_mem_map & SECTION_IS_EARLY)); in early_section()
2007 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); in online_section()
2015 return section && ((section->section_mem_map & flags) == flags); in online_device_section()
2028 (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); in preinited_vmemmap_section()
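section_mem_map packs a (coded) mem_map pointer together with low-order state bits such as SECTION_MARKED_PRESENT, and SECTION_MAP_MASK strips those bits off again. A standalone sketch of that pointer-plus-flags encoding, with made-up bit positions:

#include <stdio.h>

/* Made-up bit positions; the kernel's flags are the SECTION_* bits and the
 * pointer is recovered with SECTION_MAP_MASK. */
#define DEMO_MARKED_PRESENT (1UL << 0)
#define DEMO_HAS_MEM_MAP    (1UL << 1)
#define DEMO_MAP_LAST_BIT   2
#define DEMO_MAP_MASK       (~((1UL << DEMO_MAP_LAST_BIT) - 1))

int main(void)
{
    unsigned long map = 0x4000000UL;   /* pretend page-aligned mem_map address */
    unsigned long word = map | DEMO_MARKED_PRESENT | DEMO_HAS_MEM_MAP;

    printf("present? %d  has mem_map? %d\n",
           !!(word & DEMO_MARKED_PRESENT), !!(word & DEMO_HAS_MEM_MAP));
    printf("decoded map address: %#lx\n", word & DEMO_MAP_MASK);
    return 0;
}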
2074 struct mem_section_usage *usage = READ_ONCE(ms->usage); in pfn_section_valid()
2076 return usage ? test_bit(idx, usage->subsection_map) : 0; in pfn_section_valid()
2090 * pfn_valid - check if there is a valid memory map entry for a PFN
2124 * the entire section-sized span. in pfn_valid()
2147 return -1; in next_present_section_nr()
2151 for (section_nr = next_present_section_nr(start - 1); \
2152 section_nr != -1; \
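A kernel-style sketch using the iterator whose body is quoted above; count_present_sections() is hypothetical and assumes CONFIG_SPARSEMEM:

#include <linux/mmzone.h>

/* Hypothetical, CONFIG_SPARSEMEM only: count all present memory sections. */
static unsigned long count_present_sections(void)
{
    unsigned long section_nr, nr = 0;

    for_each_present_section_nr(0, section_nr)
        nr++;

    return nr;
}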