Lines Matching +full:speed +full:- +full:bins

1 /* SPDX-License-Identifier: GPL-2.0 */
19 #include <linux/pageblock-flags.h>
20 #include <linux/page-flags-layout.h>
23 #include <linux/page-flags.h>
28 /* Free memory management - zoned buddy allocator. */
106 #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
112 get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
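MIGRATETYPE_MASK above selects the PB_migratetype_bits stored per pageblock, and get_pfnblock_flags_mask() reads them for a given pfn; together they form the migratetype accessors. A minimal usage sketch, assuming only the two definitions shown here (the helper name and the MOVABLE check are illustrative, not kernel code):

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Illustrative helper: report whether a page sits in a MOVABLE pageblock.
 * Only the mask-based lookup mirrors the macros shown above; the policy
 * around it is a sketch.
 */
static bool page_in_movable_pageblock(struct page *page)
{
	unsigned long mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
						   MIGRATETYPE_MASK);

	return mt == MIGRATE_MOVABLE;
}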
197 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
245 * Global and per-node slab counters track slab pages. in vmstat_item_in_bytes()
249 * Per-memcg and per-lruvec counters track memory, consumed in vmstat_item_in_bytes()
251 * byte-precise. in vmstat_item_in_bytes()
308 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
310 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
330 * corresponding generation. The gen counter in folio->flags stores gen+1 while
331 * a page is on one of lrugen->folios[]. Otherwise it stores 0.
343 * PG_active is always cleared while a page is on one of lrugen->folios[] so
345 * considered active is isolated for non-reclaiming purposes, e.g., migration.
348 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
351 * in folio->flags.
362 * supported without using additional bits in folio->flags.
365 * across tiers only involves atomic operations on folio->flags and therefore
371 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
373 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
374 * folio->flags.
383 #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
384 #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
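Given the LRU_GEN_MASK/LRU_GEN_PGOFF layout and the "gen+1 while on lrugen->folios[], otherwise 0" encoding described above, the stored generation can be decoded with a shift and mask. The sketch below is illustrative and simply restates that encoding (it mirrors the usual folio helper rather than defining a new kernel API):

#include <linux/mm_types.h>
#include <linux/mmzone.h>

/*
 * Illustrative decode of the MGLRU generation stored in folio->flags:
 * the field holds gen+1 while the folio is on an lrugen->folios[] list,
 * so subtracting 1 recovers the generation, and -1 means "not on a list".
 */
static int folio_gen_from_flags(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}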
430 /* the multi-gen LRU lists, lazily sorted on eviction */
432 /* the multi-gen LRU sizes, eventually consistent */
439 unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
443 /* whether the multi-gen LRU is enabled */
449 /* per-node lru_gen_folio list for global reclaim */
457 MM_NONLEAF_TOTAL, /* total non-leaf entries */
458 MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
459 MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
463 /* double-buffering Bloom filters */
498 * young. For each generation, memcgs are randomly sharded into multiple bins
504 * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
505 * the old generation, is incremented when all its bins become empty.
531 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
536 * locklessly, a stale value (seq-1) does not wrap around to young.
542 /* the per-node memcg generation counter */
546 /* per-node lru_gen_folio list for global reclaim */
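The memcg LRU comments above shard memcgs into bins per generation and read the per-node generation counter modulo MEMCG_NR_GENS. A small illustrative restatement of that modular arithmetic, assuming seq is the raw counter (the helper names are not the kernel's):

#include <linux/mmzone.h>

/*
 * Illustrative only: map the raw per-node memcg generation counter onto
 * the old and young generation indices. MEMCG_NR_GENS is three precisely
 * so that a stale, locklessly read seq-1 never aliases the young index.
 */
static int memcg_old_gen(unsigned long seq)
{
	return seq % MEMCG_NR_GENS;
}

static int memcg_young_gen(unsigned long seq)
{
	return (seq + 1) % MEMCG_NR_GENS;
}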
608 * These track the cost of reclaiming one LRU - file or anon -
614 /* Non-resident age, driven by LRU movement */
664 #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
665 #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
666 #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
667 #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
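The wmark_pages() family folds the temporary watermark_boost into each threshold. A hedged sketch of a coarse watermark check built on these macros and the NR_FREE_PAGES vmstat counter (the real allocator path, __zone_watermark_ok(), additionally discounts unusable free pages and applies ALLOC_* adjustments):

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Rough illustration of a watermark test; not a substitute for the
 * allocator's own checks.
 */
static bool zone_above_low_wmark(struct zone *zone)
{
	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	return free > low_wmark_pages(zone);
}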
670 * Flags used in pcp->flags field.
672 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page is freed in the
674 * high-order page freeing.
676 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in PCP before
677 * draining PCP for consecutive high-order page freeing without
679 * zone lock contention and keep reusing cache-hot pages.
691 u8 flags; /* protected by pcp->lock */
698 /* Lists of pages, one per migrate type stored on the pcp-lists */
731 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
762 * likely to succeed, and to locally limit unmovable allocations - e.g.,
765 * 1. Pinned pages: (long-term) pinning of movable pages might
767 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
772 * to a different zone. When migration fails - pinning fails.
784 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
786 * some cases (virtio-mem), such pages can be skipped during
790 * of memory unplug in virtio-mem).
795 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
798 * self-stored in the range, but they are treated as movable when
820 /* Read-mostly fields */
855 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
867 * spanned_pages = zone_end_pfn - zone_start_pfn;
871 * present_pages = spanned_pages - absent_pages(pages in holes);
880 * managed_pages = present_pages - reserved_pages;
887 * (present_pages - managed_pages). And managed_pages should be used
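The size accounting quoted above reduces to simple arithmetic. The helper below only restates the identities for clarity; its parameters are illustrative, and the kernel derives these values during boot-time accounting rather than from such a function:

/* Illustrative restatement of the zone size accounting identities. */
static unsigned long example_managed_pages(unsigned long zone_start_pfn,
					   unsigned long zone_end_pfn,
					   unsigned long absent_pages,
					   unsigned long reserved_pages)
{
	unsigned long spanned_pages = zone_end_pfn - zone_start_pfn;
	unsigned long present_pages = spanned_pages - absent_pages;
	unsigned long managed_pages = present_pages - reserved_pages;

	return managed_pages;
}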
894 * It is a seqlock because it has to be read outside of zone->lock,
898 * The span_seq lock is declared along with zone->lock because it is
899 * frequently read in proximity to zone->lock. It's good to
922 * of pageblock. Protected by zone->lock.
934 /* Write-intensive fields used from the page allocator */
951 /* Write-intensive fields used by compaction and vmstats. */
956 * when reading the number of free pages to avoid per-cpu counter
1016 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
1022 return zone->cma_pages; in zone_cma_pages()
1030 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
1035 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
1040 return zone->initialized; in zone_is_initialized()
1045 return zone->spanned_pages == 0; in zone_is_empty()
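zone_end_pfn() and zone_spans_pfn() above are the usual building blocks for walking a zone's PFN range. A hedged sketch of such a walk (the callback is hypothetical, and pfn_valid() is still needed because a spanned range may contain holes):

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Illustrative walk over a zone's spanned PFNs. Holes in the span are
 * skipped via pfn_valid() before the memory map is touched.
 */
static void walk_zone_pfns(struct zone *zone, void (*fn)(struct page *))
{
	unsigned long pfn;

	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
		if (!pfn_valid(pfn))
			continue;
		fn(pfn_to_page(pfn));
	}
}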
1055 #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
1056 #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
1057 #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
1058 #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
1059 #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
1060 #define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
1061 #define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
1064 * Define the bit shifts to access each section. For non-existent
1087 #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
1088 #define NODES_MASK ((1UL << NODES_WIDTH) - 1)
1089 #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
1090 #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
1091 #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
1092 #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
1096 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); in page_zonenum()
1097 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; in page_zonenum()
1102 return page_zonenum(&folio->page); in folio_zonenum()
1126 return a->pgmap == b->pgmap; in zone_device_pages_have_same_pgmap()
1145 return is_zone_device_page(&folio->page); in folio_is_zone_device()
1160 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
1169 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
1203 int zone_idx; /* zone_idx(zoneref->zone) */
1212 * To speed the reading of the zonelist, the zonerefs contain the zone index
1216 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
1217 * zonelist_zone_idx() - Return the index of the zone for an entry
1218 * zonelist_node_idx() - Return the index of the node for an entry
1247 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
1270 * per-zone basis.
1298 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1305 * Nests above zone->lock and zone->span_seqlock
1320 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
1340 * This is a per-node reserve of pages that are not available
1353 /* Write-intensive fields used by page reclaim */
1403 /* Per-node vmstats */
1414 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
1415 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
1417 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
1422 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
1455 return lruvec->pgdat; in lruvec_pgdat()
1470 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
1498 return zone->present_pages; in populated_zone()
1504 return zone->node; in zone_to_nid()
1509 zone->node = nid; in zone_set_nid()
1533 * is_highmem - helper function to quickly check if a struct zone is a
1573 * for_each_online_pgdat - helper macro to iterate over all online nodes
1581 * for_each_zone - helper macro to iterate over all memory zones
1588 for (zone = (first_online_pgdat())->node_zones; \
1593 for (zone = (first_online_pgdat())->node_zones; \
1602 return zoneref->zone; in zonelist_zone()
1607 return zoneref->zone_idx; in zonelist_zone_idx()
1612 return zone_to_nid(zoneref->zone); in zonelist_node_idx()
1620 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1644 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1654 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1664 return next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
1669 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1671 * @z: The current pointer within zonelist->_zonerefs being iterated
1686 for (zone = z->zone; \
1693 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1695 * @z: The current pointer within zonelist->zones being iterated
1720 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; in movable_only_nodes()
1722 return (!z->zone) ? true : false; in movable_only_nodes()
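The iterators documented above are normally driven from a zonelist obtained for a node and a GFP mask, much as movable_only_nodes() does with ZONELIST_FALLBACK just above. A minimal sketch assuming the standard node_zonelist()/gfp_zone() helpers from gfp.h (the counting itself is purely illustrative):

#include <linux/gfp.h>
#include <linux/mmzone.h>

/*
 * Illustrative: count the populated zones eligible for a GFP_KERNEL
 * allocation on the given node, using the zonelist iterator.
 */
static int count_eligible_zones(int nid)
{
	struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
	struct zoneref *z;
	struct zone *zone;
	int nr = 0;

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL)) {
		if (populated_zone(zone))
			nr++;
	}

	return nr;
}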
1741 #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1746 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1749 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1764 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1770 #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1772 #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1777 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
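These shifts define the PFN-to-section and PFN-to-subsection mapping used by sparsemem. An illustrative restatement of the standard conversions (in mmzone.h they exist as the pfn_to_section_nr()/section_nr_to_pfn() macros):

#include <linux/mmzone.h>

/* Illustrative PFN <-> section number conversions built on the shifts above. */
static unsigned long example_pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

static unsigned long example_section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}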
1821 * WARNING: mem_section must be a power-of-2 in size for the
1834 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1844 return ms->usage->pageblock_flags; in section_to_usemap()
1865 * as mem_map - section_nr_to_pfn(pnum). The result is
1867 * 1. All mem_map arrays are page-aligned.
1869 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1870 * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
1897 #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
1902 unsigned long map = section->section_mem_map; in __section_mem_map_addr()
1909 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); in present_section()
1919 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); in valid_section()
1924 return (section && (section->section_mem_map & SECTION_IS_EARLY)); in early_section()
1934 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); in online_section()
1942 return section && ((section->section_mem_map & flags) == flags); in online_device_section()
1978 return test_bit(idx, READ_ONCE(ms->usage)->subsection_map); in pfn_section_valid()
1989 * pfn_valid - check if there is a valid memory map entry for a PFN
2023 * the entire section-sized span. in pfn_valid()
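pfn_valid() only guarantees that a memory map entry exists for the PFN, not that the page is online or otherwise usable. A hedged usage sketch pairing it with pfn_to_page() (the helper name is illustrative):

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Illustrative lookup: translate a PFN to its struct page only when a
 * memory map entry exists; whether the page is online/usable is still
 * the caller's problem.
 */
static struct page *page_for_pfn_or_null(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}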
2046 return -1; in next_present_section_nr()