Lines Matching +full:speed +full:- +full:bins

1 /* SPDX-License-Identifier: GPL-2.0 */
19 #include <linux/pageblock-flags.h>
20 #include <linux/page-flags-layout.h>
23 #include <linux/page-flags.h>
28 /* Free memory management - zoned buddy allocator. */
80 get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
109 #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
115 get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
200 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
251 * Global and per-node slab counters track slab pages. in vmstat_item_in_bytes()
255 * Per-memcg and per-lruvec counters track memory, consumed in vmstat_item_in_bytes()
257 * byte-precise. in vmstat_item_in_bytes()
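
For reference, the predicate these comment lines sit in boils down to comparing the
item index against the two byte-counted slab items; roughly:

	static __always_inline bool vmstat_item_in_bytes(int idx)
	{
		/* only the per-memcg/per-lruvec slab counters are byte-precise */
		return (idx == NR_SLAB_RECLAIMABLE_B ||
			idx == NR_SLAB_UNRECLAIMABLE_B);
	}
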
314 * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
316 * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
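
The two states above are bit indices set and tested on lruvec->flags; the enum is
essentially:

	enum lruvec_flags {
		LRUVEC_CGROUP_CONGESTED,	/* set by cgroup-level reclaim */
		LRUVEC_NODE_CONGESTED,		/* set by kswapd, cleared only by kswapd */
	};
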
336 * corresponding generation. The gen counter in folio->flags stores gen+1 while
337 * a page is on one of lrugen->folios[]. Otherwise it stores 0.
349 * PG_active is always cleared while a page is on one of lrugen->folios[] so
351 * considered active is isolated for non-reclaiming purposes, e.g., migration.
354 * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
357 * in folio->flags.
368 * supported without using additional bits in folio->flags.
371 * across tiers only involves atomic operations on folio->flags and therefore
377 * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
379 * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
380 * folio->flags.
389 #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
390 #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
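
Because the generation is stored as gen+1 in folio->flags (0 meaning the folio is not
on lrugen->folios[]), the accessor in mm_inline.h recovers it by masking, shifting and
subtracting one; roughly:

	static inline int folio_lru_gen(struct folio *folio)
	{
		unsigned long flags = READ_ONCE(folio->flags);

		return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
	}
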
436 /* the multi-gen LRU lists, lazily sorted on eviction */
438 /* the multi-gen LRU sizes, eventually consistent */
445 unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
449 /* whether the multi-gen LRU is enabled */
455 /* per-node lru_gen_folio list for global reclaim */
462 MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
463 MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
467 /* double-buffering Bloom filters */
502 * young. For each generation, memcgs are randomly sharded into multiple bins
508 per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
509 * the old generation, is incremented when all its bins become empty.
535 * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
540 locklessly, a stale value (seq-1) does not wrap around to young.
546 /* the per-node memcg generation counter */
550 /* per-node lru_gen_folio list for global reclaim */
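
The "remainder (mod MEMCG_NR_GENS) indexes the old generation" rule above is a one-line
mapping on the reclaim side; a minimal sketch (memcg_old_gen is an illustrative name):

	/* sketch: the old generation is the counter's remainder */
	static inline int memcg_old_gen(unsigned long seq)
	{
		return seq % MEMCG_NR_GENS;
	}
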
613 * These track the cost of reclaiming one LRU - file or anon -
619 /* Non-resident age, driven by LRU movement */
669 * Flags used in pcp->flags field.
671 * PCPF_PREV_FREE_HIGH_ORDER: a high-order page is freed in the
673 * high-order page freeing.
675 * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in PCP before
676 draining PCP for consecutive high-order page freeing without
678 zone lock contention and keep reusing cache-hot pages.
690 u8 flags; /* protected by pcp->lock */
697 /* Lists of pages, one per migrate type stored on the pcp-lists */
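
The two PCPF_* flags documented above are single bits in the u8 pcp->flags field; in the
header they are defined roughly as:

	#define PCPF_PREV_FREE_HIGH_ORDER	BIT(0)
	#define PCPF_FREE_HIGH_BATCH		BIT(1)
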
730 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
761 * likely to succeed, and to locally limit unmovable allocations - e.g.,
764 * 1. Pinned pages: (long-term) pinning of movable pages might
766 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
771 * to a different zone. When migration fails - pinning fails.
783 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
785 * some cases (virtio-mem), such pages can be skipped during
789 * of memory unplug in virtio-mem).
794 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
797 * self-stored in the range, but they are treated as movable when
819 /* Read-mostly fields */
855 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
867 * spanned_pages = zone_end_pfn - zone_start_pfn;
871 * present_pages = spanned_pages - absent_pages(pages in holes);
880 * managed_pages = present_pages - reserved_pages;
887 * (present_pages - managed_pages). And managed_pages should be used
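
A worked example with made-up numbers, just to show how the three counts relate:

	/*
	 * Hypothetical zone spanning PFNs [0x100000, 0x140000), with 0x2000
	 * pages of holes and 0x800 pages reserved by the kernel:
	 *
	 *   spanned_pages = 0x140000 - 0x100000 = 0x40000
	 *   present_pages = 0x40000  - 0x2000   = 0x3e000
	 *   managed_pages = 0x3e000  - 0x800    = 0x3d800   (what the buddy allocator manages)
	 */
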
894 * It is a seqlock because it has to be read outside of zone->lock,
898 * The span_seq lock is declared along with zone->lock because it is
899 * frequently read in proximity to zone->lock. It's good to
922 * of pageblock. Protected by zone->lock.
934 /* Write-intensive fields used from the page allocator */
951 /* Write-intensive fields used by compaction and vmstats. */
956 * when reading the number of free pages to avoid per-cpu counter
1017 return z->_watermark[w] + z->watermark_boost; in wmark_pages()
1042 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
1048 return zone->cma_pages; in zone_cma_pages()
1056 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
1061 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
1066 return zone->initialized; in zone_is_initialized()
1071 return zone->spanned_pages == 0; in zone_is_empty()
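
A common use of these helpers is walking the PFN range a zone spans; a minimal sketch
(walk_zone_pfns is an illustrative name, holes inside the span are skipped via pfn_valid()):

	static void walk_zone_pfns(struct zone *zone)
	{
		unsigned long pfn;

		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
			if (!pfn_valid(pfn))
				continue;	/* hole inside the span */
			/* ... operate on pfn_to_page(pfn) ... */
		}
	}
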
1081 #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
1082 #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
1083 #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
1084 #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
1085 #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
1086 #define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
1087 #define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
1090 * Define the bit shifts to access each section. For non-existent
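
In the full header, this comment is followed by the *_PGSHIFT definitions; a zero width
collapses the shift to 0 so references to the field optimise away. They look roughly like:

	#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
	#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
	#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

(and similarly for the remaining fields.)
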
1113 #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
1114 #define NODES_MASK ((1UL << NODES_WIDTH) - 1)
1115 #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
1116 #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
1117 #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
1118 #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
1122 ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); in page_zonenum()
1123 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; in page_zonenum()
1128 return page_zonenum(&folio->page); in folio_zonenum()
1152 return a->pgmap == b->pgmap; in zone_device_pages_have_same_pgmap()
1171 return is_zone_device_page(&folio->page); in folio_is_zone_device()
1186 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
1195 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
1229 int zone_idx; /* zone_idx(zoneref->zone) */
1238 * To speed the reading of the zonelist, the zonerefs contain the zone index
1242 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
1243 * zonelist_zone_idx() - Return the index of the zone for an entry
1244 * zonelist_node_idx() - Return the index of the node for an entry
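
For context, the entry type these three helpers operate on is a two-field struct:

	struct zoneref {
		struct zone *zone;	/* Pointer to actual zone */
		int zone_idx;		/* zone_idx(zoneref->zone) */
	};
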
1273 * arch-specific memory_failure (SGX), hwpoison_filter() filtered
1296 * per-zone basis.
1324 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1331 * Nests above zone->lock and zone->span_seqlock
1346 atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
1366 * This is a per-node reserve of pages that are not available
1379 /* Write-intensive fields used by page reclaim */
1429 /* Per-node vmstats */
1440 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
1441 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
1443 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
1448 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
1481 return lruvec->pgdat; in lruvec_pgdat()
1496 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
1524 return zone->present_pages; in populated_zone()
1530 return zone->node; in zone_to_nid()
1535 zone->node = nid; in zone_set_nid()
1559 * is_highmem - helper function to quickly check if a struct zone is a
1599 * for_each_online_pgdat - helper macro to iterate over all online nodes
1607 * for_each_zone - helper macro to iterate over all memory zones
1614 for (zone = (first_online_pgdat())->node_zones; \
1619 for (zone = (first_online_pgdat())->node_zones; \
1628 return zoneref->zone; in zonelist_zone()
1633 return zoneref->zone_idx; in zonelist_zone_idx()
1638 return zone_to_nid(zoneref->zone); in zonelist_node_idx()
1646 …* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodem…
1670 …* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nod…
1680 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1690 return next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
1695 …* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or b…
1697 * @z: The current pointer within zonelist->_zonerefs being iterated
1719 …* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a gi…
1721 * @z: The current pointer within zonelist->zones being iterated
1746 zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; in movable_only_nodes()
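
A usage sketch of the nodemask iteration macro documented above, in the style of the page
allocator's zone walk (zonelist, highest_zoneidx, nodemask and order are assumed to be in
scope):

	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx, nodemask) {
		/* skip zones that are below their low watermark */
		if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
				       highest_zoneidx, 0))
			continue;
		/* ... attempt the allocation from this zone ... */
	}
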
1767 #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1772 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1775 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1790 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1796 #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1798 #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1803 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
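
A worked example, assuming the common x86_64 configuration (SECTION_SIZE_BITS = 27,
SUBSECTION_SHIFT = 21, PAGE_SHIFT = 12):

	/*
	 *   PFN_SECTION_SHIFT       = 27 - 12 = 15  ->  PAGES_PER_SECTION    = 1 << 15 = 32768 (128 MiB)
	 *   PFN_SUBSECTION_SHIFT    = 21 - 12 = 9   ->  PAGES_PER_SUBSECTION = 1 << 9  = 512   (2 MiB)
	 *   SUBSECTIONS_PER_SECTION = 1 << (27 - 21) = 64
	 */
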
1847 * WARNING: mem_section must be a power-of-2 in size for the
1860 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1870 return ms->usage->pageblock_flags; in section_to_usemap()
1891 * as mem_map - section_nr_to_pfn(pnum). The result is
1893 * 1. All mem_map arrays are page-aligned.
1895 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1896 (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
1923 #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
1928 unsigned long map = section->section_mem_map; in __section_mem_map_addr()
1935 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); in present_section()
1945 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); in valid_section()
1950 return (section && (section->section_mem_map & SECTION_IS_EARLY)); in early_section()
1960 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); in online_section()
1968 return section && ((section->section_mem_map & flags) == flags); in online_device_section()
2003 struct mem_section_usage *usage = READ_ONCE(ms->usage); in pfn_section_valid()
2005 return usage ? test_bit(idx, usage->subsection_map) : 0; in pfn_section_valid()
2016 * pfn_valid - check if there is a valid memory map entry for a PFN
2050 * the entire section-sized span. in pfn_valid()
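
Putting the section helpers together, the SPARSEMEM pfn_valid() is essentially a chain of
these checks; a simplified sketch (pfn_valid_sketch is an illustrative name; the RCU
protection of ms->usage is omitted):

	static inline int pfn_valid_sketch(unsigned long pfn)
	{
		struct mem_section *ms;

		if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
			return 0;
		ms = __pfn_to_section(pfn);
		if (!valid_section(ms))
			return 0;
		/*
		 * Early sections have a fully populated memmap, so they report
		 * the entire section-sized span as valid; hot-added sections
		 * are checked per subsection.
		 */
		return early_section(ms) || pfn_section_valid(ms, pfn);
	}
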
2073 return -1; in next_present_section_nr()