/linux-6.12.1/kernel/ |
D | vmcore_info.c |
    169  VMCOREINFO_STRUCT_SIZE(free_area);  in crash_save_vmcoreinfo_init()
    187  VMCOREINFO_OFFSET(zone, free_area);  in crash_save_vmcoreinfo_init()
    190  VMCOREINFO_OFFSET(free_area, free_list);  in crash_save_vmcoreinfo_init()
    193  VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);  in crash_save_vmcoreinfo_init()
    195  VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);  in crash_save_vmcoreinfo_init()
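Read together, these five hits are the complete set of free_area-related vmcoreinfo exports. Reassembled from the matches above (a sketch; the surrounding, unrelated VMCOREINFO_* exports in crash_save_vmcoreinfo_init() are omitted):

    /* free_area-related exports in crash_save_vmcoreinfo_init() */
    VMCOREINFO_STRUCT_SIZE(free_area);                      /* sizeof(struct free_area) */
    VMCOREINFO_OFFSET(zone, free_area);                     /* where the array sits in struct zone */
    VMCOREINFO_OFFSET(free_area, free_list);                /* where the lists sit in struct free_area */
    VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);      /* array length: one entry per order */
    VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);  /* lists per entry: one per migrate type */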
|
/linux-6.12.1/arch/arm64/kvm/hyp/nvhe/ |
D | page_alloc.c |
    127  page_add_to_list(p, &pool->free_area[order]);  in __hyp_attach_page()
    147  page_add_to_list(buddy, &pool->free_area[buddy->order]);  in __hyp_extract_page()
    206  while (i <= pool->max_order && list_empty(&pool->free_area[i]))  in hyp_alloc_pages()
    214  p = node_to_page(pool->free_area[i].next);  in hyp_alloc_pages()
    234  INIT_LIST_HEAD(&pool->free_area[i]);  in hyp_pool_init()
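The hyp_alloc_pages() hits show the usual buddy "walk up the orders" search in the pKVM EL2 allocator. A minimal sketch of that loop, reconstructed from the matches above (only free_area, max_order and node_to_page() appear in the hits; the variable declarations and the failure path are assumptions):

    /* Start at the requested order and walk upward until a non-empty
     * free list is found; anything past max_order means the pool cannot
     * satisfy the request. */
    unsigned short i = order;
    struct hyp_page *p;                            /* type assumed */

    while (i <= pool->max_order && list_empty(&pool->free_area[i]))
        i++;
    if (i > pool->max_order)
        return NULL;                               /* failure path assumed */
    p = node_to_page(pool->free_area[i].next);     /* first free buddy of order i */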
|
/linux-6.12.1/tools/testing/selftests/x86/ |
D | test_shadow_stack.c |
    562  void *free_area, *shstk, *test_map = (void *)0xFFFFFFFFFFFFFFFF;  in test_guard_gap_other_gaps() local
    565  free_area = mmap(0, SS_SIZE * 3, PROT_READ | PROT_WRITE,  in test_guard_gap_other_gaps()
    567  munmap(free_area, SS_SIZE * 3);  in test_guard_gap_other_gaps()
    569  shstk = create_shstk(free_area + SS_SIZE);  in test_guard_gap_other_gaps()
    605  void *free_area, *shstk_start, *test_map = (void *)0xFFFFFFFFFFFFFFFF;  in test_guard_gap_new_mappings_gaps() local
    609  free_area = mmap(0, PAGE_SIZE * 4, PROT_READ | PROT_WRITE,  in test_guard_gap_new_mappings_gaps()
    611  munmap(free_area, PAGE_SIZE * 4);  in test_guard_gap_new_mappings_gaps()
    614  shstk_start = mmap(free_area, PAGE_SIZE, PROT_READ | PROT_WRITE,  in test_guard_gap_new_mappings_gaps()
    616  if (shstk_start == MAP_FAILED || shstk_start != free_area)  in test_guard_gap_new_mappings_gaps()
    633  if (test_map == free_area + PAGE_SIZE) {  in test_guard_gap_new_mappings_gaps()
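Here free_area is only a local variable: the selftest maps a throwaway region to learn where the kernel would place it, unmaps it, and then creates fixed mappings inside that hole so it can reason about guard gaps at known addresses. A condensed sketch of the pattern (the mmap() flags sit on the elided line 566 and are assumed to be MAP_PRIVATE | MAP_ANONYMOUS):

    /* Reserve-then-release: the freed range is very likely still unused,
     * so later mappings can be placed at known offsets inside it. */
    void *free_area = mmap(0, SS_SIZE * 3, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  /* flags assumed */
    munmap(free_area, SS_SIZE * 3);
    shstk = create_shstk(free_area + SS_SIZE);     /* middle third of the hole */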
|
/linux-6.12.1/Documentation/admin-guide/kdump/ |
D | vmcoreinfo.rst |
    126  free_area  section in Common variables
    129  The size of a free_area structure. It indicates whether the free_area
    160  (zone, free_area|vm_stat|spanned_pages)
    169  (free_area, free_list)
    175  Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
    192  (zone.free_area, NR_PAGE_ORDERS)
    196  free_area ranges. NR_PAGE_ORDERS is used by the zone buddy allocator.
    316  (free_area.free_list, MIGRATE_TYPES)
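The documentation spells out why the exports in kernel/vmcore_info.c exist: with the structure size, the two offsets and the two array lengths, a dump tool such as makedumpfile can walk every zone's free lists and leave free pages out of the dump. A conceptual sketch of that walk (mark_range_free() is a hypothetical helper, not a kernel or makedumpfile function; buddy_list is the list member name seen in the snapshot.c hit at the end of this listing):

    /* Conceptual only: each page on free_list[mt] of order 'order' heads
     * a free buddy covering (1 << order) pages. */
    for (order = 0; order < NR_PAGE_ORDERS; order++)
        for (mt = 0; mt < MIGRATE_TYPES; mt++)
            list_for_each_entry(page, &zone->free_area[order].free_list[mt],
                                buddy_list)
                mark_range_free(page_to_pfn(page), 1UL << order);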
|
/linux-6.12.1/arch/arm64/kvm/hyp/include/nvhe/ |
D | gfp.h |
    19  struct list_head free_area[NR_PAGE_ORDERS];  member
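This is the free_area member of the pKVM hypervisor's page pool. A trimmed sketch of the containing structure; apart from free_area and max_order, which the nvhe page_alloc.c hits above reference, the remaining fields are recalled from memory and should be treated as assumptions:

    struct hyp_pool {
        hyp_spinlock_t lock;                          /* assumed */
        struct list_head free_area[NR_PAGE_ORDERS];   /* one plain list per order */
        phys_addr_t range_start;                      /* assumed */
        phys_addr_t range_end;                        /* assumed */
        unsigned short max_order;
    };

Note that, unlike struct zone, the hyp pool keeps a bare list_head per order rather than a struct free_area with per-migratetype lists and an nr_free counter.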
|
/linux-6.12.1/mm/ |
D | page_reporting.c |
    150  struct free_area *area = &zone->free_area[order];  in page_reporting_cycle()
|
D | show_mem.c |
    367  struct free_area *area = &zone->free_area[order];  in show_free_areas()
|
D | page_alloc.c |
    657  struct free_area *area = &zone->free_area[order];  in __add_to_free_list()
    678  struct free_area *area = &zone->free_area[order];  in move_to_free_list()
    705  zone->free_area[order].nr_free--;  in __del_page_from_free_list()
    715  static inline struct page *get_page_from_free_area(struct free_area *area,  in get_page_from_free_area()
    1590  struct free_area *area;  in __rmqueue_smallest()
    1595  area = &(zone->free_area[current_order]);  in __rmqueue_smallest()
    1999  int find_suitable_fallback(struct free_area *area, unsigned int order,  in find_suitable_fallback()
    2107  struct free_area *area = &(zone->free_area[order]);  in unreserve_highatomic_pageblock()
    2185  struct free_area *area;  in __rmqueue_fallback()
    2207  area = &(zone->free_area[current_order]);  in __rmqueue_fallback()
    [all …]
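These are the core buddy-allocator bookkeeping sites. The pattern behind __add_to_free_list() and __del_page_from_free_list() reduces to linking the page into the per-migratetype list of the matching order and adjusting nr_free. A simplified sketch (the _sketch names are not kernel functions; debug checks, tail insertion and page-reporting hooks are omitted; buddy_list is the list member used in the snapshot.c hit below):

    static inline void add_to_free_list_sketch(struct page *page, struct zone *zone,
                                               unsigned int order, int migratetype)
    {
        struct free_area *area = &zone->free_area[order];

        list_add(&page->buddy_list, &area->free_list[migratetype]);
        area->nr_free++;
    }

    static inline void del_from_free_list_sketch(struct page *page, struct zone *zone,
                                                 unsigned int order)
    {
        list_del(&page->buddy_list);
        zone->free_area[order].nr_free--;
    }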
|
D | compaction.c |
    1582  struct free_area *area = &cc->zone->free_area[order];  in fast_isolate_freepages()
    2016  struct free_area *area = &cc->zone->free_area[order];  in fast_find_migrateblock()
    2331  struct free_area *area = &cc->zone->free_area[order];  in __compact_finished()
|
D | vmstat.c |
    1089  blocks = data_race(zone->free_area[order].nr_free);  in fill_contig_page_info()
    1534  seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));  in frag_show_print()
    1560  struct free_area *area;  in pagetypeinfo_showfree_print()
    1564  area = &(zone->free_area[order]);  in pagetypeinfo_showfree_print()
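vmstat.c only reads the counters. The fill_contig_page_info() hit feeds the fragmentation index: per-order nr_free values, read via data_race() because zone->lock is not held, are rolled up into total free pages, total free blocks and blocks large enough for a given request. A hedged sketch of that accumulation (the contig_page_info field names are assumptions):

    for (order = 0; order < NR_PAGE_ORDERS; order++) {
        unsigned long blocks = data_race(zone->free_area[order].nr_free);

        info->free_blocks_total += blocks;            /* field names assumed */
        info->free_pages += blocks << order;
        if (order >= suitable_order)
            info->free_blocks_suitable +=
                blocks << (order - suitable_order);
    }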
|
D | internal.h |
    835  int find_suitable_fallback(struct free_area *area, unsigned int order,
    838  static inline bool free_area_empty(struct free_area *area, int migratetype)  in free_area_empty()
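free_area_empty() is presumably the obvious one-liner over the per-migratetype list; a sketch consistent with the truncated declaration above:

    static inline bool free_area_empty(struct free_area *area, int migratetype)
    {
        return list_empty(&area->free_list[migratetype]);
    }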
|
D | mm_init.c |
    1399  INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);  in zone_init_free_lists()
    1400  zone->free_area[order].nr_free = 0;  in zone_init_free_lists()
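The two mm_init.c hits are the whole of the free_area setup: every (order, migratetype) list starts empty and every counter at zero. In context (a sketch; for_each_migratetype_order() is the nested-loop helper from mmzone.h):

    unsigned int order, t;

    for_each_migratetype_order(order, t) {
        INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
        zone->free_area[order].nr_free = 0;
    }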
|
/linux-6.12.1/drivers/md/ |
D | dm-snap-persistent.c |
    205  static void free_area(struct pstore *ps)  in free_area() function
    365  free_area(ps);  in read_header()
    379  free_area(ps);  in read_header()
    601  free_area(ps);  in persistent_dtr()
|
/linux-6.12.1/include/linux/ |
D | mmzone.h |
    117  struct free_area {  struct
    938  struct free_area free_area[NR_PAGE_ORDERS];  member
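The two mmzone.h hits are the definitions everything else in this listing builds on: struct zone carries one struct free_area per allocation order, and each free_area holds one list head per migrate type plus a count of free blocks of that order. A trimmed sketch (any debug or padding annotations around the real declarations are omitted):

    struct free_area {
        struct list_head    free_list[MIGRATE_TYPES];
        unsigned long       nr_free;
    };

    /* inside struct zone: */
    struct free_area    free_area[NR_PAGE_ORDERS];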
|
/linux-6.12.1/kernel/events/ |
D | uprobes.c |
    1546  goto free_area;  in __create_xol_area()
    1566  free_area:  in __create_xol_area()
|
/linux-6.12.1/kernel/power/ |
D | snapshot.c |
    1279  &zone->free_area[order].free_list[t], buddy_list) {  in mark_free_pages()
|