
Searched refs:folio_list (Results 1 – 7 of 7) sorted by relevance

/linux-6.12.1/mm/damon/
paddr.c
249 LIST_HEAD(folio_list); in damon_pa_pageout()
283 list_add(&folio->lru, &folio_list); in damon_pa_pageout()
289 applied = reclaim_pages(&folio_list); in damon_pa_pageout()
363 static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list, in damon_pa_migrate_folio_list() argument
372 while (!list_empty(folio_list)) { in damon_pa_migrate_folio_list()
377 folio = lru_to_folio(folio_list); in damon_pa_migrate_folio_list()
400 list_splice_init(&migrate_folios, folio_list); in damon_pa_migrate_folio_list()
404 list_splice(&ret_folios, folio_list); in damon_pa_migrate_folio_list()
406 while (!list_empty(folio_list)) { in damon_pa_migrate_folio_list()
407 folio = lru_to_folio(folio_list); in damon_pa_migrate_folio_list()
[all …]
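
The damon_pa_pageout() hits above follow the common mm batching idiom: declare a private on-stack list, queue candidate folios through their embedded lru links, then hand the whole batch to reclaim_pages(). A minimal sketch of that shape, assuming a hypothetical pick_cold_folio() in place of DAMON's real per-region PFN walk:

    static unsigned long pageout_batch_sketch(void)
    {
            LIST_HEAD(folio_list);  /* initialized, empty on-stack list head */
            struct folio *folio;

            /* pick_cold_folio() is hypothetical; damon_pa_pageout() really
             * walks the PFNs of a DAMON region to find candidates */
            while ((folio = pick_cold_folio()) != NULL)
                    list_add(&folio->lru, &folio_list);

            /* consumes the batch; returns the number of pages reclaimed,
             * which damon_pa_pageout() records as "applied" */
            return reclaim_pages(&folio_list);
    }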
/linux-6.12.1/mm/
hugetlb_vmemmap.h
23 struct list_head *folio_list,
26 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
52 struct list_head *folio_list, in hugetlb_vmemmap_restore_folios() argument
55 list_splice_init(folio_list, non_hvo_folios); in hugetlb_vmemmap_restore_folios()
63 static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) in hugetlb_vmemmap_optimize_folios() argument
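
Lines 52 and 63 of hugetlb_vmemmap.h are the stubs used when HugeTLB vmemmap optimization (HVO) is compiled out: restore reports every folio as never-optimized by splicing the input list onto the output list, and optimize is a no-op. A sketch of that stub idiom, with hypothetical names and abbreviated signatures (the real prototypes also take an hstate, and the return value here is an assumption):

    static inline long restore_folios_stub(struct list_head *folio_list,
                                           struct list_head *non_hvo_folios)
    {
            /* nothing was ever HVO-optimized, so every folio is non-HVO:
             * move the whole list over and re-init the emptied source head */
            list_splice_init(folio_list, non_hvo_folios);
            return 0;
    }

    static inline void optimize_folios_stub(struct list_head *folio_list)
    {
            /* no-op when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is off */
    }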
hugetlb_vmemmap.c
514 struct list_head *folio_list, in hugetlb_vmemmap_restore_folios() argument
522 list_for_each_entry_safe(folio, t_folio, folio_list, lru) { in hugetlb_vmemmap_restore_folios()
648 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) in hugetlb_vmemmap_optimize_folios() argument
654 list_for_each_entry(folio, folio_list, lru) { in hugetlb_vmemmap_optimize_folios()
669 list_for_each_entry(folio, folio_list, lru) { in hugetlb_vmemmap_optimize_folios()
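
Note the iterator choice in hugetlb_vmemmap.c: the restore path at line 522 uses list_for_each_entry_safe() because its body may move the current folio onto another list mid-walk, while the optimize passes at lines 654 and 669 iterate with plain list_for_each_entry(), which is only safe because they leave the list links alone. A sketch of why the _safe variant matters here; restore_one() is a hypothetical per-folio helper:

    static void restore_walk_sketch(struct list_head *folio_list,
                                    struct list_head *non_hvo_folios)
    {
            struct folio *folio, *t_folio;

            list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
                    if (restore_one(folio))
                            /* safe to unlink: the next entry was already
                             * saved in t_folio before the body ran */
                            list_move(&folio->lru, non_hvo_folios);
            }
    }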
vmscan.c
632 struct swap_iocb **plug, struct list_head *folio_list) in pageout() argument
686 wbc.list = folio_list; in pageout()
1042 static unsigned int shrink_folio_list(struct list_head *folio_list, in shrink_folio_list() argument
1060 while (!list_empty(folio_list)) { in shrink_folio_list()
1069 folio = lru_to_folio(folio_list); in shrink_folio_list()
1193 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1244 split_folio_to_list(folio, folio_list)) in shrink_folio_list()
1253 if (split_folio_to_list(folio, folio_list)) in shrink_folio_list()
1367 switch (pageout(folio, mapping, &plug, folio_list)) { in shrink_folio_list()
1523 list_splice_init(&demote_folios, folio_list); in shrink_folio_list()
[all …]
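
The shrink_folio_list() excerpts show mm's standard drain-and-splice loop: lru_to_folio() peels the entry at the tail of the list (it reads head->prev), each folio is processed off-list, and the sub-lists that accumulate along the way are spliced back onto folio_list for the caller, as line 1523 here and lines 400/404 in damon_pa_migrate_folio_list() above both do. A compressed sketch of that shape; process_one() is hypothetical:

    static void drain_and_splice_sketch(struct list_head *folio_list)
    {
            LIST_HEAD(ret_folios);  /* folios we could not process */

            while (!list_empty(folio_list)) {
                    struct folio *folio = lru_to_folio(folio_list);

                    list_del(&folio->lru);
                    if (!process_one(folio))
                            list_add(&folio->lru, &ret_folios);
            }

            /* hand the survivors back to the caller on the same list */
            list_splice(&ret_folios, folio_list);
    }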
madvise.c
348 LIST_HEAD(folio_list); in madvise_cold_or_pageout_pte_range()
419 list_add(&folio->lru, &folio_list); in madvise_cold_or_pageout_pte_range()
426 reclaim_pages(&folio_list); in madvise_cold_or_pageout_pte_range()
539 list_add(&folio->lru, &folio_list); in madvise_cold_or_pageout_pte_range()
550 reclaim_pages(&folio_list); in madvise_cold_or_pageout_pte_range()
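
madvise_cold_or_pageout_pte_range() reuses the batching idiom from damon_pa_pageout() verbatim: one LIST_HEAD() per page-table walk, a list_add() in each of the two mapping branches (lines 419 and 539), and a single reclaim_pages() to flush each batch. For reference, LIST_HEAD() is the self-referencing initializer from include/linux/list.h, so the list starts out valid and empty:

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    #define LIST_HEAD(name) \
            struct list_head name = LIST_HEAD_INIT(name)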
hugetlb.c
1779 struct list_head *folio_list, in bulk_vmemmap_restore_error() argument
1811 list_for_each_entry_safe(folio, t_folio, folio_list, lru) in bulk_vmemmap_restore_error()
1830 struct list_head *folio_list) in update_and_free_pages_bulk() argument
1842 ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios); in update_and_free_pages_bulk()
1844 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios); in update_and_free_pages_bulk()
1855 VM_WARN_ON(!list_empty(folio_list)); in update_and_free_pages_bulk()
2104 struct list_head *folio_list) in prep_and_add_allocated_folios() argument
2110 hugetlb_vmemmap_optimize_folios(h, folio_list); in prep_and_add_allocated_folios()
2114 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { in prep_and_add_allocated_folios()
3204 struct list_head *folio_list) in prep_and_add_bootmem_folios() argument
[all …]
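
update_and_free_pages_bulk() in hugetlb.c shows the batch-with-fallback shape: attempt the bulk vmemmap restore first (line 1842), drop to the slower per-folio error path only if that fails (line 1844), and assert on return that every folio was moved off the input list (line 1855). A hedged sketch, with hypothetical helpers standing in for the real restore and fallback:

    static void bulk_free_sketch(struct list_head *folio_list)
    {
            LIST_HEAD(non_hvo_folios);
            long ret;

            ret = bulk_restore(folio_list, &non_hvo_folios);
            if (ret < 0)
                    bulk_restore_fallback(folio_list, &non_hvo_folios);

            /* ... actually free the folios on both lists ... */

            /* by now every folio must have been consumed */
            VM_WARN_ON(!list_empty(folio_list));
    }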
internal.h
1137 unsigned long reclaim_pages(struct list_head *folio_list);
1139 struct list_head *folio_list);
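
internal.h carries the mm-private contract every call site above leans on: reclaim_pages() takes a caller-assembled list of folios linked through folio->lru and returns how many pages it reclaimed. A usage sketch under that contract, with a hypothetical collect_cold_folios():

    LIST_HEAD(batch);
    unsigned long reclaimed;

    collect_cold_folios(&batch);            /* hypothetical collector */
    reclaimed = reclaim_pages(&batch);      /* declared at internal.h:1137 */
    pr_debug("reclaimed %lu pages\n", reclaimed);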