Lines matching references to new_folio in the Linux kernel's hugetlb code (mm/hugetlb.c). Each hit shows the source line number, the matching line, the enclosing function, and whether new_folio is a local or an argument at that point.
2852 struct folio *new_folio = NULL; in alloc_and_dissolve_hugetlb_folio() local
2884 if (!new_folio) { in alloc_and_dissolve_hugetlb_folio()
2886 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, in alloc_and_dissolve_hugetlb_folio()
2888 if (!new_folio) in alloc_and_dissolve_hugetlb_folio()
2890 __prep_new_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2908 enqueue_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2921 if (new_folio) in alloc_and_dissolve_hugetlb_folio()
2922 update_and_free_hugetlb_folio(h, new_folio, false); in alloc_and_dissolve_hugetlb_folio()
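The hits in alloc_and_dissolve_hugetlb_folio() show the allocate-on-demand pattern for replacing a free hugetlb folio: new_folio starts out NULL, a buddy-allocated replacement is prepared only once it is actually needed, a successful swap enqueues it on the hstate free list, and a replacement that never got enqueued is freed again on the way out. A condensed sketch of that control flow, assembled from the hits above; the retry loop, hugetlb_lock handling, the free_new label, the -ENOMEM return and the truncated allocator arguments are assumptions or elisions, not lines from the listing:

/* condensed sketch, not the full function body */
struct folio *new_folio = NULL;

if (!new_folio) {
        /* first pass: allocate and prepare a replacement folio */
        new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid /* , trailing args truncated in the hit */);
        if (!new_folio)
                return -ENOMEM;                 /* assumption: error value not shown */
        __prep_new_hugetlb_folio(h, new_folio);
}

/* success: the prepared replacement takes the dissolved folio's place on the
 * hstate free list, and the function returns without reaching free_new */
enqueue_hugetlb_folio(h, new_folio);
return ret;

free_new:                                       /* assumption: label not in the hits */
/* bail-out paths: a replacement that was never enqueued is freed again */
if (new_folio)
        update_and_free_hugetlb_folio(h, new_folio, false);
return ret;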
5193 struct folio *new_folio, pte_t old, unsigned long sz) in hugetlb_install_folio() argument
5195 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); in hugetlb_install_folio()
5197 __folio_mark_uptodate(new_folio); in hugetlb_install_folio()
5198 hugetlb_add_new_anon_rmap(new_folio, vma, addr); in hugetlb_install_folio()
5203 folio_set_hugetlb_migratable(new_folio); in hugetlb_install_folio()
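hugetlb_install_folio() is the small helper that wires a freshly copied folio into a destination PTE during fork. The hits show the order of operations around new_folio; the sketch below condenses them, and the set_huge_pte_at() step mentioned in the comment is an assumption filled in from context rather than a line in the listing:

/* condensed from the hits above; not the full helper body */
pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);   /* writable huge PTE */

__folio_mark_uptodate(new_folio);                         /* contents are now valid */
hugetlb_add_new_anon_rmap(new_folio, vma, addr);          /* new anonymous rmap */
/* assumption: newpte is then written into the page table (set_huge_pte_at())
 * and the mm's hugetlb counters are updated before the final step below */
folio_set_hugetlb_migratable(new_folio);                  /* folio may now be migrated */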
5324 struct folio *new_folio; in copy_hugetlb_page_range() local
5329 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); in copy_hugetlb_page_range()
5330 if (IS_ERR(new_folio)) { in copy_hugetlb_page_range()
5332 ret = PTR_ERR(new_folio); in copy_hugetlb_page_range()
5335 ret = copy_user_large_folio(new_folio, pte_folio, in copy_hugetlb_page_range()
5339 folio_put(new_folio); in copy_hugetlb_page_range()
5350 new_folio); in copy_hugetlb_page_range()
5351 folio_put(new_folio); in copy_hugetlb_page_range()
5356 new_folio, src_pte_old, sz); in copy_hugetlb_page_range()
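In copy_hugetlb_page_range() the hits trace the fork-time fallback taken when the parent's folio cannot simply be shared with the child: allocate a destination hugetlb folio, copy the contents, and either install the copy via hugetlb_install_folio() or drop it again. A condensed sketch follows; the break statements, the recheck of the source PTE, the call that the bare "new_folio);" hit at line 5350 continues, and the leading hugetlb_install_folio() arguments are assumptions filled in from context:

/* condensed sketch; the surrounding copy loop and page-table locking are omitted */
new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
if (IS_ERR(new_folio)) {
        ret = PTR_ERR(new_folio);     /* propagate the allocation failure */
        break;                        /* assumption */
}
ret = copy_user_large_folio(new_folio, pte_folio /* , trailing args truncated in the hit */);
if (ret) {
        folio_put(new_folio);         /* copy failed: drop the unused copy */
        break;                        /* assumption */
}
/* assumption: if the source PTE changed while unlocked, the copy is abandoned:
 *   restore_reserve_on_error(..., new_folio);   (line 5350 is this call's tail)
 *   folio_put(new_folio);
 * otherwise the copy is installed in the child's page table: */
hugetlb_install_folio(dst_vma, dst_pte, addr,     /* leading args are an assumption */
                      new_folio, src_pte_old, sz);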
5782 struct folio *new_folio; in hugetlb_wp() local
5861 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve); in hugetlb_wp()
5863 if (IS_ERR(new_folio)) { in hugetlb_wp()
5910 ret = vmf_error(PTR_ERR(new_folio)); in hugetlb_wp()
5922 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) { in hugetlb_wp()
5926 __folio_mark_uptodate(new_folio); in hugetlb_wp()
5939 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); in hugetlb_wp()
5944 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address); in hugetlb_wp()
5949 folio_set_hugetlb_migratable(new_folio); in hugetlb_wp()
5951 new_folio = old_folio; in hugetlb_wp()
5960 if (new_folio != old_folio) in hugetlb_wp()
5961 restore_reserve_on_error(h, vma, vmf->address, new_folio); in hugetlb_wp()
5962 folio_put(new_folio); in hugetlb_wp()
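hugetlb_wp() is the hugetlb write-protect fault handler (copy-on-write and unshare). The hits cover the whole life of the replacement folio: allocate it, copy from old_folio, mark it up to date, build a PTE whose writability depends on !unshare, take over the anonymous rmap, and mark the folio migratable. The hit at line 5951, new_folio = old_folio, is the trick that makes the shared exit path release the correct folio: once the copy has been installed, only old_folio still needs a put, and the reserve is only restored when new_folio is still the unused copy. A condensed sketch; the goto label, the copy-failure branch and the intermediate page-table update are assumptions:

/* condensed sketch; outside_reserve retry handling, locking and TLB flushing
 * are omitted */
new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
if (IS_ERR(new_folio)) {
        /* ... outside_reserve handling elided ... */
        ret = vmf_error(PTR_ERR(new_folio));
        goto out_release_old;                          /* assumption: label not in the hits */
}

if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
        /* assumption: copy failure is handled here; not shown in the hits */
}
__folio_mark_uptodate(new_folio);

/* under the page-table lock, if the PTE has not changed: */
pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);  /* read-only on unshare */
hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
/* assumption: the old mapping is torn down and newpte is written here */
folio_set_hugetlb_migratable(new_folio);
new_folio = old_folio;        /* the shared exit below now releases old_folio */

/* shared exit/error path */
if (new_folio != old_folio)
        restore_reserve_on_error(h, vma, vmf->address, new_folio);
folio_put(new_folio);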
6063 bool new_folio, new_pagecache_folio = false; in hugetlb_no_page() local
6082 new_folio = false; in hugetlb_no_page()
6144 new_folio = true; in hugetlb_no_page()
6242 if (new_folio) in hugetlb_no_page()
6262 if (new_folio && !new_pagecache_folio) in hugetlb_no_page()
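Note that in hugetlb_no_page() new_folio is not a folio pointer at all but a local bool (hit at line 6063) recording whether the fault had to allocate a fresh folio rather than find one in the page cache. The flag steers two decisions near the end of the handler. A minimal sketch; the bodies of the two if statements and the folio/vmf names used in them are assumptions filled in from context, since only the conditions appear in the hits:

bool new_folio, new_pagecache_folio = false;

new_folio = false;   /* an existing folio was found, e.g. in the page cache */
/* ... */
new_folio = true;    /* this fault allocated a fresh hugetlb folio */

/* only folios allocated by this fault are marked migratable here */
if (new_folio)
        folio_set_hugetlb_migratable(folio);                        /* assumption */

/* a reservation consumed for a folio that never reached the page cache
 * is given back on the error path */
if (new_folio && !new_pagecache_folio)
        restore_reserve_on_error(h, vma, vmf->address, folio);      /* assumption */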
7446 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) in move_hugetlb_state() argument
7450 hugetlb_cgroup_migrate(old_folio, new_folio); in move_hugetlb_state()
7451 set_page_owner_migrate_reason(&new_folio->page, reason); in move_hugetlb_state()
7463 if (folio_test_hugetlb_temporary(new_folio)) { in move_hugetlb_state()
7465 int new_nid = folio_nid(new_folio); in move_hugetlb_state()
7468 folio_clear_hugetlb_temporary(new_folio); in move_hugetlb_state()
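Finally, move_hugetlb_state() transfers per-folio state from the source to the target of a hugetlb migration: the hugetlb cgroup charge and the page-owner migrate reason follow new_folio, and a target that was allocated as a temporary hugetlb folio has that status cleared because it is about to become the permanent copy. A condensed sketch; the transfer of the temporary flag to old_folio and the per-node surplus fixup that consumes new_nid are assumptions, as those lines are not in the hits:

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
        /* cgroup charge and page-owner bookkeeping follow the migration target */
        hugetlb_cgroup_migrate(old_folio, new_folio);
        set_page_owner_migrate_reason(&new_folio->page, reason);

        /* the migration target becomes the long-lived copy, so it must not
         * keep the "temporary" status it may have been allocated with */
        if (folio_test_hugetlb_temporary(new_folio)) {
                int new_nid = folio_nid(new_folio);

                /* assumption: the temporary status moves to old_folio and the
                 * per-node surplus counters are rebalanced using new_nid when
                 * the two folios sit on different nodes */
                folio_clear_hugetlb_temporary(new_folio);
        }
}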