Lines matching references to h (struct hstate *) in mm/hugetlb.c
85 static int hugetlb_acct_memory(struct hstate *h, long delta);
132 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
144 spool->hstate = h; in hugepage_new_subpool()
147 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
468 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
475 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
489 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
493 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
550 long to, struct hstate *h, struct hugetlb_cgroup *cg, in hugetlb_resv_map_add() argument
557 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); in hugetlb_resv_map_add()
576 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
616 iter->from, h, h_cg, in add_reservation_in_range()
629 t, h, h_cg, regions_needed); in add_reservation_in_range()
708 long in_regions_needed, struct hstate *h, in region_add() argument
746 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
944 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
946 if (!hugetlb_acct_memory(h, 1)) in hugetlb_fix_reserve_counts()
991 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
994 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
995 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
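
The offset computation at lines 994-995 maps a faulting address to a huge-page-sized index into the backing file. A minimal userspace sketch of that arithmetic follows; the 4 KiB base page, 2 MiB huge page, and the sample VMA values are assumptions for illustration, and the function name is invented rather than the kernel's.

#include <stdio.h>

/* Assumed geometry: 4 KiB base pages, 2 MiB huge pages. */
#define PAGE_SHIFT       12UL
#define HUGE_PAGE_SHIFT  21UL
#define HUGE_PAGE_ORDER  (HUGE_PAGE_SHIFT - PAGE_SHIFT)   /* 9 */

/*
 * Mirrors the arithmetic in vma_hugecache_offset(): distance into the VMA
 * in huge-page units, plus the VMA's starting file offset converted from
 * base-page units to huge-page units.
 */
static unsigned long hugecache_offset(unsigned long address,
                                      unsigned long vm_start,
                                      unsigned long vm_pgoff)
{
        return ((address - vm_start) >> HUGE_PAGE_SHIFT) +
               (vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
        /* Hypothetical VMA: file offset of 512 base pages, fault 4 MiB in. */
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_pgoff = 512;
        unsigned long address  = 0x40400000UL;

        printf("huge page index = %lu\n",
               hugecache_offset(address, vm_start, vm_pgoff));
        return 0;
}
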
1068 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
1071 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
1077 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
1078 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1312 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) in enqueue_hugetlb_folio() argument
1319 list_move(&folio->lru, &h->hugepage_freelists[nid]); in enqueue_hugetlb_folio()
1320 h->free_huge_pages++; in enqueue_hugetlb_folio()
1321 h->free_huge_pages_node[nid]++; in enqueue_hugetlb_folio()
1325 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, in dequeue_hugetlb_folio_node_exact() argument
1332 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { in dequeue_hugetlb_folio_node_exact()
1339 list_move(&folio->lru, &h->hugepage_activelist); in dequeue_hugetlb_folio_node_exact()
1342 h->free_huge_pages--; in dequeue_hugetlb_folio_node_exact()
1343 h->free_huge_pages_node[nid]--; in dequeue_hugetlb_folio_node_exact()
1350 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, in dequeue_hugetlb_folio_nodemask() argument
1380 folio = dequeue_hugetlb_folio_node_exact(h, node); in dequeue_hugetlb_folio_nodemask()
1390 static unsigned long available_huge_pages(struct hstate *h) in available_huge_pages() argument
1392 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
1395 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, in dequeue_hugetlb_folio_vma() argument
1411 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1415 if (avoid_reserve && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1418 gfp_mask = htlb_alloc_mask(h); in dequeue_hugetlb_folio_vma()
1422 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1430 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1435 h->resv_huge_pages--; in dequeue_hugetlb_folio_vma()
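
Lines 1312-1343 and 1435 above show the counter discipline around the free lists: enqueueing a folio bumps free_huge_pages globally and per node, dequeueing decrements both, and a dequeue that consumes a reservation also drops resv_huge_pages. Below is a minimal userspace model of that bookkeeping; the two-node pool, the struct layout, and the function names are invented for illustration and are not the kernel's API.

#include <stdio.h>

#define NR_NODES 2      /* assumed node count for the sketch */

/* Toy model of the hstate counters referenced in the listing above. */
struct pool {
        long free_huge_pages;
        long free_huge_pages_node[NR_NODES];
        long resv_huge_pages;
};

/* Counterpart of enqueue_hugetlb_folio(): a page returns to a node's free list. */
static void pool_enqueue(struct pool *p, int nid)
{
        p->free_huge_pages++;
        p->free_huge_pages_node[nid]++;
}

/*
 * Counterpart of dequeue_hugetlb_folio_node_exact() plus the reserve
 * consumption at the end of dequeue_hugetlb_folio_vma(): take a free page
 * off one node, optionally charging it against the reserve count.
 */
static int pool_dequeue(struct pool *p, int nid, int use_reserve)
{
        if (p->free_huge_pages_node[nid] == 0)
                return -1;
        p->free_huge_pages--;
        p->free_huge_pages_node[nid]--;
        if (use_reserve)
                p->resv_huge_pages--;
        return 0;
}

int main(void)
{
        struct pool p = { .resv_huge_pages = 1 };

        pool_enqueue(&p, 0);
        pool_enqueue(&p, 1);
        if (pool_dequeue(&p, 0, 1) == 0)   /* reserved fault consumes the reservation */
                printf("free=%ld resv=%ld\n", p.free_huge_pages, p.resv_huge_pages);
        return 0;
}
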
1492 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1498 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1499 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1518 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1522 int order = huge_page_order(h); in alloc_gigantic_folio()
1567 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1575 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1590 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, in remove_hugetlb_folio() argument
1599 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in remove_hugetlb_folio()
1606 h->free_huge_pages--; in remove_hugetlb_folio()
1607 h->free_huge_pages_node[nid]--; in remove_hugetlb_folio()
1610 h->surplus_huge_pages--; in remove_hugetlb_folio()
1611 h->surplus_huge_pages_node[nid]--; in remove_hugetlb_folio()
1622 h->nr_huge_pages--; in remove_hugetlb_folio()
1623 h->nr_huge_pages_node[nid]--; in remove_hugetlb_folio()
1626 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, in add_hugetlb_folio() argument
1636 h->nr_huge_pages++; in add_hugetlb_folio()
1637 h->nr_huge_pages_node[nid]++; in add_hugetlb_folio()
1640 h->surplus_huge_pages++; in add_hugetlb_folio()
1641 h->surplus_huge_pages_node[nid]++; in add_hugetlb_folio()
1653 enqueue_hugetlb_folio(h, folio); in add_hugetlb_folio()
1656 static void __update_and_free_hugetlb_folio(struct hstate *h, in __update_and_free_hugetlb_folio() argument
1661 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __update_and_free_hugetlb_folio()
1676 if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) { in __update_and_free_hugetlb_folio()
1683 add_hugetlb_folio(h, folio, true); in __update_and_free_hugetlb_folio()
1732 struct hstate *h; in free_hpage_workfn() local
1744 h = size_to_hstate(folio_size(folio)); in free_hpage_workfn()
1746 __update_and_free_hugetlb_folio(h, folio); in free_hpage_workfn()
1753 static inline void flush_free_hpage_work(struct hstate *h) in flush_free_hpage_work() argument
1755 if (hugetlb_vmemmap_optimizable(h)) in flush_free_hpage_work()
1759 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, in update_and_free_hugetlb_folio() argument
1763 __update_and_free_hugetlb_folio(h, folio); in update_and_free_hugetlb_folio()
1778 static void bulk_vmemmap_restore_error(struct hstate *h, in bulk_vmemmap_restore_error() argument
1797 update_and_free_hugetlb_folio(h, folio, false); in bulk_vmemmap_restore_error()
1812 if (hugetlb_vmemmap_restore_folio(h, folio)) { in bulk_vmemmap_restore_error()
1815 add_hugetlb_folio(h, folio, true); in bulk_vmemmap_restore_error()
1822 update_and_free_hugetlb_folio(h, folio, false); in bulk_vmemmap_restore_error()
1829 static void update_and_free_pages_bulk(struct hstate *h, in update_and_free_pages_bulk() argument
1842 ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios); in update_and_free_pages_bulk()
1844 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios); in update_and_free_pages_bulk()
1865 update_and_free_hugetlb_folio(h, folio, false); in update_and_free_pages_bulk()
1872 struct hstate *h; in size_to_hstate() local
1874 for_each_hstate(h) { in size_to_hstate()
1875 if (huge_page_size(h) == size) in size_to_hstate()
1876 return h; in size_to_hstate()
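
size_to_hstate() at lines 1872-1876 is a linear scan over the registered hstates for an exact page-size match. A standalone sketch of that lookup pattern, using an assumed table of two sizes and invented type and function names:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct hstate: only the size and a label matter here. */
struct hstate_like {
        unsigned long size;
        const char *name;
};

/* Assumed table: 2 MiB and 1 GiB huge pages. */
static struct hstate_like hstates[] = {
        { 2UL << 20, "hugepages-2048kB" },
        { 1UL << 30, "hugepages-1048576kB" },
};

/* Same shape as the size_to_hstate() loop: exact match or NULL. */
static struct hstate_like *size_to_hstate_like(unsigned long size)
{
        for (size_t i = 0; i < sizeof(hstates) / sizeof(hstates[0]); i++)
                if (hstates[i].size == size)
                        return &hstates[i];
        return NULL;
}

int main(void)
{
        struct hstate_like *h = size_to_hstate_like(2UL << 20);

        printf("%s\n", h ? h->name : "no hstate for that size");
        return 0;
}
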
1887 struct hstate *h = folio_hstate(folio); in free_huge_folio() local
1924 hugetlb_cgroup_uncharge_folio(hstate_index(h), in free_huge_folio()
1925 pages_per_huge_page(h), folio); in free_huge_folio()
1926 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in free_huge_folio()
1927 pages_per_huge_page(h), folio); in free_huge_folio()
1930 h->resv_huge_pages++; in free_huge_folio()
1933 remove_hugetlb_folio(h, folio, false); in free_huge_folio()
1935 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1936 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_folio()
1938 remove_hugetlb_folio(h, folio, true); in free_huge_folio()
1940 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1943 enqueue_hugetlb_folio(h, folio); in free_huge_folio()
1951 static void __prep_account_new_huge_page(struct hstate *h, int nid) in __prep_account_new_huge_page() argument
1954 h->nr_huge_pages++; in __prep_account_new_huge_page()
1955 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1958 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio) in init_new_hugetlb_folio() argument
1967 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) in __prep_new_hugetlb_folio() argument
1969 init_new_hugetlb_folio(h, folio); in __prep_new_hugetlb_folio()
1970 hugetlb_vmemmap_optimize_folio(h, folio); in __prep_new_hugetlb_folio()
1973 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) in prep_new_hugetlb_folio() argument
1975 __prep_new_hugetlb_folio(h, folio); in prep_new_hugetlb_folio()
1977 __prep_account_new_huge_page(h, nid); in prep_new_hugetlb_folio()
2001 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, in alloc_buddy_hugetlb_folio() argument
2005 int order = huge_page_order(h); in alloc_buddy_hugetlb_folio()
2065 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h, in only_alloc_fresh_hugetlb_folio() argument
2071 if (hstate_is_gigantic(h)) in only_alloc_fresh_hugetlb_folio()
2072 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); in only_alloc_fresh_hugetlb_folio()
2074 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); in only_alloc_fresh_hugetlb_folio()
2076 init_new_hugetlb_folio(h, folio); in only_alloc_fresh_hugetlb_folio()
2087 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, in alloc_fresh_hugetlb_folio() argument
2092 if (hstate_is_gigantic(h)) in alloc_fresh_hugetlb_folio()
2093 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); in alloc_fresh_hugetlb_folio()
2095 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); in alloc_fresh_hugetlb_folio()
2099 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); in alloc_fresh_hugetlb_folio()
2103 static void prep_and_add_allocated_folios(struct hstate *h, in prep_and_add_allocated_folios() argument
2110 hugetlb_vmemmap_optimize_folios(h, folio_list); in prep_and_add_allocated_folios()
2115 __prep_account_new_huge_page(h, folio_nid(folio)); in prep_and_add_allocated_folios()
2116 enqueue_hugetlb_folio(h, folio); in prep_and_add_allocated_folios()
2125 static struct folio *alloc_pool_huge_folio(struct hstate *h, in alloc_pool_huge_folio() argument
2130 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_folio()
2136 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node, in alloc_pool_huge_folio()
2152 static struct folio *remove_pool_hugetlb_folio(struct hstate *h, in remove_pool_hugetlb_folio() argument
2159 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in remove_pool_hugetlb_folio()
2164 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_hugetlb_folio()
2165 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_hugetlb_folio()
2166 folio = list_entry(h->hugepage_freelists[node].next, in remove_pool_hugetlb_folio()
2168 remove_hugetlb_folio(h, folio, acct_surplus); in remove_pool_hugetlb_folio()
2206 struct hstate *h = folio_hstate(folio); in dissolve_free_hugetlb_folio() local
2207 if (!available_huge_pages(h)) in dissolve_free_hugetlb_folio()
2229 remove_hugetlb_folio(h, folio, false); in dissolve_free_hugetlb_folio()
2230 h->max_huge_pages--; in dissolve_free_hugetlb_folio()
2246 rc = hugetlb_vmemmap_restore_folio(h, folio); in dissolve_free_hugetlb_folio()
2249 add_hugetlb_folio(h, folio, false); in dissolve_free_hugetlb_folio()
2250 h->max_huge_pages++; in dissolve_free_hugetlb_folio()
2256 update_and_free_hugetlb_folio(h, folio, false); in dissolve_free_hugetlb_folio()
2278 struct hstate *h; in dissolve_free_hugetlb_folios() local
2284 for_each_hstate(h) in dissolve_free_hugetlb_folios()
2285 order = min(order, huge_page_order(h)); in dissolve_free_hugetlb_folios()
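
dissolve_free_hugetlb_folios() at lines 2278-2285 reduces over every hstate to find the smallest huge page order, so the caller can walk a PFN range in steps of that smallest size. A standalone sketch of the reduction, with assumed orders for 2 MiB and 1 GiB pages on a 4 KiB base-page system:

#include <stdio.h>

int main(void)
{
        /* Assumed orders: 9 (2 MiB) and 18 (1 GiB) relative to 4 KiB pages. */
        unsigned int orders[] = { 9, 18 };
        unsigned int order = ~0U;

        /* Same min-reduction as the for_each_hstate() loop at line 2284. */
        for (unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++)
                if (orders[i] < order)
                        order = orders[i];

        printf("smallest order %u -> step of %u base pages\n", order, 1U << order);
        return 0;
}
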
2300 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, in alloc_surplus_hugetlb_folio() argument
2305 if (hstate_is_gigantic(h)) in alloc_surplus_hugetlb_folio()
2309 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_hugetlb_folio()
2313 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); in alloc_surplus_hugetlb_folio()
2325 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_hugetlb_folio()
2332 h->surplus_huge_pages++; in alloc_surplus_hugetlb_folio()
2333 h->surplus_huge_pages_node[folio_nid(folio)]++; in alloc_surplus_hugetlb_folio()
2341 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_hugetlb_folio() argument
2346 if (hstate_is_gigantic(h)) in alloc_migrate_hugetlb_folio()
2349 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); in alloc_migrate_hugetlb_folio()
2368 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, in alloc_buddy_hugetlb_folio_with_mpol() argument
2373 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_hugetlb_folio_with_mpol()
2381 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2388 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2393 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, in alloc_hugetlb_folio_reserve() argument
2399 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, in alloc_hugetlb_folio_reserve()
2402 VM_BUG_ON(!h->resv_huge_pages); in alloc_hugetlb_folio_reserve()
2403 h->resv_huge_pages--; in alloc_hugetlb_folio_reserve()
2411 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, in alloc_hugetlb_folio_nodemask() argument
2415 if (available_huge_pages(h)) { in alloc_hugetlb_folio_nodemask()
2418 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in alloc_hugetlb_folio_nodemask()
2431 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); in alloc_hugetlb_folio_nodemask()
2455 static int gather_surplus_pages(struct hstate *h, long delta) in gather_surplus_pages() argument
2465 nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); in gather_surplus_pages()
2468 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2470 h->resv_huge_pages += delta; in gather_surplus_pages()
2483 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), in gather_surplus_pages()
2503 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2504 (h->free_huge_pages + allocated); in gather_surplus_pages()
2524 h->resv_huge_pages += delta; in gather_surplus_pages()
2532 enqueue_hugetlb_folio(h, folio); in gather_surplus_pages()
2556 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2564 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2566 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in return_unused_surplus_pages()
2573 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2586 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); in return_unused_surplus_pages()
2595 update_and_free_pages_bulk(h, &page_list); in return_unused_surplus_pages()
2636 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2649 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2716 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2719 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2722 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2725 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2728 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2731 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2734 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2737 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2740 static long vma_del_reservation(struct hstate *h, in vma_del_reservation() argument
2743 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
2766 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, in restore_reserve_on_error() argument
2769 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2786 (void)vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2788 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2799 rc = vma_del_reservation(h, vma, address); in restore_reserve_on_error()
2835 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2847 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, in alloc_and_dissolve_hugetlb_folio() argument
2850 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_and_dissolve_hugetlb_folio()
2886 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, in alloc_and_dissolve_hugetlb_folio()
2890 __prep_new_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2901 remove_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
2907 __prep_account_new_huge_page(h, nid); in alloc_and_dissolve_hugetlb_folio()
2908 enqueue_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2914 update_and_free_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
2922 update_and_free_hugetlb_folio(h, new_folio, false); in alloc_and_dissolve_hugetlb_folio()
2929 struct hstate *h; in isolate_or_dissolve_huge_page() local
2940 h = folio_hstate(folio); in isolate_or_dissolve_huge_page()
2952 if (hstate_is_gigantic(h)) in isolate_or_dissolve_huge_page()
2958 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); in isolate_or_dissolve_huge_page()
2967 struct hstate *h = hstate_vma(vma); in alloc_hugetlb_folio() local
2969 long map_chg, map_commit, nr_pages = pages_per_huge_page(h); in alloc_hugetlb_folio()
2975 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; in alloc_hugetlb_folio()
2984 idx = hstate_index(h); in alloc_hugetlb_folio()
2990 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_hugetlb_folio()
3027 idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3032 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3042 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_hugetlb_folio()
3045 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); in alloc_hugetlb_folio()
3051 h->resv_huge_pages--; in alloc_hugetlb_folio()
3053 list_add(&folio->lru, &h->hugepage_activelist); in alloc_hugetlb_folio()
3058 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); in alloc_hugetlb_folio()
3063 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3071 map_commit = vma_commit_reservation(h, vma, addr); in alloc_hugetlb_folio()
3085 hugetlb_acct_memory(h, -rsv_adjust); in alloc_hugetlb_folio()
3088 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in alloc_hugetlb_folio()
3089 pages_per_huge_page(h), folio); in alloc_hugetlb_folio()
3101 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_hugetlb_folio()
3104 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3110 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
3117 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3119 int __alloc_bootmem_huge_page(struct hstate *h, int nid) in __alloc_bootmem_huge_page() argument
3126 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3133 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
3135 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3157 huge_page_size(h) - PAGE_SIZE); in __alloc_bootmem_huge_page()
3161 m->hstate = h; in __alloc_bootmem_huge_page()
3188 struct hstate *h, in hugetlb_folio_init_vmemmap() argument
3200 prep_compound_head((struct page *)folio, huge_page_order(h)); in hugetlb_folio_init_vmemmap()
3203 static void __init prep_and_add_bootmem_folios(struct hstate *h, in prep_and_add_bootmem_folios() argument
3210 hugetlb_vmemmap_optimize_folios(h, folio_list); in prep_and_add_bootmem_folios()
3222 pages_per_huge_page(h)); in prep_and_add_bootmem_folios()
3226 __prep_account_new_huge_page(h, folio_nid(folio)); in prep_and_add_bootmem_folios()
3227 enqueue_hugetlb_folio(h, folio); in prep_and_add_bootmem_folios()
3240 struct hstate *h = NULL, *prev_h = NULL; in gather_bootmem_prealloc_node() local
3246 h = m->hstate; in gather_bootmem_prealloc_node()
3251 if (h != prev_h && prev_h != NULL) in gather_bootmem_prealloc_node()
3253 prev_h = h; in gather_bootmem_prealloc_node()
3255 VM_BUG_ON(!hstate_is_gigantic(h)); in gather_bootmem_prealloc_node()
3258 hugetlb_folio_init_vmemmap(folio, h, in gather_bootmem_prealloc_node()
3260 init_new_hugetlb_folio(h, folio); in gather_bootmem_prealloc_node()
3268 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc_node()
3272 prep_and_add_bootmem_folios(h, &folio_list); in gather_bootmem_prealloc_node()
3300 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) in hugetlb_hstate_alloc_pages_onenode() argument
3305 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3306 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages_onenode()
3307 if (!alloc_bootmem_huge_page(h, nid)) in hugetlb_hstate_alloc_pages_onenode()
3311 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in hugetlb_hstate_alloc_pages_onenode()
3313 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, in hugetlb_hstate_alloc_pages_onenode()
3321 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3324 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages_onenode()
3326 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3327 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3328 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3331 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h) in hugetlb_hstate_alloc_pages_specific_nodes() argument
3337 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages_specific_nodes()
3338 hugetlb_hstate_alloc_pages_onenode(h, i); in hugetlb_hstate_alloc_pages_specific_nodes()
3346 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h) in hugetlb_hstate_alloc_pages_errcheck() argument
3348 if (allocated < h->max_huge_pages) { in hugetlb_hstate_alloc_pages_errcheck()
3351 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages_errcheck()
3353 h->max_huge_pages, buf, allocated); in hugetlb_hstate_alloc_pages_errcheck()
3354 h->max_huge_pages = allocated; in hugetlb_hstate_alloc_pages_errcheck()
3360 struct hstate *h = (struct hstate *)arg; in hugetlb_pages_alloc_boot_node() local
3370 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY], in hugetlb_pages_alloc_boot_node()
3379 prep_and_add_allocated_folios(h, &folio_list); in hugetlb_pages_alloc_boot_node()
3382 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h) in hugetlb_gigantic_pages_alloc_boot() argument
3386 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_gigantic_pages_alloc_boot()
3387 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) in hugetlb_gigantic_pages_alloc_boot()
3395 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) in hugetlb_pages_alloc_boot() argument
3398 .fn_arg = h, in hugetlb_pages_alloc_boot()
3405 job.size = h->max_huge_pages; in hugetlb_pages_alloc_boot()
3427 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; in hugetlb_pages_alloc_boot()
3430 return h->nr_huge_pages; in hugetlb_pages_alloc_boot()
3444 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
3450 if (hstate_is_gigantic(h) && hugetlb_cma_size) { in hugetlb_hstate_alloc_pages()
3465 if (hugetlb_hstate_alloc_pages_specific_nodes(h)) in hugetlb_hstate_alloc_pages()
3469 if (hstate_is_gigantic(h)) in hugetlb_hstate_alloc_pages()
3470 allocated = hugetlb_gigantic_pages_alloc_boot(h); in hugetlb_hstate_alloc_pages()
3472 allocated = hugetlb_pages_alloc_boot(h); in hugetlb_hstate_alloc_pages()
3474 hugetlb_hstate_alloc_pages_errcheck(allocated, h); in hugetlb_hstate_alloc_pages()
3479 struct hstate *h, *h2; in hugetlb_init_hstates() local
3481 for_each_hstate(h) { in hugetlb_init_hstates()
3483 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
3484 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
3494 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in hugetlb_init_hstates()
3496 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3499 if (h2 == h) in hugetlb_init_hstates()
3501 if (h2->order < h->order && in hugetlb_init_hstates()
3502 h2->order > h->demote_order) in hugetlb_init_hstates()
3503 h->demote_order = h2->order; in hugetlb_init_hstates()
3510 struct hstate *h; in report_hugepages() local
3512 for_each_hstate(h) { in report_hugepages()
3515 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
3517 buf, h->free_huge_pages); in report_hugepages()
3519 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); in report_hugepages()
3524 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3531 if (hstate_is_gigantic(h)) in try_to_free_low()
3539 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3541 if (count >= h->nr_huge_pages) in try_to_free_low()
3545 remove_hugetlb_folio(h, folio, false); in try_to_free_low()
3552 update_and_free_pages_bulk(h, &page_list); in try_to_free_low()
3556 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3567 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
3576 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3577 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3581 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3582 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3583 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3590 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3591 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3595 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
3596 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
3619 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3620 flush_free_hpage_work(h); in set_max_huge_pages()
3632 count += persistent_huge_pages(h) - in set_max_huge_pages()
3633 (h->nr_huge_pages_node[nid] - in set_max_huge_pages()
3634 h->surplus_huge_pages_node[nid]); in set_max_huge_pages()
3652 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
3653 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3655 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3673 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3674 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3679 while (count > (persistent_huge_pages(h) + allocated)) { in set_max_huge_pages()
3690 folio = alloc_pool_huge_folio(h, nodes_allowed, in set_max_huge_pages()
3692 &h->next_nid_to_alloc); in set_max_huge_pages()
3694 prep_and_add_allocated_folios(h, &page_list); in set_max_huge_pages()
3704 prep_and_add_allocated_folios(h, &page_list); in set_max_huge_pages()
3715 prep_and_add_allocated_folios(h, &page_list); in set_max_huge_pages()
3734 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3736 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
3741 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
3742 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0); in set_max_huge_pages()
3750 update_and_free_pages_bulk(h, &page_list); in set_max_huge_pages()
3751 flush_free_hpage_work(h); in set_max_huge_pages()
3754 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
3755 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
3759 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3761 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
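
Two quantities steer the pool resize above: persistent_huge_pages() at line 3595 (total minus surplus pages) and the shrink floor at line 3734 (reserved plus currently handed-out pages, which must stay allocated). A minimal arithmetic sketch of both, using hypothetical counter values and invented helper names:

#include <stdio.h>

/* Hypothetical counter snapshot; field names mirror the listing above. */
struct counters {
        long nr_huge_pages;        /* total pages managed by the hstate */
        long surplus_huge_pages;   /* temporarily overcommitted pages   */
        long free_huge_pages;      /* pages sitting on the free lists   */
        long resv_huge_pages;      /* outstanding reservations          */
};

/* persistent_huge_pages(): what remains once surplus pages are returned. */
static long persistent_pages(const struct counters *c)
{
        return c->nr_huge_pages - c->surplus_huge_pages;
}

/* The floor from line 3734: reserved pages plus pages already in use. */
static long shrink_floor(const struct counters *c)
{
        return c->resv_huge_pages + c->nr_huge_pages - c->free_huge_pages;
}

int main(void)
{
        struct counters c = {
                .nr_huge_pages = 16, .surplus_huge_pages = 2,
                .free_huge_pages = 10, .resv_huge_pages = 3,
        };

        printf("persistent=%ld shrink floor=%ld\n",
               persistent_pages(&c), shrink_floor(&c));
        return 0;
}
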
3919 struct hstate *h; in nr_hugepages_show_common() local
3923 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
3925 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3927 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3933 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
3939 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
3960 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
3969 struct hstate *h; in nr_hugepages_store_common() local
3978 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
3979 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
4020 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
4021 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
4029 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
4031 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
4039 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
4049 struct hstate *h; in free_hugepages_show() local
4053 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
4055 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
4057 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
4066 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
4067 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
4074 struct hstate *h; in surplus_hugepages_show() local
4078 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
4080 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
4082 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
4094 struct hstate *h; in demote_store() local
4101 h = kobj_to_hstate(kobj, &nid); in demote_store()
4111 mutex_lock(&h->resize_lock); in demote_store()
4122 nr_available = h->free_huge_pages_node[nid]; in demote_store()
4124 nr_available = h->free_huge_pages; in demote_store()
4125 nr_available -= h->resv_huge_pages; in demote_store()
4129 rc = demote_pool_huge_page(h, n_mask, nr_demote); in demote_store()
4139 mutex_unlock(&h->resize_lock); in demote_store()
4150 struct hstate *h = kobj_to_hstate(kobj, NULL); in demote_size_show() local
4151 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
4160 struct hstate *h, *demote_hstate; in demote_size_store() local
4174 h = kobj_to_hstate(kobj, NULL); in demote_size_store()
4175 if (demote_order >= h->order) in demote_size_store()
4179 mutex_lock(&h->resize_lock); in demote_size_store()
4180 h->demote_order = demote_order; in demote_size_store()
4181 mutex_unlock(&h->resize_lock); in demote_size_store()
4213 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
4218 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
4220 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
4231 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
4235 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
4305 struct hstate *h; in hugetlb_unregister_node() local
4311 for_each_hstate(h) { in hugetlb_unregister_node()
4312 int idx = hstate_index(h); in hugetlb_unregister_node()
4317 if (h->demote_order) in hugetlb_unregister_node()
4335 struct hstate *h; in hugetlb_register_node() local
4350 for_each_hstate(h) { in hugetlb_register_node()
4351 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
4356 h->name, node->dev.id); in hugetlb_register_node()
4399 struct hstate *h; in hugetlb_sysfs_init() local
4406 for_each_hstate(h) { in hugetlb_sysfs_init()
4407 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
4410 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4506 struct hstate *h; in hugetlb_add_hstate() local
4514 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
4515 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); in hugetlb_add_hstate()
4516 h->order = order; in hugetlb_add_hstate()
4517 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4519 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4520 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4521 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4522 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4523 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4524 huge_page_size(h)/SZ_1K); in hugetlb_add_hstate()
4526 parsed_hstate = h; in hugetlb_add_hstate()
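
hugetlb_add_hstate() at lines 4514-4524 derives the hstate's address mask from its size and formats its sysfs directory name in kB. A standalone sketch of those two derivations for an assumed 2 MiB huge page size:

#include <stdio.h>

#define SZ_1K 1024UL

int main(void)
{
        unsigned long size = 2UL << 20;    /* assumed huge page size: 2 MiB */
        unsigned long mask = ~(size - 1);  /* as in h->mask = ~(huge_page_size(h) - 1) */
        char name[32];

        /* Same naming scheme as the snprintf() at lines 4523-4524. */
        snprintf(name, sizeof(name), "hugepages-%lukB", size / SZ_1K);
        printf("%s mask=0x%lx\n", name, mask);
        return 0;
}
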
4649 struct hstate *h; in hugepagesz_setup() local
4659 h = size_to_hstate(size); in hugepagesz_setup()
4660 if (h) { in hugepagesz_setup()
4668 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
4679 parsed_hstate = h; in hugepagesz_setup()
4738 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
4743 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4744 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
4776 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
4777 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4789 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
4815 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
4822 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4824 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
4834 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4882 struct hstate *h; in hugetlb_report_meminfo() local
4888 for_each_hstate(h) { in hugetlb_report_meminfo()
4889 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4891 total += huge_page_size(h) * count; in hugetlb_report_meminfo()
4893 if (h == &default_hstate) in hugetlb_report_meminfo()
4901 h->free_huge_pages, in hugetlb_report_meminfo()
4902 h->resv_huge_pages, in hugetlb_report_meminfo()
4903 h->surplus_huge_pages, in hugetlb_report_meminfo()
4904 huge_page_size(h) / SZ_1K); in hugetlb_report_meminfo()
4912 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
4921 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4922 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4923 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4928 struct hstate *h; in hugetlb_show_meminfo_node() local
4933 for_each_hstate(h) in hugetlb_show_meminfo_node()
4936 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4937 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4938 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4939 huge_page_size(h) / SZ_1K); in hugetlb_show_meminfo_node()
4951 struct hstate *h; in hugetlb_total_pages() local
4954 for_each_hstate(h) in hugetlb_total_pages()
4955 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
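
hugetlb_total_pages() at lines 4951-4955 sums, across hstates, each pool's page count multiplied by the number of base pages per huge page. A sketch of that sum with two assumed hstates (2 MiB and 1 GiB pages over a 4 KiB base page):

#include <stdio.h>

int main(void)
{
        /* Assumed pools: 4 x 2 MiB pages (512 base pages each) and 1 x 1 GiB page. */
        struct { long nr; long pages_per_huge_page; } hs[] = {
                { 4, 512 }, { 1, 262144 },
        };
        long nr_total_pages = 0;

        for (unsigned int i = 0; i < sizeof(hs) / sizeof(hs[0]); i++)
                nr_total_pages += hs[i].nr * hs[i].pages_per_huge_page;

        printf("base pages backing huge pages = %ld\n", nr_total_pages);
        return 0;
}
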
4959 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
4991 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
4994 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
4995 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
5002 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
5049 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
5061 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
5062 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
5072 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
5214 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range() local
5215 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
5216 unsigned long npages = pages_per_huge_page(h); in copy_hugetlb_page_range()
5238 last_addr_mask = hugetlb_mask_last_page(h); in copy_hugetlb_page_range()
5265 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5266 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5344 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5345 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5349 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
5398 struct hstate *h = hstate_vma(vma); in move_huge_pte() local
5403 dst_ptl = huge_pte_lock(h, mm, dst_pte); in move_huge_pte()
5404 src_ptl = huge_pte_lockptr(h, mm, src_pte); in move_huge_pte()
5426 struct hstate *h = hstate_vma(vma); in move_hugetlb_page_tables() local
5428 unsigned long sz = huge_page_size(h); in move_hugetlb_page_tables()
5446 last_addr_mask = hugetlb_mask_last_page(h); in move_hugetlb_page_tables()
5495 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
5496 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
5502 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
5503 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
5512 last_addr_mask = hugetlb_mask_last_page(h); in __unmap_hugepage_range()
5521 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
5578 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
5587 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
5596 if (!h->surplus_huge_pages && __vma_private_lock(vma) && in __unmap_hugepage_range()
5613 int rc = vma_needs_reservation(h, vma, address); in __unmap_hugepage_range()
5624 vma_add_reservation(h, vma, address); in __unmap_hugepage_range()
5627 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
5720 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
5729 address = address & huge_page_mask(h); in unmap_ref_private()
5762 address + huge_page_size(h), page, 0); in unmap_ref_private()
5780 struct hstate *h = hstate_vma(vma); in hugetlb_wp() local
5886 idx = vma_hugecache_offset(h, vma, vmf->address); in hugetlb_wp()
5898 huge_page_size(h)); in hugetlb_wp()
5923 ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_wp()
5929 vmf->address + huge_page_size(h)); in hugetlb_wp()
5937 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h)); in hugetlb_wp()
5948 huge_page_size(h)); in hugetlb_wp()
5961 restore_reserve_on_error(h, vma, vmf->address, new_folio); in hugetlb_wp()
5975 bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
5993 struct hstate *h = hstate_inode(inode); in hugetlb_add_to_page_cache() local
5996 idx <<= huge_page_order(h); in hugetlb_add_to_page_cache()
6013 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
6039 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr, in hugetlb_pte_stable() argument
6045 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_pte_stable()
6057 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
6083 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff); in hugetlb_no_page()
6085 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
6107 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { in hugetlb_no_page()
6136 if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) in hugetlb_no_page()
6157 restore_reserve_on_error(h, vma, vmf->address, in hugetlb_no_page()
6176 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
6185 if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) { in hugetlb_no_page()
6201 if (vma_needs_reservation(h, vma, vmf->address) < 0) { in hugetlb_no_page()
6206 vma_end_reservation(h, vma, vmf->address); in hugetlb_no_page()
6209 vmf->ptl = huge_pte_lock(h, mm, vmf->pte); in hugetlb_no_page()
6227 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h)); in hugetlb_no_page()
6229 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
6263 restore_reserve_on_error(h, vma, vmf->address, folio); in hugetlb_no_page()
6301 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
6306 .address = address & huge_page_mask(h), in hugetlb_fault()
6309 .pgoff = vma_hugecache_offset(h, vma, in hugetlb_fault()
6310 address & huge_page_mask(h)), in hugetlb_fault()
6334 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h)); in hugetlb_fault()
6349 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
6386 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
6399 if (vma_needs_reservation(h, vma, vmf.address) < 0) { in hugetlb_fault()
6404 vma_end_reservation(h, vma, vmf.address); in hugetlb_fault()
6406 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, in hugetlb_fault()
6412 vmf.ptl = huge_pte_lock(h, mm, vmf.pte); in hugetlb_fault()
6502 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, in alloc_hugetlb_folio_vma() argument
6511 gfp_mask = htlb_alloc_mask(h); in alloc_hugetlb_folio_vma()
6519 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); in alloc_hugetlb_folio_vma()
6539 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mfill_atomic_pte() local
6541 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6542 unsigned long size = huge_page_size(h); in hugetlb_mfill_atomic_pte()
6552 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6572 folio = filemap_lock_hugetlb_folio(h, mapping, idx); in hugetlb_mfill_atomic_pte()
6581 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6601 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6607 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6621 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6664 if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h))) in hugetlb_mfill_atomic_pte()
6679 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6723 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mfill_atomic_pte()
6742 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6756 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
6757 long pages = 0, psize = huge_page_size(h); in hugetlb_change_protection()
6779 last_addr_mask = hugetlb_mask_last_page(h); in hugetlb_change_protection()
6798 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
6892 return pages > 0 ? (pages << h->order) : pages; in hugetlb_change_protection()
6902 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
6958 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6959 chg * pages_per_huge_page(h), &h_cg) < 0) in hugetlb_reserve_pages()
6966 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
6982 if (hugetlb_acct_memory(h, gbl_reserve) < 0) in hugetlb_reserve_pages()
6997 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
7000 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
7017 hstate_index(h), in hugetlb_reserve_pages()
7018 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
7022 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
7039 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
7040 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
7059 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
7081 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
7092 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
7363 unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7365 unsigned long hp_size = huge_page_size(h); in hugetlb_mask_last_page()
7378 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7381 if (huge_page_size(h) == PMD_SIZE) in hugetlb_mask_last_page()
7448 struct hstate *h = folio_hstate(old_folio); in move_hugetlb_state() local
7478 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7479 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7480 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7490 struct hstate *h = hstate_vma(vma); in hugetlb_unshare_pmds() local
7491 unsigned long sz = huge_page_size(h); in hugetlb_unshare_pmds()
7518 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_unshare_pmds()