Lines Matching refs:resv_map
91 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
274 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_lock_read() local
276 down_read(&resv_map->rw_sema); in hugetlb_vma_lock_read()
287 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_unlock_read() local
289 up_read(&resv_map->rw_sema); in hugetlb_vma_unlock_read()
300 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_lock_write() local
302 down_write(&resv_map->rw_sema); in hugetlb_vma_lock_write()
313 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_unlock_write() local
315 up_write(&resv_map->rw_sema); in hugetlb_vma_unlock_write()
327 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_trylock_write() local
329 return down_write_trylock(&resv_map->rw_sema); in hugetlb_vma_trylock_write()
342 struct resv_map *resv_map = vma_resv_map(vma); in hugetlb_vma_assert_locked() local
344 lockdep_assert_held(&resv_map->rw_sema); in hugetlb_vma_assert_locked()
378 struct resv_map *resv_map = vma_resv_map(vma); in __hugetlb_vma_unlock_write_free() local
381 up_write(&resv_map->rw_sema); in __hugetlb_vma_unlock_write_free()
439 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) in get_file_region_entry_from_cache()
469 struct resv_map *resv, in record_hugetlb_cgroup_uncharge_info()
521 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) in coalesce_file_region()
549 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, in hugetlb_resv_map_add()
574 static long add_reservation_in_range(struct resv_map *resv, long f, long t, in add_reservation_in_range()
636 static int allocate_file_region_entries(struct resv_map *resv, in allocate_file_region_entries()
707 static long region_add(struct resv_map *resv, long f, long t, in region_add()
774 static long region_chg(struct resv_map *resv, long f, long t, in region_chg()
810 static void region_abort(struct resv_map *resv, long f, long t, in region_abort()
833 static long region_del(struct resv_map *resv, long f, long t) in region_del()
960 static long region_count(struct resv_map *resv, long f, long t) in region_count()
1066 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, in resv_map_set_hugetlb_cgroup_uncharge_info() argument
1072 resv_map->reservation_counter = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
1073 resv_map->pages_per_hpage = 0; in resv_map_set_hugetlb_cgroup_uncharge_info()
1074 resv_map->css = NULL; in resv_map_set_hugetlb_cgroup_uncharge_info()
1076 resv_map->reservation_counter = in resv_map_set_hugetlb_cgroup_uncharge_info()
1078 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1079 resv_map->css = &h_cg->css; in resv_map_set_hugetlb_cgroup_uncharge_info()
1084 struct resv_map *resv_map_alloc(void) in resv_map_alloc()
1086 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); in resv_map_alloc() local
1089 if (!resv_map || !rg) { in resv_map_alloc()
1090 kfree(resv_map); in resv_map_alloc()
1095 kref_init(&resv_map->refs); in resv_map_alloc()
1096 spin_lock_init(&resv_map->lock); in resv_map_alloc()
1097 INIT_LIST_HEAD(&resv_map->regions); in resv_map_alloc()
1098 init_rwsem(&resv_map->rw_sema); in resv_map_alloc()
1100 resv_map->adds_in_progress = 0; in resv_map_alloc()
1107 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); in resv_map_alloc()
1109 INIT_LIST_HEAD(&resv_map->region_cache); in resv_map_alloc()
1110 list_add(&rg->link, &resv_map->region_cache); in resv_map_alloc()
1111 resv_map->region_cache_count = 1; in resv_map_alloc()
1113 return resv_map; in resv_map_alloc()
1118 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); in resv_map_release() local
1119 struct list_head *head = &resv_map->region_cache; in resv_map_release()
1123 region_del(resv_map, 0, LONG_MAX); in resv_map_release()
1131 VM_BUG_ON(resv_map->adds_in_progress); in resv_map_release()
1133 kfree(resv_map); in resv_map_release()
1136 static inline struct resv_map *inode_resv_map(struct inode *inode) in inode_resv_map()
1146 return (struct resv_map *)(&inode->i_data)->i_private_data; in inode_resv_map()
1149 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map()
1159 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
1164 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map()
1239 struct resv_map *reservations = vma_resv_map(vma); in clear_vma_resv_huge_pages()
2640 struct resv_map *resv; in __vma_reservation_common()
5011 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
5050 struct resv_map *resv; in hugetlb_vm_op_close()
6904 struct resv_map *resv_map; in hugetlb_reserve_pages() local
6940 resv_map = inode_resv_map(inode); in hugetlb_reserve_pages()
6942 chg = region_chg(resv_map, from, to, &regions_needed); in hugetlb_reserve_pages()
6945 resv_map = resv_map_alloc(); in hugetlb_reserve_pages()
6946 if (!resv_map) in hugetlb_reserve_pages()
6951 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
6966 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
6997 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
7048 region_abort(resv_map, from, to, regions_needed); in hugetlb_reserve_pages()
7050 kref_put(&resv_map->refs, resv_map_release); in hugetlb_reserve_pages()
7060 struct resv_map *resv_map = inode_resv_map(inode); in hugetlb_unreserve_pages() local
7069 if (resv_map) { in hugetlb_unreserve_pages()
7070 chg = region_del(resv_map, start, end); in hugetlb_unreserve_pages()