
Searched refs:fbatch (Results 1 – 25 of 33) sorted by relevance


/linux-6.12.1/include/linux/
pagevec.h 41 static inline void folio_batch_init(struct folio_batch *fbatch) in folio_batch_init() argument
43 fbatch->nr = 0; in folio_batch_init()
44 fbatch->i = 0; in folio_batch_init()
45 fbatch->percpu_pvec_drained = false; in folio_batch_init()
48 static inline void folio_batch_reinit(struct folio_batch *fbatch) in folio_batch_reinit() argument
50 fbatch->nr = 0; in folio_batch_reinit()
51 fbatch->i = 0; in folio_batch_reinit()
54 static inline unsigned int folio_batch_count(struct folio_batch *fbatch) in folio_batch_count() argument
56 return fbatch->nr; in folio_batch_count()
59 static inline unsigned int folio_batch_space(struct folio_batch *fbatch) in folio_batch_space() argument
[all …]
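
The four accessors above make up the whole folio_batch bookkeeping API: init/reinit zero the count and cursor, folio_batch_count() reads the fill level, and folio_batch_space() reports the free slots (out of PAGEVEC_SIZE, i.e. 31). A minimal usage sketch, assuming only <linux/pagevec.h>; example_batch_walk() and process_one() are hypothetical names:

#include <linux/pagevec.h>

static void process_one(struct folio *folio);   /* hypothetical consumer */

static void example_batch_walk(struct folio **src, unsigned int n)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);      /* nr = 0, i = 0, percpu_pvec_drained = false */

        /* Fill until the source or the batch's PAGEVEC_SIZE slots run out. */
        for (i = 0; i < n && folio_batch_space(&fbatch); i++)
                folio_batch_add(&fbatch, src[i]);

        for (i = 0; i < folio_batch_count(&fbatch); i++)
                process_one(fbatch.folios[i]);

        /* Resets nr and i only; no references are dropped here. */
        folio_batch_reinit(&fbatch);
}
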
pagemap.h 975 pgoff_t end, struct folio_batch *fbatch);
977 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
979 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
1291 struct folio_batch *fbatch);
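
The pagemap.h declarations are the batched page-cache lookup family: filemap_get_folios(), filemap_get_folios_contig(), and filemap_get_folios_tag() all fill a caller-supplied folio_batch and advance *start past the last folio returned. A sketch of the lookup/process/release loop they are built for; example_scan() and prune_one() are hypothetical:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void prune_one(struct folio *folio);     /* hypothetical consumer */

static void example_scan(struct address_space *mapping, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        /* index is advanced by the lookup, so each pass resumes where
         * the previous batch ended. */
        while (filemap_get_folios(mapping, &index, end, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        prune_one(fbatch.folios[i]);
                folio_batch_release(&fbatch);   /* drop the refs the lookup took */
                cond_resched();
        }
}
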
/linux-6.12.1/mm/
swap.c 124 struct folio_batch fbatch; in put_pages_list() local
127 folio_batch_init(&fbatch); in put_pages_list()
136 if (folio_batch_add(&fbatch, folio) > 0) in put_pages_list()
138 free_unref_folios(&fbatch); in put_pages_list()
141 if (fbatch.nr) in put_pages_list()
142 free_unref_folios(&fbatch); in put_pages_list()
189 static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) in folio_batch_move_lru() argument
195 for (i = 0; i < folio_batch_count(fbatch); i++) { in folio_batch_move_lru()
196 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
206 folios_put(fbatch); in folio_batch_move_lru()
[all …]
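
put_pages_list() shows the flush-when-full idiom: folio_batch_add() returns the space remaining, so a return of 0 means the batch just filled and must be drained before the next add, and a final non-empty check catches the leftover partial batch. The idiom in isolation; drain_batch() is a hypothetical stand-in for free_unref_folios(), which is mm-internal:

#include <linux/pagevec.h>

static void drain_batch(struct folio_batch *fbatch);   /* hypothetical flush */

static void example_fill(struct folio *(*next_folio)(void *), void *cookie)
{
        struct folio_batch fbatch;
        struct folio *folio;

        folio_batch_init(&fbatch);
        while ((folio = next_folio(cookie))) {
                /* > 0: room left, keep batching; 0: batch is now full. */
                if (folio_batch_add(&fbatch, folio) > 0)
                        continue;
                drain_batch(&fbatch);
                folio_batch_reinit(&fbatch);    /* assumes the drain handled the refs */
        }
        if (folio_batch_count(&fbatch))         /* leftover partial batch */
                drain_batch(&fbatch);
}
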
truncate.c 43 struct folio_batch *fbatch, pgoff_t *indices) in clear_shadow_entries() argument
54 for (i = 0; i < folio_batch_count(fbatch); i++) { in clear_shadow_entries()
55 struct folio *folio = fbatch->folios[i]; in clear_shadow_entries()
73 struct folio_batch *fbatch, pgoff_t *indices) in truncate_folio_batch_exceptionals() argument
82 for (j = 0; j < folio_batch_count(fbatch); j++) in truncate_folio_batch_exceptionals()
83 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
86 if (j == folio_batch_count(fbatch)) in truncate_folio_batch_exceptionals()
95 for (i = j; i < folio_batch_count(fbatch); i++) { in truncate_folio_batch_exceptionals()
96 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
100 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
[all …]
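
truncate_folio_batch_exceptionals() first scans for the first exceptional (xa_is_value()) entry, then compacts the batch with the classic two-index idiom: i reads every slot, entries worth keeping are copied down to slot j, and nr is trimmed to j afterwards. The same idiom in isolation, with a hypothetical keep() predicate:

#include <linux/pagevec.h>

static void example_compact(struct folio_batch *fbatch,
                            bool (*keep)(struct folio *))
{
        unsigned int i, j = 0;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                if (keep(folio))
                        fbatch->folios[j++] = folio;    /* slide survivors down */
        }
        fbatch->nr = j;         /* discard the filtered-out tail */
}
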
mlock.c 33 struct folio_batch fbatch; member
186 static void mlock_folio_batch(struct folio_batch *fbatch) in mlock_folio_batch() argument
193 for (i = 0; i < folio_batch_count(fbatch); i++) { in mlock_folio_batch()
194 folio = fbatch->folios[i]; in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
209 folios_put(fbatch); in mlock_folio_batch()
214 struct folio_batch *fbatch; in mlock_drain_local() local
217 fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); in mlock_drain_local()
218 if (folio_batch_count(fbatch)) in mlock_drain_local()
219 mlock_folio_batch(fbatch); in mlock_drain_local()
[all …]
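
mlock keeps one folio_batch per CPU and drains it lazily: mlock_drain_local() takes the CPU-local lock, then only calls mlock_folio_batch() if the local batch is non-empty. A stripped-down sketch of that per-CPU arrangement; the example_* names are hypothetical, and folio_batch_release() stands in for mlock_folio_batch():

#include <linux/local_lock.h>
#include <linux/pagevec.h>
#include <linux/percpu.h>

struct example_pcpu_batch {
        local_lock_t lock;
        struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct example_pcpu_batch, example_batch) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void example_drain_local(void)
{
        struct folio_batch *fbatch;

        local_lock(&example_batch.lock);        /* pin us to this CPU's batch */
        fbatch = this_cpu_ptr(&example_batch.fbatch);
        if (folio_batch_count(fbatch))
                folio_batch_release(fbatch);    /* stand-in for mlock_folio_batch() */
        local_unlock(&example_batch.lock);
}
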
filemap.c 288 struct folio_batch *fbatch) in page_cache_delete_batch() argument
290 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
297 if (i >= folio_batch_count(fbatch)) in page_cache_delete_batch()
310 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
312 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
329 struct folio_batch *fbatch) in delete_from_page_cache_batch() argument
333 if (!folio_batch_count(fbatch)) in delete_from_page_cache_batch()
338 for (i = 0; i < folio_batch_count(fbatch); i++) { in delete_from_page_cache_batch()
339 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
344 page_cache_delete_batch(mapping, fbatch); in delete_from_page_cache_batch()
[all …]
shmem.c 944 struct folio_batch fbatch; in shmem_unlock_mapping() local
947 folio_batch_init(&fbatch); in shmem_unlock_mapping()
952 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { in shmem_unlock_mapping()
953 check_move_unevictable_folios(&fbatch); in shmem_unlock_mapping()
954 folio_batch_release(&fbatch); in shmem_unlock_mapping()
998 struct folio_batch fbatch; in shmem_undo_range() local
1012 folio_batch_init(&fbatch); in shmem_undo_range()
1015 &fbatch, indices)) { in shmem_undo_range()
1016 for (i = 0; i < folio_batch_count(&fbatch); i++) { in shmem_undo_range()
1017 folio = fbatch.folios[i]; in shmem_undo_range()
[all …]
page-writeback.c 2485 folio = folio_batch_next(&wbc->fbatch); in writeback_get_folio()
2487 folio_batch_release(&wbc->fbatch); in writeback_get_folio()
2490 wbc_to_tag(wbc), &wbc->fbatch); in writeback_get_folio()
2491 folio = folio_batch_next(&wbc->fbatch); in writeback_get_folio()
2536 folio_batch_init(&wbc->fbatch); in writeback_iter()
2616 folio_batch_release(&wbc->fbatch); in writeback_iter()
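
writeback_get_folio() treats wbc->fbatch as a cursor rather than a scratch buffer: folio_batch_next() hands out one folio at a time, and when the batch runs dry it is released and refilled from filemap_get_folios_tag(). Roughly this shape, with the wbc bookkeeping elided and PAGECACHE_TAG_DIRTY hard-coded where the real code uses wbc_to_tag():

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static struct folio *example_next_folio(struct address_space *mapping,
                                        struct folio_batch *fbatch,
                                        pgoff_t *index, pgoff_t end)
{
        struct folio *folio;

        folio = folio_batch_next(fbatch);       /* next cached result, if any */
        if (!folio) {
                folio_batch_release(fbatch);    /* done with the previous batch */
                filemap_get_folios_tag(mapping, index, end,
                                       PAGECACHE_TAG_DIRTY, fbatch);
                folio = folio_batch_next(fbatch);       /* NULL: nothing left */
        }
        return folio;
}
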
internal.h 406 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
408 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
744 void free_unref_folios(struct folio_batch *fbatch);
/linux-6.12.1/fs/nilfs2/
page.c 247 struct folio_batch fbatch; in nilfs_copy_dirty_pages() local
252 folio_batch_init(&fbatch); in nilfs_copy_dirty_pages()
255 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_copy_dirty_pages()
258 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_dirty_pages()
259 struct folio *folio = fbatch.folios[i], *dfolio; in nilfs_copy_dirty_pages()
283 folio_batch_release(&fbatch); in nilfs_copy_dirty_pages()
302 struct folio_batch fbatch; in nilfs_copy_back_pages() local
306 folio_batch_init(&fbatch); in nilfs_copy_back_pages()
308 n = filemap_get_folios(smap, &start, ~0UL, &fbatch); in nilfs_copy_back_pages()
312 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_copy_back_pages()
[all …]
segment.c 702 struct folio_batch fbatch; in nilfs_lookup_dirty_data_buffers() local
716 folio_batch_init(&fbatch); in nilfs_lookup_dirty_data_buffers()
720 PAGECACHE_TAG_DIRTY, &fbatch)) in nilfs_lookup_dirty_data_buffers()
723 for (i = 0; i < folio_batch_count(&fbatch); i++) { in nilfs_lookup_dirty_data_buffers()
725 struct folio *folio = fbatch.folios[i]; in nilfs_lookup_dirty_data_buffers()
747 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
753 folio_batch_release(&fbatch); in nilfs_lookup_dirty_data_buffers()
763 struct folio_batch fbatch; in nilfs_lookup_dirty_node_buffers() local
770 folio_batch_init(&fbatch); in nilfs_lookup_dirty_node_buffers()
773 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { in nilfs_lookup_dirty_node_buffers()
[all …]
/linux-6.12.1/fs/ramfs/
file-nommu.c 208 struct folio_batch fbatch; in ramfs_nommu_get_unmapped_area() local
224 folio_batch_init(&fbatch); in ramfs_nommu_get_unmapped_area()
228 ULONG_MAX, &fbatch); in ramfs_nommu_get_unmapped_area()
235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
250 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
256 folio_batch_release(&fbatch); in ramfs_nommu_get_unmapped_area()
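
ramfs_nommu_get_unmapped_area() walks the batch verifying that every folio begins exactly where the previous one ended (its pfn equals the first folio's pfn plus the pages accumulated so far); on !MMU a mapping is only viable over physically contiguous memory. The check in isolation, as a hypothetical helper:

#include <linux/mm.h>
#include <linux/pagevec.h>

/* True if the folios in the batch form one physically contiguous run. */
static bool example_batch_is_contig(struct folio_batch *fbatch)
{
        unsigned long pfn, nr_pages = 0;
        unsigned int i;

        if (!folio_batch_count(fbatch))
                return false;
        pfn = folio_pfn(fbatch->folios[0]);

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                if (pfn + nr_pages != folio_pfn(fbatch->folios[i]))
                        return false;           /* gap in the physical run */
                nr_pages += folio_nr_pages(fbatch->folios[i]);
        }
        return true;
}
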
/linux-6.12.1/fs/bcachefs/
fs-io-pagecache.c 255 struct folio_batch fbatch; in bch2_mark_pagecache_unallocated() local
261 folio_batch_init(&fbatch); in bch2_mark_pagecache_unallocated()
264 &index, end_index, &fbatch)) { in bch2_mark_pagecache_unallocated()
265 for (i = 0; i < folio_batch_count(&fbatch); i++) { in bch2_mark_pagecache_unallocated()
266 struct folio *folio = fbatch.folios[i]; in bch2_mark_pagecache_unallocated()
287 folio_batch_release(&fbatch); in bch2_mark_pagecache_unallocated()
299 struct folio_batch fbatch; in bch2_mark_pagecache_reserved() local
306 folio_batch_init(&fbatch); in bch2_mark_pagecache_reserved()
309 &index, end_index, &fbatch)) { in bch2_mark_pagecache_reserved()
310 for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) { in bch2_mark_pagecache_reserved()
[all …]
/linux-6.12.1/drivers/gpu/drm/
drm_gem.c 505 static void drm_gem_check_release_batch(struct folio_batch *fbatch) in drm_gem_check_release_batch() argument
507 check_move_unevictable_folios(fbatch); in drm_gem_check_release_batch()
508 __folio_batch_release(fbatch); in drm_gem_check_release_batch()
542 struct folio_batch fbatch; in drm_gem_get_pages() local
589 folio_batch_init(&fbatch); in drm_gem_get_pages()
593 if (!folio_batch_add(&fbatch, f)) in drm_gem_get_pages()
594 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
597 if (fbatch.nr) in drm_gem_get_pages()
598 drm_gem_check_release_batch(&fbatch); in drm_gem_get_pages()
617 struct folio_batch fbatch; in drm_gem_put_pages() local
[all …]
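
drm_gem_get_pages() batches its shmem-backed folios and, on every flush, first moves them off the unevictable LRU via check_move_unevictable_folios() before dropping the references with __folio_batch_release(); that pairing is the point of drm_gem_check_release_batch() above. A sketch of the overall fill-and-flush shape around such a helper (the example_* names are hypothetical):

#include <linux/pagevec.h>
#include <linux/swap.h>         /* check_move_unevictable_folios() */

static void example_release_batch(struct folio_batch *fbatch)
{
        check_move_unevictable_folios(fbatch);  /* fix up LRU placement first */
        __folio_batch_release(fbatch);          /* then drop refs, reset batch */
}

static void example_put_folios(struct folio **folios, unsigned int n)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < n; i++) {
                if (!folio_batch_add(&fbatch, folios[i]))
                        example_release_batch(&fbatch); /* batch just filled */
        }
        if (fbatch.nr)                  /* flush the final partial batch */
                example_release_batch(&fbatch);
}
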
/linux-6.12.1/drivers/gpu/drm/i915/gem/
i915_gem_shmem.c 25 static void check_release_folio_batch(struct folio_batch *fbatch) in check_release_folio_batch() argument
27 check_move_unevictable_folios(fbatch); in check_release_folio_batch()
28 __folio_batch_release(fbatch); in check_release_folio_batch()
36 struct folio_batch fbatch; in shmem_sg_free_table() local
42 folio_batch_init(&fbatch); in shmem_sg_free_table()
54 if (!folio_batch_add(&fbatch, folio)) in shmem_sg_free_table()
55 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
57 if (fbatch.nr) in shmem_sg_free_table()
58 check_release_folio_batch(&fbatch); in shmem_sg_free_table()
/linux-6.12.1/fs/gfs2/
aops.c 179 struct folio_batch *fbatch, in gfs2_write_jdata_batch() argument
188 int nr_folios = folio_batch_count(fbatch); in gfs2_write_jdata_batch()
191 size += folio_size(fbatch->folios[i]); in gfs2_write_jdata_batch()
199 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
282 struct folio_batch fbatch; in gfs2_write_cache_jdata() local
292 folio_batch_init(&fbatch); in gfs2_write_cache_jdata()
319 tag, &fbatch); in gfs2_write_cache_jdata()
323 ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, in gfs2_write_cache_jdata()
329 folio_batch_release(&fbatch); in gfs2_write_cache_jdata()
/linux-6.12.1/fs/ceph/
addr.c 955 struct folio_batch fbatch; in ceph_writepages_start() local
985 folio_batch_init(&fbatch); in ceph_writepages_start()
1052 end, tag, &fbatch); in ceph_writepages_start()
1057 page = &fbatch.folios[i]->page; in ceph_writepages_start()
1199 fbatch.folios[i] = NULL; in ceph_writepages_start()
1210 if (!fbatch.folios[j]) in ceph_writepages_start()
1213 fbatch.folios[n] = fbatch.folios[j]; in ceph_writepages_start()
1216 fbatch.nr = n; in ceph_writepages_start()
1221 folio_batch_release(&fbatch); in ceph_writepages_start()
1374 (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL); in ceph_writepages_start()
[all …]
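
ceph_writepages_start() compacts its batch in two passes: consumed folios are first overwritten with NULL in place, then a second pass squeezes the survivors together and shrinks fbatch.nr, so the eventual folio_batch_release() only drops references the batch still owns. The squeeze pass as a hypothetical helper:

#include <linux/pagevec.h>

/* Remove the NULL slots left by an earlier pass, preserving order. */
static void example_squeeze(struct folio_batch *fbatch)
{
        unsigned int j, n = 0;

        for (j = 0; j < folio_batch_count(fbatch); j++) {
                if (!fbatch->folios[j])
                        continue;               /* consumed earlier, skip */
                if (n < j)
                        fbatch->folios[n] = fbatch->folios[j];
                n++;
        }
        fbatch->nr = n;
}
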
/linux-6.12.1/fs/f2fs/
node.c 1553 struct folio_batch fbatch; in last_fsync_dnode() local
1557 folio_batch_init(&fbatch); in last_fsync_dnode()
1562 &fbatch))) { in last_fsync_dnode()
1566 struct page *page = &fbatch.folios[i]->page; in last_fsync_dnode()
1570 folio_batch_release(&fbatch); in last_fsync_dnode()
1601 folio_batch_release(&fbatch); in last_fsync_dnode()
1770 struct folio_batch fbatch; in f2fs_fsync_node_pages() local
1784 folio_batch_init(&fbatch); in f2fs_fsync_node_pages()
1789 &fbatch))) { in f2fs_fsync_node_pages()
1793 struct page *page = &fbatch.folios[i]->page; in f2fs_fsync_node_pages()
[all …]
checkpoint.c 427 struct folio_batch fbatch; in f2fs_sync_meta_pages() local
435 folio_batch_init(&fbatch); in f2fs_sync_meta_pages()
441 PAGECACHE_TAG_DIRTY, &fbatch))) { in f2fs_sync_meta_pages()
445 struct folio *folio = fbatch.folios[i]; in f2fs_sync_meta_pages()
449 folio_nr_pages(fbatch.folios[i-1])) { in f2fs_sync_meta_pages()
450 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
482 folio_batch_release(&fbatch); in f2fs_sync_meta_pages()
/linux-6.12.1/fs/btrfs/tests/
extent-io-tests.c 25 struct folio_batch fbatch; in process_page_range() local
32 folio_batch_init(&fbatch); in process_page_range()
36 end_index, &fbatch); in process_page_range()
38 struct folio *folio = fbatch.folios[i]; in process_page_range()
48 folio_batch_release(&fbatch); in process_page_range()
/linux-6.12.1/drivers/gpu/drm/i915/
i915_gpu_error.c 192 static void pool_fini(struct folio_batch *fbatch) in pool_fini() argument
194 folio_batch_release(fbatch); in pool_fini()
197 static int pool_refill(struct folio_batch *fbatch, gfp_t gfp) in pool_refill() argument
199 while (folio_batch_space(fbatch)) { in pool_refill()
206 folio_batch_add(fbatch, folio); in pool_refill()
212 static int pool_init(struct folio_batch *fbatch, gfp_t gfp) in pool_init() argument
216 folio_batch_init(fbatch); in pool_init()
218 err = pool_refill(fbatch, gfp); in pool_init()
220 pool_fini(fbatch); in pool_init()
225 static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp) in pool_alloc() argument
[all …]
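
i915_gpu_error.c repurposes a folio_batch as a tiny preallocated pool for the error-capture path: pool_refill() tops the batch up with freshly allocated folios while folio_batch_space() is non-zero, and the truncated pool_alloc() presumably pops entries back out. A sketch under that reading; folio_alloc() at order 0 is assumed as the allocation call, and example_pool_pop() is a guess at the pop half:

#include <linux/gfp.h>
#include <linux/pagevec.h>

static int example_pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
        while (folio_batch_space(fbatch)) {
                struct folio *folio = folio_alloc(gfp, 0);      /* order-0 */

                if (!folio)
                        return -ENOMEM;
                folio_batch_add(fbatch, folio);
        }
        return 0;
}

/* Pop one preallocated folio, or NULL when the pool is empty. */
static struct folio *example_pool_pop(struct folio_batch *fbatch)
{
        if (!folio_batch_count(fbatch))
                return NULL;
        return fbatch->folios[--fbatch->nr];
}
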
/linux-6.12.1/fs/btrfs/
extent_io.c 204 struct folio_batch fbatch; in __process_folios_contig() local
207 folio_batch_init(&fbatch); in __process_folios_contig()
212 end_index, &fbatch); in __process_folios_contig()
214 struct folio *folio = fbatch.folios[i]; in __process_folios_contig()
219 folio_batch_release(&fbatch); in __process_folios_contig()
249 struct folio_batch fbatch; in lock_delalloc_folios() local
254 folio_batch_init(&fbatch); in lock_delalloc_folios()
259 end_index, &fbatch); in lock_delalloc_folios()
264 struct folio *folio = fbatch.folios[i]; in lock_delalloc_folios()
283 folio_batch_release(&fbatch); in lock_delalloc_folios()
[all …]
compression.c 287 struct folio_batch fbatch; in end_compressed_writeback() local
295 folio_batch_init(&fbatch); in end_compressed_writeback()
298 &fbatch); in end_compressed_writeback()
304 struct folio *folio = fbatch.folios[i]; in end_compressed_writeback()
309 folio_batch_release(&fbatch); in end_compressed_writeback()
/linux-6.12.1/fs/hugetlbfs/
inode.c 651 struct folio_batch fbatch; in remove_inode_hugepages() local
656 folio_batch_init(&fbatch); in remove_inode_hugepages()
658 while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { in remove_inode_hugepages()
659 for (i = 0; i < folio_batch_count(&fbatch); ++i) { in remove_inode_hugepages()
660 struct folio *folio = fbatch.folios[i]; in remove_inode_hugepages()
676 folio_batch_release(&fbatch); in remove_inode_hugepages()
/linux-6.12.1/fs/ext4/
inode.c 1556 struct folio_batch fbatch; in mpage_release_unused_pages() local
1581 folio_batch_init(&fbatch); in mpage_release_unused_pages()
1583 nr = filemap_get_folios(mapping, &index, end, &fbatch); in mpage_release_unused_pages()
1587 struct folio *folio = fbatch.folios[i]; in mpage_release_unused_pages()
1604 folio_batch_release(&fbatch); in mpage_release_unused_pages()
2134 struct folio_batch fbatch; in mpage_map_and_submit_buffers() local
2149 folio_batch_init(&fbatch); in mpage_map_and_submit_buffers()
2151 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch); in mpage_map_and_submit_buffers()
2155 struct folio *folio = fbatch.folios[i]; in mpage_map_and_submit_buffers()
2172 folio_batch_release(&fbatch); in mpage_map_and_submit_buffers()
[all …]
