/linux-6.12.1/drivers/dma-buf/

udmabuf.c
      29  struct folio **folios;    (member)
      51  pfn = folio_pfn(ubuf->folios[pgoff]);    in udmabuf_vm_fault()
      88  pages[pg] = &ubuf->folios[pg]->page;    in vmap_udmabuf()
     126  sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,    in get_sg_table()
     200  kfree(ubuf->folios);    in release_udmabuf()
     301  struct folio **folios;    in udmabuf_create()  (local)
     325  ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),    in udmabuf_create()
     327  if (!ubuf->folios) {    in udmabuf_create()
     346  folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);    in udmabuf_create()
     347  if (!folios) {    in udmabuf_create()
    [all …]
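
The udmabuf hits show the recurring pattern of a kmalloc_array()'d array of struct folio * handed to the scatterlist API, one entry per folio. Below is a minimal sketch of that pattern, not the udmabuf code itself; folios_to_sgt() is a hypothetical helper and it assumes one PAGE_SIZE segment per folio with no per-folio offset. In the real driver the resulting table is additionally DMA-mapped for the attaching device.

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hedged sketch: describe an array of folios with an sg_table, in the
 * style of the sg_set_folio() call seen in get_sg_table() above.
 */
static struct sg_table *folios_to_sgt(struct folio **folios,
				      unsigned int nr_folios)
{
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned int i;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(sgt, nr_folios, GFP_KERNEL)) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	/* One PAGE_SIZE segment per folio, starting at offset 0. */
	for_each_sgtable_sg(sgt, sgl, i)
		sg_set_folio(sgl, folios[i], PAGE_SIZE, 0);

	return sgt;
}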

/linux-6.12.1/fs/btrfs/

accessors.c
      31  token->kaddr = folio_address(eb->folios[0]);    in btrfs_init_map_token()
      80  token->kaddr = folio_address(token->eb->folios[idx]); \
      86  token->kaddr = folio_address(token->eb->folios[idx + 1]); \
      99  char *kaddr = folio_address(eb->folios[idx]); \
     109  kaddr = folio_address(eb->folios[idx + 1]); \
     135  token->kaddr = folio_address(token->eb->folios[idx]); \
     144  token->kaddr = folio_address(token->eb->folios[idx + 1]); \
     156  char *kaddr = folio_address(eb->folios[idx]); \
     170  kaddr = folio_address(eb->folios[idx + 1]); \
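
The accessors.c macros map an offset inside an extent buffer to folio_address(eb->folios[idx]) and fall over to folios[idx + 1] when an access crosses a folio boundary. A hedged, generic sketch of that idea follows; read_from_folio_array() is a hypothetical helper, "unit" stands in for the per-folio size (PAGE_SIZE or folio_size(folios[0])), and the folios are assumed to be lowmem allocations so folio_address() is valid.

#include <linux/mm.h>
#include <linux/string.h>

/* Hedged sketch: copy bytes out of a buffer backed by an array of
 * equally sized folios, advancing to the next folio whenever the
 * remaining range crosses a folio boundary. */
static void read_from_folio_array(struct folio **folios, size_t unit,
				  size_t offset, void *dst, size_t len)
{
	u8 *out = dst;

	while (len) {
		size_t idx = offset / unit;
		size_t off = offset % unit;
		size_t part = min(len, unit - off);
		char *kaddr = folio_address(folios[idx]);

		memcpy(out, kaddr + off, part);
		out += part;
		offset += part;
		len -= part;
	}
}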

extent_io.c
     214  struct folio *folio = fbatch.folios[i];    in __process_folios_contig()
     264  struct folio *folio = fbatch.folios[i];    in lock_delalloc_folios()
     664  eb->folios[i] = page_folio(page_array[i]);    in alloc_eb_folio_array()
    1698  struct folio *folio = eb->folios[0];    in write_one_eb()
    1717  struct folio *folio = eb->folios[i];    in write_one_eb()
    1936  struct folio *folio = fbatch.folios[i];    in btree_write_cache_pages()
    2093  struct folio *folio = fbatch.folios[i];    in extent_write_cache_pages()
    2532  struct folio *folio = eb->folios[i];    in btrfs_release_extent_buffer_pages()
    2600  struct folio *folio = new->folios[i];    in btrfs_clone_extent_buffer()
    2632  ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);    in __alloc_dummy_extent_buffer()
    [all …]

compression.h
      97  u64 start, struct folio **folios, unsigned long *out_folios,
     161  u64 start, struct folio **folios, unsigned long *out_folios,
     172  u64 start, struct folio **folios, unsigned long *out_folios,
     182  u64 start, struct folio **folios, unsigned long *out_folios,

defrag.c
    1158  struct folio **folios, int nr_pages,    in defrag_one_locked_target()  (argument)
    1167  unsigned long first_index = folios[0]->index;    in defrag_one_locked_target()
    1184  folio_clear_checked(folios[i]);    in defrag_one_locked_target()
    1185  btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);    in defrag_one_locked_target()
    1201  struct folio **folios;    in defrag_one_range()  (local)
    1212  folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);    in defrag_one_range()
    1213  if (!folios)    in defrag_one_range()
    1218  folios[i] = defrag_prepare_one_folio(inode, start_index + i);    in defrag_one_range()
    1219  if (IS_ERR(folios[i])) {    in defrag_one_range()
    1220  ret = PTR_ERR(folios[i]);    in defrag_one_range()
    [all …]

extent_io.h
     121  struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];    (member)
     168  return offset_in_folio(eb->folios[0], offset + eb->start);    in get_eb_offset_in_folio()
     296  if (folio_order(eb->folios[0]))    in num_extent_folios()

zlib.c
      98  u64 start, struct folio **folios, unsigned long *out_folios,    in zlib_compress_folios()  (argument)
     139  folios[0] = out_folio;    in zlib_compress_folios()
     238  folios[nr_folios] = out_folio;    in zlib_compress_folios()
     274  folios[nr_folios] = out_folio;    in zlib_compress_folios()

lzo.c
     213  u64 start, struct folio **folios, unsigned long *out_folios,    in lzo_compress_folios()  (argument)
     268  folios, max_nr_folio,    in lzo_compress_folios()
     292  sizes_ptr = kmap_local_folio(folios[0], 0);    in lzo_compress_folios()

compression.c
      94  struct folio **folios, unsigned long *out_folios,    in compression_compress_pages()  (argument)
      99  return zlib_compress_folios(ws, mapping, start, folios,    in compression_compress_pages()
     102  return lzo_compress_folios(ws, mapping, start, folios,    in compression_compress_pages()
     105  return zstd_compress_folios(ws, mapping, start, folios,    in compression_compress_pages()
     304  struct folio *folio = fbatch.folios[i];    in end_compressed_writeback()
    1028  u64 start, struct folio **folios, unsigned long *out_folios,    in btrfs_compress_folios()  (argument)
    1038  ret = compression_compress_pages(type, workspace, mapping, start, folios,    in btrfs_compress_folios()

/linux-6.12.1/mm/

swap.c
     196  struct folio *folio = fbatch->folios[i];    in folio_batch_move_lru()
     401  struct folio *batch_folio = fbatch->folios[i];    in __lru_cache_activate_folio()
     940  void folios_put_refs(struct folio_batch *folios, unsigned int *refs)    in folios_put_refs()  (argument)
     946  for (i = 0, j = 0; i < folios->nr; i++) {    in folios_put_refs()
     947  struct folio *folio = folios->folios[i];    in folios_put_refs()
     981  folios->folios[j] = folio;    in folios_put_refs()
     987  folio_batch_reinit(folios);    in folios_put_refs()
     991  folios->nr = j;    in folios_put_refs()
     992  mem_cgroup_uncharge_folios(folios);    in folios_put_refs()
     993  free_unref_folios(folios);    in folios_put_refs()
    [all …]
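
folios_put_refs() drops a caller-supplied number of references on every folio in a batch, uncharges them from memcg and frees them in one pass. Below is a hedged sketch of the calling convention; release_folios_with_refs() is a hypothetical wrapper that follows the same fill/flush pattern as free_pages_and_swap_cache() in the swap_state.c entry further down.

#include <linux/mm.h>
#include <linux/pagevec.h>

/* Hedged sketch: release a set of folios, each with its own number of
 * pending references, via folios_put_refs().  refs[] is indexed the same
 * way as the batch; folios_put_refs() reinitialises the batch when done. */
static void release_folios_with_refs(struct folio **src, unsigned int *nrefs,
				     unsigned int count)
{
	struct folio_batch fbatch;
	unsigned int refs[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < count; i++) {
		refs[folio_batch_count(&fbatch)] = nrefs[i];
		/* folio_batch_add() returns the space left; 0 means full. */
		if (folio_batch_add(&fbatch, src[i]) == 0)
			folios_put_refs(&fbatch, refs);
	}
	if (folio_batch_count(&fbatch))
		folios_put_refs(&fbatch, refs);
}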

truncate.c
      55  struct folio *folio = fbatch->folios[i];    in clear_shadow_entries()
      83  if (xa_is_value(fbatch->folios[j]))    in truncate_folio_batch_exceptionals()
      96  struct folio *folio = fbatch->folios[i];    in truncate_folio_batch_exceptionals()
     100  fbatch->folios[j++] = folio;    in truncate_folio_batch_exceptionals()
     342  truncate_cleanup_folio(fbatch.folios[i]);    in truncate_inode_pages_range()
     345  folio_unlock(fbatch.folios[i]);    in truncate_inode_pages_range()
     389  struct folio *folio = fbatch.folios[i];    in truncate_inode_pages_range()
     486  struct folio *folio = fbatch.folios[i];    in mapping_try_invalidate()
     617  struct folio *folio = fbatch.folios[i];    in invalidate_inode_pages2_range()

swap_state.c
     317  struct folio_batch folios;    in free_pages_and_swap_cache()  (local)
     321  folio_batch_init(&folios);    in free_pages_and_swap_cache()
     326  refs[folios.nr] = 1;    in free_pages_and_swap_cache()
     329  refs[folios.nr] = encoded_nr_pages(pages[++i]);    in free_pages_and_swap_cache()
     331  if (folio_batch_add(&folios, folio) == 0)    in free_pages_and_swap_cache()
     332  folios_put_refs(&folios, refs);    in free_pages_and_swap_cache()
     334  if (folios.nr)    in free_pages_and_swap_cache()
     335  folios_put_refs(&folios, refs);    in free_pages_and_swap_cache()

gup.c
     440  void unpin_folios(struct folio **folios, unsigned long nfolios)    in unpin_folios()  (argument)
     455  if (folios[i] != folios[j])    in unpin_folios()
     458  if (folios[i])    in unpin_folios()
     459  gup_put_folio(folios[i], j - i, FOLL_PIN);    in unpin_folios()
    2287  struct folio **folios;    (member)
    2297  return pofs->folios[i];    in pofs_get_folio()
    2309  unpin_folios(pofs->folios, pofs->nr_entries);    in pofs_unpin()
    2468  struct folio **folios)    in check_and_migrate_movable_folios()
    2471  .folios = folios,    in check_and_migrate_movable_folios()
    2502  struct folio **folios)    in check_and_migrate_movable_folios()
    [all …]
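
unpin_folios() is the bulk release side of FOLL_PIN for an array of folios, coalescing runs of identical entries before dropping the pin counts. The sketch below pairs it with memfd_pin_folios(); the memfd_pin_folios() argument order shown is my reading of the 6.12 interface and should be treated as an assumption, and pin_then_unpin() is a hypothetical helper.

#include <linux/memfd.h>	/* memfd_pin_folios() declaration assumed here */
#include <linux/mm.h>

/* Hedged sketch: pin part of a memfd as folios, use them, then release
 * every pin in one call with unpin_folios(). */
static long pin_then_unpin(struct file *memfd, loff_t start, loff_t end)
{
	struct folio *folios[32];
	pgoff_t offset;
	long nr;

	nr = memfd_pin_folios(memfd, start, end, folios,
			      ARRAY_SIZE(folios), &offset);
	if (nr <= 0)
		return nr;

	/* ... operate on the pinned folios here ... */

	unpin_folios(folios, nr);
	return 0;
}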

migrate.c
    1959  LIST_HEAD(folios);    in migrate_pages_sync()
    1965  reason, &folios, split_folios, &astats,    in migrate_pages_sync()
    1974  list_splice_tail(&folios, ret_folios);    in migrate_pages_sync()
    1988  list_splice_tail_init(&folios, from);    in migrate_pages_sync()
    1990  list_move(from->next, &folios);    in migrate_pages_sync()
    1991  rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,    in migrate_pages_sync()
    1994  list_splice_tail_init(&folios, ret_folios);    in migrate_pages_sync()
    2036  LIST_HEAD(folios);    in migrate_pages()
    2064  list_cut_before(&folios, from, &folio2->lru);    in migrate_pages()
    2066  list_splice_init(from, &folios);    in migrate_pages()
    [all …]
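
migrate_pages() works on folios chained through folio->lru and repeatedly carves a batch off the front of the caller's list (the list_cut_before() and list_splice calls above). A hedged, simplified sketch of that batching idea follows; take_folio_batch() is hypothetical and uses plain list primitives rather than the real batching-limit logic in migrate.c.

#include <linux/list.h>
#include <linux/mm.h>

/* Hedged sketch: move at most @max folios from the head of @from onto the
 * private @batch list, returning how many were taken. */
static unsigned int take_folio_batch(struct list_head *from,
				     struct list_head *batch, unsigned int max)
{
	struct folio *folio, *next;
	unsigned int taken = 0;

	list_for_each_entry_safe(folio, next, from, lru) {
		if (taken == max)
			break;
		list_move_tail(&folio->lru, batch);
		taken++;
	}
	return taken;
}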

/linux-6.12.1/include/linux/

pagevec.h
      32  struct folio *folios[PAGEVEC_SIZE];    (member)
      77  fbatch->folios[fbatch->nr++] = folio;    in folio_batch_add()
      93  return fbatch->folios[fbatch->i++];    in folio_batch_next()
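
struct folio_batch is the small fixed-size (PAGEVEC_SIZE) carrier behind most of the fbatch.folios[i] hits in this listing. Below is a minimal sketch of the usual fill/drain loop over a mapping; walk_mapping_folios() is a hypothetical helper built only from filemap_get_folios(), folio_batch_count() and folio_batch_release().

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hedged sketch: gather folios from a mapping a batch at a time and
 * release each batch's references when done with it. */
static void walk_mapping_folios(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, (pgoff_t)-1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* inspect or process the folio here */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}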

folio_queue.h
     237  folioq->vec.folios[slot] = folio;    in folioq_append()
     259  folioq->vec.folios[slot] = folio;    in folioq_append_mark()
     277  return folioq->vec.folios[slot];    in folioq_folio()
     318  folioq->vec.folios[slot] = NULL;    in folioq_clear()
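
A folio_queue stores folios in the fixed slots of a segment (folioq->vec.folios[slot]) rather than in a flat array. The sketch below appends folios to a single segment and reads them back by slot; it assumes the 6.12 single-argument folioq_init(), and fill_one_segment() is a hypothetical helper.

#include <linux/folio_queue.h>

/* Hedged sketch: fill one folio_queue segment until it is full, then walk
 * the occupied slots, mirroring folioq_append()/folioq_folio() above. */
static void fill_one_segment(struct folio_queue *fq,
			     struct folio **folios, unsigned int nr)
{
	unsigned int i, slot;

	folioq_init(fq);
	for (i = 0; i < nr && !folioq_full(fq); i++)
		folioq_append(fq, folios[i]);

	for (slot = 0; slot < folioq_count(fq); slot++) {
		struct folio *folio = folioq_folio(fq, slot);

		/* use the folio in this slot */
	}
}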

/linux-6.12.1/tools/mm/

thpmaps
     252  folios = indexes[index_next:index_end][heads[index_next:index_end]]
     256  nr = (int(folios[0]) if len(folios) else index_end) - index_next
     261  if len(folios):
     264  nr = index_end - int(folios[-1])
     265  folios = folios[:-1]
     270  if len(folios):
     271  folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1]))
     273  for index, order in zip(folios, folio_orders):

/linux-6.12.1/fs/ramfs/

file-nommu.c
     235  ret = (unsigned long) folio_address(fbatch.folios[0]);    in ramfs_nommu_get_unmapped_area()
     236  pfn = folio_pfn(fbatch.folios[0]);    in ramfs_nommu_get_unmapped_area()
     240  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {    in ramfs_nommu_get_unmapped_area()
     244  nr_pages += folio_nr_pages(fbatch.folios[loop]);    in ramfs_nommu_get_unmapped_area()
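
ramfs_nommu_get_unmapped_area() only succeeds when the folios backing the range are physically contiguous, which it checks by comparing folio_pfn() of each folio against a running pfn + folio_nr_pages() total. A hedged stand-alone sketch of that check; folios_physically_contiguous() is a hypothetical helper over an already-filled folio_batch.

#include <linux/mm.h>
#include <linux/pagevec.h>

/* Hedged sketch: return true if every folio in the batch starts exactly
 * where the previous one ended in physical memory. */
static bool folios_physically_contiguous(struct folio_batch *fbatch)
{
	unsigned long pfn, nr = 0;
	unsigned int i;

	if (!folio_batch_count(fbatch))
		return false;

	pfn = folio_pfn(fbatch->folios[0]);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		if (folio_pfn(fbatch->folios[i]) != pfn + nr)
			return false;
		nr += folio_nr_pages(fbatch->folios[i]);
	}
	return true;
}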

/linux-6.12.1/fs/smb/client/

compress.c
     164  struct folio *folios[16], *folio;    in collect_sample()  (local)
     174  nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),    in collect_sample()
     180  folio = folios[i];    in collect_sample()
     209  } while (nr == ARRAY_SIZE(folios));    in collect_sample()
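
collect_sample() pulls folios out of an iterator's xarray in chunks of 16 with xa_extract() and loops while each chunk comes back full. A hedged sketch of that extraction loop is below; sample_xarray_folios() is a hypothetical helper that only advances the index past each returned folio rather than doing any real sampling.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hedged sketch: extract up to 16 present folios at a time from an
 * xarray, continuing until a partial chunk signals the end of the range. */
static void sample_xarray_folios(struct xarray *xa, pgoff_t index, pgoff_t last)
{
	struct folio *folios[16];
	unsigned int nr, i;

	do {
		nr = xa_extract(xa, (void **)folios, index, last,
				ARRAY_SIZE(folios), XA_PRESENT);
		for (i = 0; i < nr; i++) {
			struct folio *folio = folios[i];

			/* sample data from the folio here */
			index = folio_next_index(folio);
		}
	} while (nr == ARRAY_SIZE(folios));
}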

/linux-6.12.1/Documentation/core-api/

folio_queue.rst
      13  * Adding and removing folios
      24  The folio_queue struct forms a single segment in a segmented list of folios
      68  the number of folios added.
      71  Adding and removing folios
     134  of folios added to a segments and the third is a shorthand to indicate if the
     137  Not that the count and fullness are not affected by clearing folios from the
     197  last segment is reached and the folios it refers to are entirely consumed by

/linux-6.12.1/Documentation/mm/

unevictable-lru.rst
      13  folios.
      28  folios and to hide these folios from vmscan. This mechanism is based on a patch
      72  The Unevictable LRU infrastructure maintains unevictable folios as if they were
      75  (1) We get to "treat unevictable folios just like we treat other folios in the
      80  (2) We want to be able to migrate unevictable folios between nodes for memory
      82  can only migrate folios that it can successfully isolate from the LRU
      83  lists (or "Movable" folios: outside of consideration here). If we were to
      84  maintain folios elsewhere than on an LRU-like list, where they can be
      88  anonymous, swap-backed folios. This differentiation is only important
      89  while the folios are, in fact, evictable.
    [all …]

multigen_lru.rst
      92  truncated generation number is an index to ``lrugen->folios[]``. The
      96  ``lrugen->folios[]``; otherwise it stores zero.
     100  generations, tiers do not have dedicated ``lrugen->folios[]``. In
     131  increments ``min_seq`` when ``lrugen->folios[]`` indexed by
     226  since each node and memcg combination has an LRU of folios (see
     232  the active/inactive LRU (of folios):
     255  The multi-gen LRU (of folios) can be disassembled into the following

/linux-6.12.1/fs/bcachefs/

fs-io-buffered.c
      42  folios folios;    (member)
      54  darray_push(&iter->folios, folio)) {    in readpages_iter_init()
      58  return iter->folios.nr ? 0 : -ENOMEM;    in readpages_iter_init()
      69  if (iter->idx >= iter->folios.nr)    in readpage_iter_peek()
      71  return iter->folios.data[iter->idx];    in readpage_iter_peek()
     263  readpages_iter.folios.nr -    in bch2_readahead()
     285  darray_exit(&readpages_iter.folios);    in bch2_readahead()
     779  static noinline void folios_trunc(folios *fs, struct folio **fi)    in folios_trunc()
     796  folios fs;    in __bch2_buffered_write()

fs-io-pagecache.h
       7  typedef DARRAY(struct folio *) folios;    (typedef)
      10  u64, fgf_t, gfp_t, folios *);
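
bcachefs does not use struct folio_batch here; "folios" is a typedef for a growable DARRAY of struct folio * from fs/bcachefs/darray.h. A hedged sketch of working with that vector follows; collect_and_put_folios() is a hypothetical helper placed inside fs/bcachefs/, and it assumes darray_push(), the .data[]/.nr fields and darray_exit() behave as the hits above show.

#include <linux/mm.h>
#include "darray.h"		/* fs/bcachefs/darray.h */
#include "fs-io-pagecache.h"	/* provides the "folios" typedef */

/* Hedged sketch: push caller-provided folios into a bcachefs darray,
 * walk the backing array, drop one reference per folio, then free the
 * vector's storage with darray_exit(). */
static int collect_and_put_folios(struct folio **src, unsigned int nr_src)
{
	folios fs = {};
	unsigned int i;
	int ret = 0;

	for (i = 0; i < nr_src; i++) {
		ret = darray_push(&fs, src[i]);
		if (ret)
			break;
	}

	for (i = 0; i < fs.nr; i++)
		folio_put(fs.data[i]);

	darray_exit(&fs);
	return ret;
}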

/linux-6.12.1/fs/nilfs2/

page.c
     259  struct folio *folio = fbatch.folios[i], *dfolio;    in nilfs_copy_dirty_pages()
     313  struct folio *folio = fbatch.folios[i], *dfolio;    in nilfs_copy_back_pages()
     373  struct folio *folio = fbatch.folios[i];    in nilfs_clear_dirty_pages()
     508  folio = fbatch.folios[i];    in nilfs_find_uncommitted_extent()