
Searched refs:folio_mapping (Results 1 – 23 of 23) sorted by relevance

/linux-6.12.1/mm/
page-writeback.c 2893 struct address_space *mapping = folio_mapping(folio); in folio_mark_dirty()
2952 struct address_space *mapping = folio_mapping(folio); in __folio_cancel_dirty()
2989 struct address_space *mapping = folio_mapping(folio); in folio_clear_dirty_for_io()
3074 struct address_space *mapping = folio_mapping(folio); in __folio_end_writeback()
3116 struct address_space *mapping = folio_mapping(folio); in __folio_start_writeback()
3188 trace_folio_wait_writeback(folio, folio_mapping(folio)); in folio_wait_writeback()
3210 trace_folio_wait_writeback(folio, folio_mapping(folio)); in folio_wait_writeback_killable()
3234 if (mapping_stable_writes(folio_mapping(folio))) in folio_wait_stable()
memory-failure.c 238 mapping = folio_mapping(folio); in hwpoison_filter_dev()
1083 mapping = folio_mapping(folio); in me_pagecache_clean()
1119 struct address_space *mapping = folio_mapping(folio); in me_pagecache_dirty()
1205 mapping = folio_mapping(folio); in me_huge_page()
1625 mapping = folio_mapping(folio); in hwpoison_user_mappings()
2601 if (folio_mapping(folio)) { in unpoison_memory()
2699 ret = mapping_evict_folio(folio_mapping(folio), folio); in soft_offline_in_use_page()
migrate_device.c 349 if (folio_mapping(folio)) in migrate_vma_check_page()
732 mapping = folio_mapping(folio); in __migrate_device_pages()
debug.c 73 struct address_space *mapping = folio_mapping(folio); in __dump_folio()
util.c 844 struct address_space *folio_mapping(struct folio *folio) in folio_mapping() function
861 EXPORT_SYMBOL(folio_mapping);
internal.h 434 ret = !mapping_unevictable(folio_mapping(folio)) && in folio_evictable()
456 struct address_space *mapping = folio_mapping(folio); in folio_needs_release()
rmap.c 1110 mapping = folio_mapping(folio); in folio_mkclean()
2657 struct address_space *mapping = folio_mapping(folio); in rmap_walk_file()
migrate.c 1045 struct address_space *mapping = folio_mapping(src); in move_to_new_folio()
1486 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) { in unmap_and_move_huge_page()
vmscan.c 482 if (folio_mapping(folio) == mapping) in handle_write_error()
720 BUG_ON(mapping != folio_mapping(folio)); in __remove_mapping()
948 mapping = folio_mapping(folio); in folio_check_dirty_writeback()
1324 mapping = folio_mapping(folio); in shrink_folio_list()
1402 mapping = folio_mapping(folio); in shrink_folio_list()
compaction.c 1117 mapping = folio_mapping(folio); in isolate_migratepages_block()
1175 mapping = folio_mapping(folio); in isolate_migratepages_block()
userfaultfd.c 183 bool page_in_cache = folio_mapping(folio); in mfill_atomic_install_pte()
khugepaged.c 1955 if (folio_mapping(folio) != mapping) { in collapse_file()
memcontrol-v1.c 811 struct address_space *mapping = folio_mapping(folio); in mem_cgroup_move_account()
shmem.c 1106 if (folio_mapping(folio) != mapping) { in shmem_undo_range()
hugetlb.c 1990 struct address_space *mapping = folio_mapping(folio); in hugetlb_folio_mapping_lock_write()
/linux-6.12.1/Documentation/core-api/
mm-api.rst 117 :functions: folio_mapping
/linux-6.12.1/fs/crypto/
inline_crypt.c 295 mapping = folio_mapping(folio); in bh_get_inode_and_lblk_num()
/linux-6.12.1/include/linux/
pagemap.h 535 struct address_space *folio_mapping(struct folio *);
575 return folio_mapping(folio); in folio_flush_mapping()
/linux-6.12.1/fs/nilfs2/
page.c 277 filemap_dirty_folio(folio_mapping(dfolio), dfolio); in nilfs_copy_dirty_pages()
/linux-6.12.1/arch/s390/kernel/
uv.c 232 } else if (folio_mapping(folio)) { in expected_folio_refs()
/linux-6.12.1/fs/nfs/
write.c 815 filemap_dirty_folio(folio_mapping(folio), folio); in nfs_mark_request_dirty()
1788 filemap_dirty_folio(folio_mapping(folio), folio); in nfs_commit_resched_write()
/linux-6.12.1/include/trace/events/
writeback.h 261 struct address_space *mapping = folio_mapping(folio);
/linux-6.12.1/fs/
splice.c 72 mapping = folio_mapping(folio); in page_cache_pipe_buf_try_steal()
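
Most of the call sites above share one pattern: the caller resolves the folio's backing address_space with folio_mapping() (defined at mm/util.c 844, declared at include/linux/pagemap.h 535) and treats a NULL return as "not backed by a page-cache mapping" before operating on the mapping. The fragment below is a minimal illustrative sketch of that pattern, not code from the tree indexed above; the helper name redirty_if_file_backed() is hypothetical, while folio_mapping() and filemap_dirty_folio() are the real kernel APIs used at the fs/nfs/write.c and fs/nilfs2/page.c hits.

	/* Hypothetical helper sketching the common call pattern above. */
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	static bool redirty_if_file_backed(struct folio *folio)
	{
		struct address_space *mapping = folio_mapping(folio);

		/*
		 * folio_mapping() returns NULL when the folio has no backing
		 * address_space, e.g. slab folios or anonymous folios that are
		 * not in the swap cache.
		 */
		if (!mapping)
			return false;

		/*
		 * Re-dirty the folio through its mapping, as the nfs and
		 * nilfs2 call sites above do.
		 */
		filemap_dirty_folio(mapping, folio);
		return true;
	}

Several of the hits above (migrate_device.c 349, userfaultfd.c 183) use only the NULL/non-NULL result of folio_mapping() as a "page-cache backed?" test, while the writeback, reclaim and migration paths go on to operate on the returned mapping.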