Lines matching refs: mapping (mm/readahead.c)
Each entry gives the source line number, the matching code, the enclosing function, and whether mapping is a function argument or a local variable at that point.
139 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
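These two lines show file_ra_state_init() seeding a file's readahead window from the backing device's default. A minimal usage sketch; myfs_open() is a hypothetical ->open() hook, not part of the listing:

	#include <linux/fs.h>

	/* Hypothetical filesystem open hook: initialize the per-file
	 * readahead state from the inode's address_space. */
	static int myfs_open(struct inode *inode, struct file *file)
	{
		file_ra_state_init(&file->f_ra, inode->i_mapping);
		return 0;
	}

In mainline, do_dentry_open() makes this call for every file on open, so filesystems rarely need to invoke it themselves.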
148 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
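Line 148 is where read_pages() reaches the filesystem through rac->mapping->a_ops. A condensed sketch of that dispatch (read_pages_sketch() is a simplified stand-in; the real function also handles block plugging and cleanup of leftover folios):

	#include <linux/pagemap.h>

	static void read_pages_sketch(struct readahead_control *rac)
	{
		const struct address_space_operations *aops = rac->mapping->a_ops;
		struct folio *folio;

		if (aops->readahead) {
			/* Filesystem consumes the whole batch at once. */
			aops->readahead(rac);
		} else {
			/* Fallback: read one folio at a time. */
			while ((folio = readahead_folio(rac)))
				aops->read_folio(rac->file, folio);
		}
	}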
208 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
210 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
212 unsigned int min_nrpages = mapping_min_folio_nrpages(mapping); in page_cache_ra_unbounded()
226 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
227 index = mapping_align_index(mapping, index); in page_cache_ra_unbounded()
245 struct folio *folio = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
264 mapping_min_folio_order(mapping)); in page_cache_ra_unbounded()
268 ret = filemap_add_folio(mapping, folio, index + i, gfp_mask); in page_cache_ra_unbounded()
291 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_unbounded()
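Lines 208-291 together show the core pattern of page_cache_ra_unbounded(): take the invalidate lock shared, align the start index to the mapping's minimum folio order, probe i_pages slot by slot, and insert freshly allocated folios into the holes. A condensed sketch, written as if it lived alongside the statics in mm/readahead.c; the real function flushes partial batches, tracks the readahead marker, and does PSI accounting:

	static void ra_unbounded_sketch(struct readahead_control *ractl,
					unsigned long nr_to_read)
	{
		struct address_space *mapping = ractl->mapping;
		pgoff_t index = readahead_index(ractl);
		gfp_t gfp_mask = readahead_gfp_mask(mapping);
		unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
		unsigned long i;

		filemap_invalidate_lock_shared(mapping);
		index = mapping_align_index(mapping, index);

		for (i = 0; i < nr_to_read; i += min_nrpages) {
			struct folio *folio = xa_load(&mapping->i_pages, index + i);

			/* Already cached: skip this slot (the real code
			 * submits the gathered batch at this point). */
			if (folio && !xa_is_value(folio))
				continue;

			folio = filemap_alloc_folio(gfp_mask,
					mapping_min_folio_order(mapping));
			if (!folio)
				break;
			if (filemap_add_folio(mapping, folio, index + i,
					      gfp_mask) < 0) {
				folio_put(folio);
				break;
			}
			ractl->_nr_pages += min_nrpages;
		}

		read_pages(ractl);	/* submit whatever was gathered */
		filemap_invalidate_unlock_shared(mapping);
	}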
305 struct inode *inode = ractl->mapping->host; in do_page_cache_ra()
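Line 305 is do_page_cache_ra() reaching the host inode through ractl->mapping so it can clamp the request to EOF before delegating. A sketch of that clamping, close to the upstream function (do_ra_sketch() is an illustrative name):

	static void do_ra_sketch(struct readahead_control *ractl,
				 unsigned long nr_to_read,
				 unsigned long lookahead_size)
	{
		struct inode *inode = ractl->mapping->host;
		unsigned long index = readahead_index(ractl);
		loff_t isize = i_size_read(inode);
		pgoff_t end_index;

		if (isize == 0)
			return;
		end_index = (isize - 1) >> PAGE_SHIFT;
		if (index > end_index)
			return;
		/* Never read past the page containing the last byte. */
		if (nr_to_read > end_index - index)
			nr_to_read = end_index - index + 1;

		page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
	}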
330 struct address_space *mapping = ractl->mapping; in force_page_cache_ra() local
332 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in force_page_cache_ra()
335 if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) in force_page_cache_ra()
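Line 335 is the guard in force_page_cache_ra(): if the mapping has neither ->read_folio nor ->readahead there is no way to bring pages in, so it bails out. A sketch of that guard plus the bdi-based capping and chunking that follow in the upstream function, again written as if inside mm/readahead.c:

	static void force_ra_sketch(struct readahead_control *ractl,
				    unsigned long nr_to_read)
	{
		struct address_space *mapping = ractl->mapping;
		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
		unsigned long max_pages;

		/* No way to read pages in: nothing to do. */
		if (!mapping->a_ops->read_folio && !mapping->a_ops->readahead)
			return;

		/* Cap at the larger of the readahead window and the
		 * device's optimal IO size, then issue in 2MB chunks. */
		max_pages = max_t(unsigned long, bdi->io_pages,
				  ractl->ra->ra_pages);
		nr_to_read = min(nr_to_read, max_pages);
		while (nr_to_read) {
			unsigned long chunk = min(nr_to_read,
					(2UL * 1024 * 1024) / PAGE_SIZE);

			do_page_cache_ra(ractl, chunk, 0);
			nr_to_read -= chunk;
		}
	}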
441 err = filemap_add_folio(ractl->mapping, folio, index, gfp); in ra_alloc_folio()
455 struct address_space *mapping = ractl->mapping; in page_cache_ra_order() local
458 unsigned int min_order = mapping_min_folio_order(mapping); in page_cache_ra_order()
459 pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; in page_cache_ra_order()
463 gfp_t gfp = readahead_gfp_mask(mapping); in page_cache_ra_order()
464 unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping)); in page_cache_ra_order()
470 if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size) in page_cache_ra_order()
475 if (new_order < mapping_max_folio_order(mapping)) in page_cache_ra_order()
478 new_order = min(mapping_max_folio_order(mapping), new_order); in page_cache_ra_order()
484 filemap_invalidate_lock_shared(mapping); in page_cache_ra_order()
490 ractl->_index = mapping_align_index(mapping, index); in page_cache_ra_order()
509 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_order()
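Lines 441-509 cover ra_alloc_folio() and page_cache_ra_order(), where the folio order for the window is chosen: large folios require mapping support and a big enough window, the order is stepped up gradually, then clamped between the mapping's minimum and maximum before the locked, aligned allocation pass. A sketch of that order selection, assuming it lives next to the statics in mm/readahead.c:

	static void ra_order_sketch(struct readahead_control *ractl,
				    struct file_ra_state *ra,
				    unsigned int new_order)
	{
		struct address_space *mapping = ractl->mapping;
		unsigned int min_order = mapping_min_folio_order(mapping);
		unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));

		/* No large-folio support, or a window too small to hold
		 * even a minimum-order folio: plain readahead. */
		if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size) {
			do_page_cache_ra(ractl, ra->size, ra->async_size);
			return;
		}

		/* Step the order up, clamped to what the mapping and the
		 * window size allow, but never below the minimum. */
		if (new_order < mapping_max_folio_order(mapping))
			new_order += 2;
		new_order = min(mapping_max_folio_order(mapping), new_order);
		new_order = min_t(unsigned int, new_order, ilog2(ra->size));
		new_order = max(new_order, min_order);

		filemap_invalidate_lock_shared(mapping);
		ractl->_index = mapping_align_index(mapping, readahead_index(ractl));
		/* ...allocate and add folios of new_order via
		 * ra_alloc_folio(), then submit with read_pages()... */
		filemap_invalidate_unlock_shared(mapping);
	}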
526 struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host); in ractl_max_pages()
586 miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages); in page_cache_sync_ra()
657 start = page_cache_next_miss(ractl->mapping, index + 1, max_pages); in page_cache_async_ra()
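Lines 526-657 show how the sync and async paths size their windows from page-cache context: ractl_max_pages() caps the window using the bdi behind the mapping, page_cache_sync_ra() scans backwards for the previous hole with page_cache_prev_miss(), and page_cache_async_ra() scans forwards with page_cache_next_miss(). A rough sketch of the backwards scan used to detect an interleaved sequential stream; the exact heuristics around it vary by kernel version:

	static void sync_ra_window_sketch(struct readahead_control *ractl,
					  struct file_ra_state *ra,
					  unsigned long req_count,
					  unsigned long max_pages)
	{
		pgoff_t index = readahead_index(ractl);
		pgoff_t miss;

		/* Walk backwards through i_pages for the nearest hole. */
		miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);

		if (index - miss - 1 <= req_count) {
			/* Little cached history: a small random read, so
			 * serve it as-is without touching window state. */
			do_page_cache_ra(ractl, req_count, 0);
			return;
		}
		/* Cached pages lead up to index: an interleaved sequential
		 * stream; the real code opens a full window from here. */
		ra->start = index;
	}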
734 struct address_space *mapping = ractl->mapping; in readahead_expand() local
737 gfp_t gfp_mask = readahead_gfp_mask(mapping); in readahead_expand()
738 unsigned long min_nrpages = mapping_min_folio_nrpages(mapping); in readahead_expand()
739 unsigned int min_order = mapping_min_folio_order(mapping); in readahead_expand()
751 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
760 index = mapping_align_index(mapping, index); in readahead_expand()
761 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
780 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
789 index = mapping_align_index(mapping, index); in readahead_expand()
790 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
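Lines 734-790 are readahead_expand() growing an in-flight batch in both directions; both loops repeat the same probe-then-insert pattern seen above: xa_load() to check whether the slot is already populated, mapping_align_index() to respect the minimum folio order, then filemap_add_folio(). A condensed sketch of the downward-expansion loop (the upward loop at the later lines is symmetric; PSI accounting from the real code is elided):

	static void expand_down_sketch(struct readahead_control *ractl,
				       pgoff_t new_index)
	{
		struct address_space *mapping = ractl->mapping;
		gfp_t gfp_mask = readahead_gfp_mask(mapping);
		unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
		unsigned int min_order = mapping_min_folio_order(mapping);

		while (ractl->_index > new_index) {
			unsigned long index = ractl->_index - 1;
			struct folio *folio = xa_load(&mapping->i_pages, index);

			if (folio && !xa_is_value(folio))
				return;	/* a real folio is already present */

			folio = filemap_alloc_folio(gfp_mask, min_order);
			if (!folio)
				return;
			index = mapping_align_index(mapping, index);
			if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
				folio_put(folio);
				return;
			}
			/* Grow the batch and pull its leading edge down. */
			ractl->_nr_pages += min_nrpages;
			ractl->_index = folio->index;
		}
	}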