Lines matching full:mapping — identifier cross-reference hits for `mapping`; each entry shows the source line number, the matching line, and the enclosing function (the snippets are from the kernel's mm/truncate.c).

31 static inline void __clear_shadow_entry(struct address_space *mapping,  in __clear_shadow_entry()  argument
34 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
42 static void clear_shadow_entries(struct address_space *mapping, in clear_shadow_entries() argument
48 if (shmem_mapping(mapping) || dax_mapping(mapping)) in clear_shadow_entries()
51 spin_lock(&mapping->host->i_lock); in clear_shadow_entries()
52 xa_lock_irq(&mapping->i_pages); in clear_shadow_entries()
58 __clear_shadow_entry(mapping, indices[i], folio); in clear_shadow_entries()
61 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entries()
62 if (mapping_shrinkable(mapping)) in clear_shadow_entries()
63 inode_add_lru(mapping->host); in clear_shadow_entries()
64 spin_unlock(&mapping->host->i_lock); in clear_shadow_entries()
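
The group above shows the locking pattern used when shadow (workingset) entries are dropped: the inode's i_lock is taken before the i_pages xarray lock, and the inode goes back on the LRU once the mapping has become shrinkable. A condensed sketch of that pattern, reusing the mm-internal helpers that appear in the listing (not the verbatim kernel body):

/*
 * Condensed sketch of clear_shadow_entries()' locking pattern, as visible
 * in the matches above.
 */
static void drop_shadow_entries_sketch(struct address_space *mapping,
                                       struct folio_batch *fbatch,
                                       pgoff_t *indices)
{
        int i;

        /* shmem manages its own swap entries; DAX entries are kept too */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return;

        spin_lock(&mapping->host->i_lock);      /* i_lock first ... */
        xa_lock_irq(&mapping->i_pages);         /* ... then the xarray lock */

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                if (xa_is_value(folio))         /* value entry == shadow */
                        __clear_shadow_entry(mapping, indices[i], folio);
        }

        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))        /* cache no longer pins the inode */
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);
}
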
72 static void truncate_folio_batch_exceptionals(struct address_space *mapping, in truncate_folio_batch_exceptionals() argument
79 if (shmem_mapping(mapping)) in truncate_folio_batch_exceptionals()
89 dax = dax_mapping(mapping); in truncate_folio_batch_exceptionals()
91 spin_lock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
92 xa_lock_irq(&mapping->i_pages); in truncate_folio_batch_exceptionals()
105 dax_delete_mapping_entry(mapping, index); in truncate_folio_batch_exceptionals()
109 __clear_shadow_entry(mapping, index, folio); in truncate_folio_batch_exceptionals()
113 xa_unlock_irq(&mapping->i_pages); in truncate_folio_batch_exceptionals()
114 if (mapping_shrinkable(mapping)) in truncate_folio_batch_exceptionals()
115 inode_add_lru(mapping->host); in truncate_folio_batch_exceptionals()
116 spin_unlock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
138 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
150 * We need to bail out if page->mapping is no longer equal to the original
151 * mapping. This happens a) when the VM reclaimed the page while we waited on
172 int truncate_inode_folio(struct address_space *mapping, struct folio *folio) in truncate_inode_folio() argument
174 if (folio->mapping != mapping) in truncate_inode_folio()
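
The comment and check above describe the standard page-cache idiom: folio->mapping has to be re-verified under the folio lock, because reclaim or a concurrent truncate can detach the folio while the caller waited. A hypothetical illustration of the recheck (myfs_* is a placeholder, not a truncate.c helper):

/* Hypothetical: run a per-filesystem action only if the folio is still
 * attached to the mapping we looked it up in. */
static int myfs_do_something_locked(struct folio *folio)
{
        return 0;       /* hypothetical per-filesystem work */
}

static int with_folio_checked(struct address_space *mapping,
                              struct folio *folio)
{
        int ret = -EIO;

        folio_lock(folio);
        if (folio->mapping == mapping)  /* not reclaimed/truncated meanwhile */
                ret = myfs_do_something_locked(folio);
        folio_unlock(folio);
        return ret;
}
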
210 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
219 if (!mapping_inaccessible(folio->mapping)) in truncate_inode_partial_folio()
230 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
237 int generic_error_remove_folio(struct address_space *mapping, in generic_error_remove_folio() argument
240 if (!mapping) in generic_error_remove_folio()
246 if (!S_ISREG(mapping->host->i_mode)) in generic_error_remove_folio()
248 return truncate_inode_folio(mapping, folio); in generic_error_remove_folio()
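
generic_error_remove_folio() is the stock memory-failure handler for regular files: no mapping or a non-regular inode makes it bail, otherwise it truncates the folio. Filesystems typically just point their address_space_operations at it; a minimal sketch, assuming a recent kernel where the hook is named error_remove_folio (older trees use error_remove_page):

/* Hypothetical a_ops table; only the memory-failure hook is shown. */
static const struct address_space_operations myfs_aops = {
        /* ... read/write hooks elided ... */
        .error_remove_folio     = generic_error_remove_folio,
};
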
254 * @mapping: The mapping this folio belongs to.
263 long mapping_evict_folio(struct address_space *mapping, struct folio *folio) in mapping_evict_folio() argument
266 if (!mapping) in mapping_evict_folio()
277 return remove_mapping(mapping, folio); in mapping_evict_folio()
282 * @mapping: mapping to truncate
297 * mapping is large, it is probably the case that the final pages are the most
304 void truncate_inode_pages_range(struct address_space *mapping, in truncate_inode_pages_range() argument
316 if (mapping_empty(mapping)) in truncate_inode_pages_range()
338 while (index < end && find_lock_entries(mapping, &index, end - 1, in truncate_inode_pages_range()
340 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
343 delete_from_page_cache_batch(mapping, &fbatch); in truncate_inode_pages_range()
351 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); in truncate_inode_pages_range()
365 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT, in truncate_inode_pages_range()
378 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in truncate_inode_pages_range()
399 truncate_inode_folio(mapping, folio); in truncate_inode_pages_range()
402 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
410 * @mapping: mapping to truncate
414 * mapping->invalidate_lock.
418 * mapping->nrpages can be non-zero when this function returns even after
419 * truncation of the whole mapping.
421 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) in truncate_inode_pages() argument
423 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
429 * @mapping: mapping to truncate
436 void truncate_inode_pages_final(struct address_space *mapping) in truncate_inode_pages_final() argument
445 mapping_set_exiting(mapping); in truncate_inode_pages_final()
447 if (!mapping_empty(mapping)) { in truncate_inode_pages_final()
454 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
455 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
458 truncate_inode_pages(mapping, 0); in truncate_inode_pages_final()
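
truncate_inode_pages_final() is meant for the end of an inode's life: per the lines above it sets mapping_exiting() and cycles the i_pages lock so lockless lookups notice, then truncates everything. The usual caller is a filesystem's ->evict_inode(); a hedged sketch with the filesystem-specific parts elided:

/* Hypothetical ->evict_inode showing the conventional call order. */
static void myfs_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);     /* drop remaining cache */
        clear_inode(inode);                             /* mark inode clean and dead */
        /* filesystem-specific release of on-disk resources would follow */
}
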
464 * @mapping: the address_space which holds the folios to invalidate
472 unsigned long mapping_try_invalidate(struct address_space *mapping, in mapping_try_invalidate() argument
484 while (find_lock_entries(mapping, &index, end, &fbatch, indices)) { in mapping_try_invalidate()
496 ret = mapping_evict_folio(mapping, folio); in mapping_try_invalidate()
512 clear_shadow_entries(mapping, &fbatch, indices); in mapping_try_invalidate()
523 * @mapping: the address_space which holds the cache to invalidate
535 unsigned long invalidate_mapping_pages(struct address_space *mapping, in invalidate_mapping_pages() argument
538 return mapping_try_invalidate(mapping, start, end, NULL); in invalidate_mapping_pages()
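
invalidate_mapping_pages() is the best-effort variant: as the lines above show, it simply forwards to mapping_try_invalidate(), which skips dirty, locked or mapped folios instead of waiting for them. A hedged usage sketch (myfs_* is hypothetical; the 0/-1 page-index convention is the same one fs/drop_caches.c uses):

/* Hypothetical: shed whatever clean, unmapped cache an inode still holds. */
static void myfs_shed_cache(struct inode *inode)
{
        unsigned long dropped;

        /* start/end are page indices; -1 means "up to the end of file" */
        dropped = invalidate_mapping_pages(inode->i_mapping, 0, -1);
        pr_debug("myfs: dropped %lu folios from inode %lu\n",
                 dropped, inode->i_ino);
}
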
549 static int invalidate_complete_folio2(struct address_space *mapping, in invalidate_complete_folio2() argument
552 if (folio->mapping != mapping) in invalidate_complete_folio2()
558 spin_lock(&mapping->host->i_lock); in invalidate_complete_folio2()
559 xa_lock_irq(&mapping->i_pages); in invalidate_complete_folio2()
565 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_folio2()
566 if (mapping_shrinkable(mapping)) in invalidate_complete_folio2()
567 inode_add_lru(mapping->host); in invalidate_complete_folio2()
568 spin_unlock(&mapping->host->i_lock); in invalidate_complete_folio2()
570 filemap_free_folio(mapping, folio); in invalidate_complete_folio2()
573 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_folio2()
574 spin_unlock(&mapping->host->i_lock); in invalidate_complete_folio2()
578 static int folio_launder(struct address_space *mapping, struct folio *folio) in folio_launder() argument
582 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
584 return mapping->a_ops->launder_folio(folio); in folio_launder()
589 * @mapping: the address_space
598 int invalidate_inode_pages2_range(struct address_space *mapping, in invalidate_inode_pages2_range() argument
610 if (mapping_empty(mapping)) in invalidate_inode_pages2_range()
615 while (find_get_entries(mapping, &index, end, &fbatch, indices)) { in invalidate_inode_pages2_range()
623 if (dax_mapping(mapping) && in invalidate_inode_pages2_range()
624 !dax_invalidate_mapping_entry_sync(mapping, indices[i])) in invalidate_inode_pages2_range()
634 unmap_mapping_pages(mapping, indices[i], in invalidate_inode_pages2_range()
640 if (unlikely(folio->mapping != mapping)) { in invalidate_inode_pages2_range()
651 ret2 = folio_launder(mapping, folio); in invalidate_inode_pages2_range()
653 if (!invalidate_complete_folio2(mapping, folio)) in invalidate_inode_pages2_range()
662 clear_shadow_entries(mapping, &fbatch, indices); in invalidate_inode_pages2_range()
675 if (dax_mapping(mapping)) { in invalidate_inode_pages2_range()
676 unmap_mapping_pages(mapping, start, end - start + 1, false); in invalidate_inode_pages2_range()
684 * @mapping: the address_space
691 int invalidate_inode_pages2(struct address_space *mapping) in invalidate_inode_pages2() argument
693 return invalidate_inode_pages2_range(mapping, 0, -1); in invalidate_inode_pages2()
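
invalidate_inode_pages2_range() is the strict variant: it launders dirty folios, unmaps the range, and reports -EBUSY if a folio could not be removed; invalidate_inode_pages2() is just the whole-file wrapper. Direct-I/O style writers use it to push aliasing page cache out of the way before touching the range; a hedged sketch of that calling convention (myfs_* is hypothetical):

/* Hypothetical: make sure no cached folios alias [pos, pos + count). */
static int myfs_flush_and_invalidate(struct address_space *mapping,
                                     loff_t pos, size_t count)
{
        int err;

        /* write back any dirty cache for the range first */
        err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
        if (err)
                return err;

        /* -EBUSY here means a folio could not be removed, e.g. it was
         * redirtied or is still referenced */
        return invalidate_inode_pages2_range(mapping,
                                             pos >> PAGE_SHIFT,
                                             (pos + count - 1) >> PAGE_SHIFT);
}
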
714 struct address_space *mapping = inode->i_mapping; in truncate_pagecache() local
726 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
727 truncate_inode_pages(mapping, newsize); in truncate_pagecache()
728 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
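
truncate_pagecache() unmaps the affected range both before and after truncate_inode_pages(): the first call is largely an efficiency measure, the second catches folios that were faulted back in during the window. A hypothetical size-shrinking path showing where it sits (in-tree filesystems normally reach it through truncate_setsize()):

/* Hypothetical shrink path; block freeing is filesystem specific. */
static void myfs_shrink(struct inode *inode, loff_t newsize)
{
        i_size_write(inode, newsize);           /* publish the new size first */
        truncate_pagecache(inode, newsize);     /* unmap + evict cache past EOF */
        /* on-disk block freeing would follow here */
}
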
820 struct address_space *mapping = inode->i_mapping; in truncate_pagecache_range() local
837 unmap_mapping_range(mapping, unmap_start, in truncate_pagecache_range()
839 truncate_inode_pages_range(mapping, lstart, lend); in truncate_pagecache_range()
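
truncate_pagecache_range() is the byte-range counterpart used for hole punching: it unmaps only the pages wholly inside [lstart, lend] (hence the unmap_start/unmap_end rounding above) and leaves the partial head and tail to truncate_inode_pages_range(). A hypothetical fallocate(PUNCH_HOLE) fragment, with alignment and block freeing left to the filesystem:

/* Hypothetical hole punch; offset/len are bytes, lend is inclusive. */
static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        truncate_pagecache_range(inode, offset, offset + len - 1);
        /* ... then release the underlying blocks (filesystem specific) ... */
        return 0;
}
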