Lines matching "data" and "mapping" in arch/arm/mm/flush.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 1995-2002 Russell King */
48 : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero) in flush_pfn_alias()
55 unsigned long offset = vaddr & (PAGE_SIZE - 1); in flush_icache_alias()
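
These two fragments come from the temporary-alias helpers: flush_pfn_alias()
maps the page at a kernel address whose cache colour matches the user
address, then cleans and invalidates that range (the asm operands are the
matched line above). A minimal sketch of the idea; ALIAS_WINDOW is a
made-up stand-in for the dedicated alias slot the real code reserves:

static void flush_pfn_alias_sketch(unsigned long pfn, unsigned long vaddr)
{
	/* pick the slot whose cache colour matches the user address */
	unsigned long to = ALIAS_WINDOW + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));	/* temporary congruent alias */

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D range */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}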
In flush_cache_range():
	if (vma->vm_flags & VM_EXEC)

In flush_cache_pages():
	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
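
Both matches are the I-cache guards at the end of their functions: only
executable mappings need instruction-cache maintenance, and
flush_cache_pages() further restricts it to VIVT ASID-tagged I-caches,
where stale lines can outlive the D-side flush. The shared pattern
(a sketch, not the full functions):

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();	/* only exec mappings need this */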
In __flush_ptrace_access():
	/* VIPT non-aliasing D-cache */

In flush_ptrace_access():
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
	if (vma->vm_flags & VM_EXEC)
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.
 */
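
This comment documents copy_to_user_page(), which the two
flush_ptrace_access() matches above serve. A condensed sketch of its
contract (the in-tree version also pins the CPU on SMP):

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);			/* write via the kernel alias */
	flush_ptrace_access(vma, page, uaddr, dst, len);
						/* sync user alias and I-cache */
}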
In __flush_dcache_folio():
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
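
A sketch of the elided body between the two comments (the lowmem path
only; the offset passed to flush_pfn_alias() is my assumption about how
the "userspace colour" is derived from the page-cache index):

	/* write back the kernel alias so RAM matches what the kernel wrote */
	if (!folio_test_highmem(folio))
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));

	/* one flush at the user colour is enough on aliasing VIPT */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio),
				folio->index << PAGE_SHIFT);	/* assumed */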
In __flush_dcache_aliases():
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
	struct mm_struct *mm = current->active_mm;
	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		if (vma->vm_mm != mm)
		if (!(vma->vm_flags & VM_MAYSHARE))
		start = vma->vm_start;
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			pfn -= offset;
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;
	flush_dcache_mmap_unlock(mapping);
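
Worked example for the offset arithmetic above: with a 4-page folio at
pgoff 8 and a VMA whose vm_pgoff is 10, offset = 8 - 10 wraps to
(unsigned long)-2, so "offset > -nr" holds; "pfn -= offset" then advances
pfn by two pages, and the elided companion line shrinks nr to 2, flushing
only the tail of the folio that this VMA can actually map. A positive
offset takes the elided else branch and advances start into the VMA
instead. The final clamp trims nr when the VMA ends inside the folio.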
In __sync_icache_dcache():
	struct address_space *mapping;
	/* only flush non-aliasing VIPT caches for exec mappings */
	mapping = folio_flush_mapping(folio);
	mapping = NULL;
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);
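
Reconstructed flow tying these matches together (a sketch of the
surrounding function, not verbatim): aliasing VIPT caches look up the
flush mapping, everything else passes NULL, and the folio is only
flushed the first time PG_dcache_clean is claimed.

void __sync_icache_dcache(pte_t pteval)
{
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		return;	/* only flush non-aliasing VIPT for exec mappings */

	if (!pfn_valid(pte_pfn(pteval)))
		return;

	folio = page_folio(pfn_to_page(pte_pfn(pteval)));
	mapping = cache_is_vipt_aliasing() ? folio_flush_mapping(folio) : NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();	/* executable: sync the I-cache too */
}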
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 */
In flush_dcache_folio():
	struct address_space *mapping;
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
	mapping = folio_flush_mapping(folio);
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
		set_bit(PG_dcache_clean, &folio->flags);
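
Hypothetical usage of the contract described above (fill_folio() and its
arguments are invented; assumes a lowmem folio so folio_address() is
valid): after writing through the kernel mapping, flush_dcache_folio()
makes the data visible to user mappings on aliasing caches.

#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/string.h>

static void fill_folio(struct folio *folio, const void *src, size_t len)
{
	memcpy(folio_address(folio), src, len);	/* write via the kernel alias */
	flush_dcache_folio(folio);		/* publish to user aliases */
}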
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
In __flush_anon_page():
	/* VIPT non-aliasing caches need do nothing */
	/*
	 * Write back and invalidate userspace mapping.
	 */
	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
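
Reconstructed shape of __flush_anon_page() around those three comments
(a sketch under the VIVT/VIPT split seen elsewhere in this file, not
verbatim):

void __flush_anon_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vmaddr)
{
	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/* write back and invalidate the userspace mapping */
	if (cache_is_vivt())
		flush_cache_page(vma, vmaddr, page_to_pfn(page));
	else
		flush_pfn_alias(page_to_pfn(page), vmaddr);

	/* invalidate the kernel mapping (clean+invalidate, hence the FIXME) */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}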