Lines Matching +full:write +full:- +full:back

2  * arch/sh/mm/cache-sh4.c
5 * Copyright (C) 2001 - 2009 Paul Mundt
35 * Write back the range of D-cache, and purge the I-cache.
47 start = data->addr1; in sh4_flush_icache_range()
48 end = data->addr2; in sh4_flush_icache_range()
51 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { in sh4_flush_icache_range()
57 * Selectively flush d-cache then invalidate the i-cache. in sh4_flush_icache_range()
60 start &= ~(L1_CACHE_BYTES-1); in sh4_flush_icache_range()
61 end += L1_CACHE_BYTES-1; in sh4_flush_icache_range()
62 end &= ~(L1_CACHE_BYTES-1); in sh4_flush_icache_range()
74 cpu_data->icache.entry_mask); in sh4_flush_icache_range()
76 /* Clear i-cache line valid-bit */ in sh4_flush_icache_range()
78 for (i = 0; i < cpu_data->icache.ways; i++) { in sh4_flush_icache_range()
81 icacheaddr += cpu_data->icache.way_incr; in sh4_flush_icache_range()
94 * All types of SH-4 require PC to be uncached to operate on the I-cache. in flush_cache_one()
95 * Some types of SH-4 require PC to be uncached to operate on the D-cache. in flush_cache_one()
107 * Write back & invalidate the D-cache of the page.
117 clear_bit(PG_dcache_clean, &folio->flags); in sh4_flush_dcache_folio()
145 /* Flush I-cache */ in flush_icache_all()
190 * of flush_cache_mm for SH-4 is to get rid of aliases from the
191 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
196 * Caller takes mm->mmap_lock.
209 * Write back and invalidate I/D-caches for the page.
225 vma = data->vma; in sh4_flush_cache_page()
226 address = data->addr1 & PAGE_MASK; in sh4_flush_cache_page()
227 pfn = data->addr2; in sh4_flush_cache_page()
231 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page()
234 pmd = pmd_off(vma->vm_mm, address); in sh4_flush_cache_page()
241 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page()
263 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_page()
275 * Write back and invalidate D-caches.
289 vma = data->vma; in sh4_flush_cache_range()
290 start = data->addr1; in sh4_flush_cache_range()
291 end = data->addr2; in sh4_flush_cache_range()
293 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range()
297 * If cache is only 4k-per-way, there are never any 'aliases'. Since in sh4_flush_cache_range()
305 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_range()
314 * set i.e. associative write)
320 * operation (purge/write-back) is selected by the lower 2 bits of
334 /* Write this way for better assembly. */ in __flush_cache_one()
335 way_count = dcache->ways; in __flush_cache_one()
336 way_incr = dcache->way_incr; in __flush_cache_one()
343 * If I write "=r" for the (temp_pc), it puts this in r6 hence in __flush_cache_one()
344 * trashing exec_offset before it's been added on - why? Hence in __flush_cache_one()
356 * We know there will be >=1 iteration, so write as do-while to avoid in __flush_cache_one()
357 * pointless head-of-loop check for 0 iterations. in __flush_cache_one()
377 } while (--way_count != 0); in __flush_cache_one()
381 * SH-4 has virtually indexed and physically tagged cache.