/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 */

#ifndef _ASM_RISCV_CACHEFLUSH_H
#define _ASM_RISCV_CACHEFLUSH_H

#include <linux/mm.h>

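/*
 * fence.i synchronizes the instruction and data streams on the local hart
 * only: subsequent instruction fetches on this hart are guaranteed to see
 * all stores already visible to it.  The fence is not broadcast to other
 * harts.
 */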
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}

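/*
 * fence.i has no ranged form, so the start/end arguments are ignored and
 * the whole local instruction cache is synchronized instead.
 */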
static inline void local_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	local_flush_icache_all();
}

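/*
 * PG_dcache_clean set means the instruction cache is in sync with the
 * folio's contents.  flush_dcache_folio() clears the bit after the kernel
 * writes to the folio, so the icache is flushed again the next time the
 * folio is mapped executable (see flush_icache_pte()).
 */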
#define PG_dcache_clean PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

/*
 * RISC-V doesn't have an instruction to flush parts of the instruction cache,
 * so instead we just flush the whole thing.
 */
#define flush_icache_range(start, end) flush_icache_all()
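/*
 * flush_icache_user_page() runs after the kernel writes instructions into a
 * user mapping (e.g. through copy_to_user_page()).  Only executable
 * mappings need it; passing local == 0 to flush_icache_mm() makes the
 * flush visible to every hart that may run this mm.
 */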
#define flush_icache_user_page(vma, pg, addr, len)	\
do {							\
	if (vma->vm_flags & VM_EXEC)			\
		flush_icache_mm(vma->vm_mm, 0);		\
} while (0)

#ifdef CONFIG_64BIT
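/*
 * new_vmalloc is a bitmap with one bit per possible CPU.  flush_cache_vmap()
 * sets every bit when a new vmalloc or module mapping is created; each CPU
 * later clears its own bit and emits an sfence.vma from handle_exception(),
 * which avoids broadcasting a preventive sfence.vma for every new kernel
 * mapping.
 */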
extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
extern char _end[];
#define flush_cache_vmap flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (is_vmalloc_or_module_addr((void *)start)) {
		int i;

		/*
		 * We don't care if concurrently a cpu resets this value since
		 * the only place this can happen is in handle_exception() where
		 * an sfence.vma is emitted.
		 */
		for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
			new_vmalloc[i] = -1ULL;
	}
}
#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)
#endif

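/*
 * With !CONFIG_SMP there is only one hart, so both helpers reduce to a
 * local fence.i.  With CONFIG_SMP the out-of-line implementations (see
 * arch/riscv/mm/cacheflush.c) reach remote harts via IPIs or the SBI
 * remote fence extension.
 */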
#ifndef CONFIG_SMP

#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()

#else /* CONFIG_SMP */

void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */

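/*
 * Cache-block sizes for the Zicbom (cache-block management) and Zicboz
 * (cache-block zero) extensions, probed from the devicetree or ACPI tables
 * by riscv_init_cbo_blocksizes() at boot.
 */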
extern unsigned int riscv_cbom_block_size;
extern unsigned int riscv_cboz_block_size;
void riscv_init_cbo_blocksizes(void);

#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
void __init riscv_set_dma_cache_alignment(void);
#else
static inline void riscv_noncoherent_supported(void) {}
static inline void riscv_set_dma_cache_alignment(void) {}
#endif

/*
 * Bits in sys_riscv_flush_icache()'s flags argument.
 */
#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
#define SYS_RISCV_FLUSH_ICACHE_ALL   (SYS_RISCV_FLUSH_ICACHE_LOCAL)
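
/*
 * Userspace normally reaches this through the vDSO or
 * __builtin___clear_cache(); a direct-syscall sketch (assuming the uapi
 * headers expose __NR_riscv_flush_icache) would be:
 *
 *	syscall(__NR_riscv_flush_icache, start, end, 0UL);
 *	syscall(__NR_riscv_flush_icache, start, end,
 *		SYS_RISCV_FLUSH_ICACHE_LOCAL);
 *
 * The first form flushes on all harts, the second only on the calling
 * hart.  Any other flag bits make the syscall return -EINVAL.
 */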

#include <asm-generic/cacheflush.h>

#endif /* _ASM_RISCV_CACHEFLUSH_H */