/* SPDX-License-Identifier: GPL-2.0 */
/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */
7
8 #ifndef __ASM_POWERPC_KFENCE_H
9 #define __ASM_POWERPC_KFENCE_H
10
11 #include <linux/mm.h>
12 #include <asm/pgtable.h>
13
14 #ifdef CONFIG_PPC64_ELF_ABI_V1
15 #define ARCH_FUNC_PREFIX "."
16 #endif
17
18 #ifdef CONFIG_KFENCE
19 extern bool kfence_disabled;
20
disable_kfence(void)21 static inline void disable_kfence(void)
22 {
23 kfence_disabled = true;
24 }
25
arch_kfence_init_pool(void)26 static inline bool arch_kfence_init_pool(void)
27 {
28 return !kfence_disabled;
29 }
30 #endif
31
32 #ifdef CONFIG_PPC64
/*
 * Toggle presence of a single KFENCE guard page on 64-bit:
 * delegate to the linear-map helper (map when !protect, unmap when protect).
 * Always reports success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);

	return true;
}
41 #else
/*
 * Toggle presence of a single KFENCE guard page on 32-bit by editing the
 * kernel PTE directly: clear _PAGE_PRESENT to protect (and flush the TLB so
 * the stale translation is gone), set it again to unprotect. Always reports
 * success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *kpte = virt_to_kpte(addr);

	if (!protect) {
		/* Re-arm the mapping; no flush needed when adding access. */
		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
		return true;
	}

	pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return true;
}
55 #endif
56
57 #endif /* __ASM_POWERPC_KFENCE_H */
58