1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * KFENCE support for LoongArch.
4 *
5 * Author: Enze Li <lienze@kylinos.cn>
6 * Copyright (C) 2022-2023 KylinSoft Corporation.
7 */
8
9 #ifndef _ASM_LOONGARCH_KFENCE_H
10 #define _ASM_LOONGARCH_KFENCE_H
11
12 #include <linux/kfence.h>
13 #include <linux/vmalloc.h>
14 #include <asm/pgtable.h>
15 #include <asm/tlb.h>
16
arch_kfence_init_pool(void)17 static inline bool arch_kfence_init_pool(void)
18 {
19 int err;
20 char *kaddr, *vaddr;
21 char *kfence_pool = __kfence_pool;
22 struct vm_struct *area;
23
24 area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
25 KFENCE_AREA_START, KFENCE_AREA_END,
26 __builtin_return_address(0));
27 if (!area)
28 return false;
29
30 __kfence_pool = (char *)area->addr;
31 err = ioremap_page_range((unsigned long)__kfence_pool,
32 (unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
33 virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
34 if (err) {
35 free_vm_area(area);
36 __kfence_pool = kfence_pool;
37 return false;
38 }
39
40 kaddr = kfence_pool;
41 vaddr = __kfence_pool;
42 while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
43 set_page_address(virt_to_page(kaddr), vaddr);
44 kaddr += PAGE_SIZE;
45 vaddr += PAGE_SIZE;
46 }
47
48 return true;
49 }
50
51 /* Protect the given page and flush TLB. */
kfence_protect_page(unsigned long addr,bool protect)52 static inline bool kfence_protect_page(unsigned long addr, bool protect)
53 {
54 pte_t *pte = virt_to_kpte(addr);
55
56 if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
57 return false;
58
59 if (protect)
60 set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
61 else
62 set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
63
64 preempt_disable();
65 local_flush_tlb_one(addr);
66 preempt_enable();
67
68 return true;
69 }
70
71 #endif /* _ASM_LOONGARCH_KFENCE_H */
72