// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

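/* Serializes all pokes through the single FIX_TEXT_POKE0 fixmap slot. */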
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

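/*
 * Kernel text is mapped read-only, so patching writes through a temporary
 * writable alias of the backing page, set up in the FIX_TEXT_POKE0 fixmap
 * slot, rather than by relaxing permissions on the text mapping itself.
 */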
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_EXECMEM))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
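/*
 * Illustrative sketch (not part of this file): read back an instruction
 * before deciding how to patch it; some_site is a hypothetical address in
 * kernel text.
 *
 *	u32 old;
 *
 *	if (aarch64_insn_read(some_site, &old))
 *		return -EFAULT;
 *	if (aarch64_insn_is_nop(old))
 *		...;
 */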

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
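/*
 * Note that aarch64_insn_write() does no cache maintenance of its own:
 * callers patching live code are expected to use
 * aarch64_insn_patch_text_nosync() below, which cleans/invalidates the
 * patched range to the PoU after the write.
 */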

noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
{
	u64 *waddr;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &val, sizeof(val));

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
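/*
 * Since patch_map() only maps a single page, callers of
 * aarch64_insn_write_literal_u64() must ensure the 64-bit literal does not
 * straddle a page boundary (naturally aligned literals are fine).
 */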

typedef void text_poke_f(void *dst, void *src, size_t patched, size_t len);

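/*
 * Patch an arbitrary-length range: walk it at most one page at a time,
 * mapping each chunk through the fixmap under patch_lock and handing it to
 * func, then flush the I-cache for the whole range once all writes are
 * done.
 */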
static void *__text_poke(text_poke_f func, void *addr, void *src, size_t len)
{
	unsigned long flags;
	size_t patched = 0;
	size_t size;
	void *waddr;
	void *ptr;

	raw_spin_lock_irqsave(&patch_lock, flags);

	while (patched < len) {
		ptr = addr + patched;
		size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
			     len - patched);

		waddr = patch_map(ptr, FIX_TEXT_POKE0);
		func(waddr, src, patched, size);
		patch_unmap(FIX_TEXT_POKE0);

		patched += size;
	}
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return addr;
}

static void text_poke_memcpy(void *dst, void *src, size_t patched, size_t len)
{
	copy_to_kernel_nofault(dst, src + patched, len);
}

static void text_poke_memset(void *dst, void *src, size_t patched, size_t len)
{
	u32 c = *(u32 *)src;

	memset32(dst, c, len / 4);
}

/**
 * aarch64_insn_copy - Copy instructions into (an unused part of) RX memory
 * @dst: address to modify
 * @src: source of the copy
 * @len: length to copy
 *
 * Useful for JITs to dump new code blocks into unused regions of RX memory.
 */
noinstr void *aarch64_insn_copy(void *dst, void *src, size_t len)
{
	/* A64 instructions must be word aligned */
	if ((uintptr_t)dst & 0x3)
		return NULL;

	return __text_poke(text_poke_memcpy, dst, src, len);
}
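/*
 * Illustrative sketch (not part of this file): a JIT flushing a finished
 * code buffer into its RX region; rx_image and insns are hypothetical.
 *
 *	u32 insns[NR_INSNS];	// built in ordinary RW memory
 *
 *	if (!aarch64_insn_copy(rx_image, insns, sizeof(insns)))
 *		return -EINVAL;	// dst was not 4-byte aligned
 */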

/**
 * aarch64_insn_set - memset for RX memory regions.
 * @dst: address to modify
 * @insn: value to set
 * @len: length of memory region.
 *
 * Useful for JITs to fill regions of RX memory with illegal instructions.
 */
noinstr void *aarch64_insn_set(void *dst, u32 insn, size_t len)
{
	if ((uintptr_t)dst & 0x3)
		return NULL;

	return __text_poke(text_poke_memset, dst, &insn, len);
}
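/*
 * Illustrative sketch (not part of this file): poisoning the unused tail
 * of an RX region so that stray execution traps; rx_image, used and size
 * are hypothetical.
 *
 *	aarch64_insn_set(rx_image + used, AARCH64_BREAK_FAULT,
 *			 size - used);
 */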

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		caches_clean_inval_pou((uintptr_t)tp,
				       (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
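/*
 * Illustrative sketch (not part of this file): turning a known site into a
 * NOP. Patching without stop_machine() like this is only safe when the
 * old and new encodings satisfy the architecture's concurrent-modification
 * rules (B/BL/NOP/BRK and friends); anything else should go through
 * aarch64_insn_patch_text() below.
 *
 *	aarch64_insn_patch_text_nosync(site, aarch64_insn_gen_nop());
 */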

struct aarch64_insn_patch {
	void **text_addrs;
	u32 *new_insns;
	int insn_cnt;
	atomic_t cpu_count;
};

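/*
 * stop_machine() callback: every online CPU ends up here; the last one to
 * arrive applies all the patches, then bumps cpu_count one extra time to
 * release the spinning CPUs, which issue an ISB so they fetch the new
 * instructions.
 */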
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
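/*
 * Illustrative sketch (not part of this file): replacing one live
 * instruction atomically with respect to all CPUs; site is hypothetical.
 *
 *	void *addrs[] = { site };
 *	u32 insns[] = { aarch64_insn_gen_nop() };
 *
 *	WARN_ON(aarch64_insn_patch_text(addrs, insns, 1));
 */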