/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX TASK_SIZE_USER64
#endif

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * PowerPC, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define __put_user(x, ptr) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
\
	might_fault(); \
	do { \
		__label__ __pu_failed; \
\
		allow_write_to_user(__pu_addr, __pu_size); \
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
		prevent_write_to_user(__pu_addr, __pu_size); \
		__pu_err = 0; \
		break; \
\
	__pu_failed: \
		prevent_write_to_user(__pu_addr, __pu_size); \
		__pu_err = -EFAULT; \
	} while (0); \
\
	__pu_err; \
})

#define put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *_pu_addr = (ptr); \
\
	access_ok(_pu_addr, sizeof(*(ptr))) ? \
		__put_user(x, _pu_addr) : -EFAULT; \
})
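
/*
 * Usage sketch (illustrative only; the function and variable names are
 * hypothetical, not part of this header): put_user() performs the
 * access_ok() check itself and returns 0 or -EFAULT, so a single-value
 * copy-out is simply:
 *
 *	static int store_flags(u32 __user *uptr, u32 flags)
 *	{
 *		if (put_user(flags, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */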

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op) \
	asm goto( \
		"1:	" op " %0,0(%1)	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), "b" (addr) \
		: \
		: label)
#else
#define __put_user_asm_goto(x, addr, label, op) \
	asm goto( \
		"1:	" op "%U1%X1 %0,%1	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), "m<>" (*addr) \
		: \
		: label)
#endif

#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
	__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label) \
	asm goto( \
		"1:	std%U1%X1 %0,%1	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), DS_FORM_CONSTRAINT (*addr) \
		: \
		: label)
#endif /* CONFIG_PPC_KERNEL_PREFIXED */
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
	asm goto( \
		"1:	stw%X1 %0, %1\n" \
		"2:	stw%X1 %L0, %L1\n" \
		EX_TABLE(1b, %l2) \
		EX_TABLE(2b, %l2) \
		: \
		: "r" (x), "m" (*addr) \
		: \
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __user *__pus_addr = (ptr); \
\
	switch (size) { \
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
	default: BUILD_BUG(); \
	} \
} while (0)

/*
 * This does an atomic 128-bit (16-byte) aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
	__asm__ __volatile__( \
		".machine push\n" \
		".machine altivec\n" \
		"1:	lvx  0,0,%1	# get user\n" \
		"	stvx 0,0,%2	# put kernel\n" \
		".machine pop\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err) \
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op) \
	asm_goto_output( \
		"1:	" op " %0,0(%1)	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: "b" (addr) \
		: \
		: label)
#else
#define __get_user_asm_goto(x, addr, label, op) \
	asm_goto_output( \
		"1:	" op "%U1%X1 %0, %1	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: "m<>" (*addr) \
		: \
		: label)
#endif

#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
	__get_user_asm_goto(x, addr, label, "ld")
#else
#define __get_user_asm2_goto(x, addr, label) \
	asm_goto_output( \
		"1:	ld%U1%X1 %0, %1	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: DS_FORM_CONSTRAINT (*addr) \
		: \
		: label)
#endif /* CONFIG_PPC_KERNEL_PREFIXED */
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
	asm_goto_output( \
		"1:	lwz%X1 %0, %1\n" \
		"2:	lwz%X1 %L0, %L1\n" \
		EX_TABLE(1b, %l2) \
		EX_TABLE(2b, %l2) \
		: "=&r" (x) \
		: "m" (*addr) \
		: \
		: label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label) \
do { \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	__label__ __gus_failed; \
\
	__get_user_size_goto(x, ptr, size, __gus_failed); \
	retval = 0; \
	break; \
__gus_failed: \
	x = 0; \
	retval = -EFAULT; \
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1:	" op "%U2%X2 %1, %2	# get_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
		"	li %1,0\n" \
		"	b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err), "=r" (x) \
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1:	lwz%X2 %1, %2\n" \
		"2:	lwz%X2 %L1, %L2\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4:	li %0,%3\n" \
		"	li %1,0\n" \
		"	li %1+1,0\n" \
		"	b 3b\n" \
		".previous\n" \
		EX_TABLE(1b, 4b) \
		EX_TABLE(2b, 4b) \
		: "=r" (err), "=&r" (x) \
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	retval = 0; \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size_goto(x, ptr, size, label) \
do { \
	long __gus_retval; \
\
	__get_user_size_allowed(x, ptr, size, __gus_retval); \
	if (__gus_retval) \
		goto label; \
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
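
/*
 * For example, on 32-bit, __long_type(*(u64 __user *)p) is unsigned long
 * long, so an 8-byte __get_user() reads into a wide enough temporary;
 * for 1-, 2- and 4-byte accesses it stays plain unsigned long.
 */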

#define __get_user(x, ptr) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
\
	might_fault(); \
	allow_read_from_user(__gu_addr, __gu_size); \
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	prevent_read_from_user(__gu_addr, __gu_size); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
\
	__gu_err; \
})

#define get_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *_gu_addr = (ptr); \
\
	access_ok(_gu_addr, sizeof(*(ptr))) ? \
		__get_user(x, _gu_addr) : \
		((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})
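
/*
 * Usage sketch (illustrative, hypothetical names): get_user() checks
 * access_ok() itself and zeroes the destination on failure:
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))	// uptr is a u32 __user *
 *		return -EFAULT;
 */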

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);
	return ret;
}
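
/*
 * These raw_copy_*() routines are the arch back ends used by the generic
 * copy_{from,to}_user() wrappers in <linux/uaccess.h>, which add the
 * access_ok() and object-size checks.  Typical driver-level use (sketch):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */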

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	might_fault();
	allow_write_to_user(addr, size);
	ret = __arch_clear_user(addr, size);
	prevent_write_to_user(addr, size);
	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true)) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void __force *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif
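
/*
 * Sketch (hypothetical error policy): like the regular copy routines,
 * the copy_mc_*() variants return the number of bytes left uncopied,
 * here also when the source takes a machine check:
 *
 *	if (copy_mc_to_kernel(dst, src, len))
 *		return -EIO;
 */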

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);

static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check __always_inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end	prevent_current_read_from_user

static __must_check __always_inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end	prevent_current_write_to_user

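/*
 * Canonical pattern for the unsafe_*() accessors below (illustrative
 * sketch): bracket a batch of accesses with one begin/end pair so the
 * user access window is only opened once:
 *
 *	if (!user_read_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 */
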
#define unsafe_get_user(x, p, e) do { \
	__long_type(*(p)) __gu_val; \
	__typeof__(*(p)) __user *__gu_addr = (p); \
\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
	(x) = (__typeof__(*(p)))__gu_val; \
} while (0)

#define unsafe_put_user(x, p, e) \
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)

#define unsafe_copy_from_user(d, s, l, e) \
do { \
	u8 *_dst = (u8 *)(d); \
	const u8 __user *_src = (const u8 __user *)(s); \
	size_t _len = (l); \
	int _i; \
\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
	if (_len & 4) { \
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do { \
	u8 __user *_dst = (u8 __user *)(d); \
	const u8 *_src = (const u8 *)(s); \
	size_t _len = (l); \
	int _i; \
\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) { \
		unsafe_put_user(*(u32 *)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
		unsafe_put_user(*(u16 *)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
		unsafe_put_user(*(u8 *)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

#define __get_kernel_nofault(dst, src, type, err_label) \
	__get_user_size_goto(*((type *)(dst)), \
		(__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label) \
	__put_user_size_goto(*((type *)(src)), \
		(__force type __user *)(dst), sizeof(type), err_label)
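
/*
 * These two back the generic get_kernel_nofault()/put_kernel_nofault()
 * and copy_{from,to}_kernel_nofault() helpers in <linux/uaccess.h>.
 * Sketch (hypothetical names):
 *
 *	long v;
 *
 *	if (get_kernel_nofault(v, kptr))	// kptr is a long *
 *		return -EFAULT;
 */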

#endif	/* _ARCH_POWERPC_UACCESS_H */
516