1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   *  S390 version
4   *    Copyright IBM Corp. 1999, 2000
5   *    Author(s): Hartmut Penner (hp@de.ibm.com),
6   *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
7   *
8   *  Derived from "include/asm-i386/uaccess.h"
9   */
10  #ifndef __S390_UACCESS_H
11  #define __S390_UACCESS_H
12  
13  /*
14   * User space memory access functions
15   */
16  #include <asm/asm-extable.h>
17  #include <asm/processor.h>
18  #include <asm/extable.h>
19  #include <asm/facility.h>
20  #include <asm-generic/access_ok.h>
21  #include <linux/instrumented.h>
22  
23  void debug_user_asce(int exit);
24  
25  unsigned long __must_check
26  raw_copy_from_user(void *to, const void __user *from, unsigned long n);
27  
28  unsigned long __must_check
29  raw_copy_to_user(void __user *to, const void *from, unsigned long n);
30  
31  #ifndef CONFIG_KASAN
32  #define INLINE_COPY_FROM_USER
33  #define INLINE_COPY_TO_USER
34  #endif
35  
36  unsigned long __must_check
37  _copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
38  
39  static __always_inline unsigned long __must_check
copy_from_user_key(void * to,const void __user * from,unsigned long n,unsigned long key)40  copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
41  {
42  	if (check_copy_size(to, n, false))
43  		n = _copy_from_user_key(to, from, n, key);
44  	return n;
45  }
46  
47  unsigned long __must_check
48  _copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);
49  
50  static __always_inline unsigned long __must_check
copy_to_user_key(void __user * to,const void * from,unsigned long n,unsigned long key)51  copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
52  {
53  	if (check_copy_size(from, n, true))
54  		n = _copy_to_user_key(to, from, n, key);
55  	return n;
56  }
57  
/*
 * Operand-access-control word loaded into GR0 before MVCOS.
 * oac1 describes the first (destination) operand, oac2 the second
 * (source) operand; see the MVCOS description in the z/Architecture
 * Principles of Operation for the precise field semantics.
 */
union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;	/* access key for operand 1 */
			unsigned short	   : 4;
			unsigned short as  : 2;	/* address-space control for operand 1 */
			unsigned short	   : 4;
			unsigned short k   : 1;	/* key field is valid */
			unsigned short a   : 1;	/* as field is valid */
		} oac1;
		struct {
			unsigned short key : 4;	/* access key for operand 2 */
			unsigned short	   : 4;
			unsigned short as  : 2;	/* address-space control for operand 2 */
			unsigned short	   : 4;
			unsigned short k   : 1;	/* key field is valid */
			unsigned short a   : 1;	/* as field is valid */
		} oac2;
	};
};
79  
80  int __noreturn __put_user_bad(void);
81  
/*
 * The mvcos based get/put helpers must not be instrumented by KMSAN:
 * make them noinline with memory sanitization disabled in that case,
 * otherwise simply force-inline them.
 */
#ifdef CONFIG_KMSAN
#define get_put_user_noinstr_attributes \
	noinline __maybe_unused __no_sanitize_memory
#else
#define get_put_user_noinstr_attributes __always_inline
#endif
88  
/*
 * Generate the single-value user space store helpers for one width:
 *
 * __put_user_<type>_noinstr() performs the actual store with MVCOS,
 * targeting the secondary address space (oac1.as = SECONDARY, i.e.
 * user space) for the destination operand. It is kept free of
 * instrumentation (see get_put_user_noinstr_attributes). Returns 0 on
 * success; on a fault the EX_TABLE_UA_STORE fixup supplies the error
 * code in rc.
 *
 * __put_user_<type>() is the instrumented always-inline wrapper that
 * adds the KASAN/KMSAN instrumentation hook after the store.
 */
#define DEFINE_PUT_USER(type)						\
static get_put_user_noinstr_attributes int				\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	union oac __oac_spec = {					\
		.oac1.as = PSW_BITS_AS_SECONDARY,			\
		.oac1.a = 1,						\
	};								\
	int rc;								\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=&d" (rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val)				\
		: "cc", "0");						\
	return rc;							\
}									\
									\
static __always_inline int						\
__put_user_##type(unsigned type __user *to, unsigned type *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __put_user_##type##_noinstr(to, from, size);		\
	instrument_put_user(*from, to, size);				\
	return rc;							\
}

DEFINE_PUT_USER(char);
DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long);
130  
__put_user_fn(void * x,void __user * ptr,unsigned long size)131  static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
132  {
133  	int rc;
134  
135  	switch (size) {
136  	case 1:
137  		rc = __put_user_char((unsigned char __user *)ptr,
138  				     (unsigned char *)x,
139  				     size);
140  		break;
141  	case 2:
142  		rc = __put_user_short((unsigned short __user *)ptr,
143  				      (unsigned short *)x,
144  				      size);
145  		break;
146  	case 4:
147  		rc = __put_user_int((unsigned int __user *)ptr,
148  				    (unsigned int *)x,
149  				    size);
150  		break;
151  	case 8:
152  		rc = __put_user_long((unsigned long __user *)ptr,
153  				     (unsigned long *)x,
154  				     size);
155  		break;
156  	default:
157  		__put_user_bad();
158  		break;
159  	}
160  	return rc;
161  }
162  
163  int __noreturn __get_user_bad(void);
164  
/*
 * Generate the single-value user space fetch helpers for one width:
 *
 * __get_user_<type>_noinstr() performs the actual fetch with MVCOS,
 * taking the source operand from the secondary address space
 * (oac2.as = SECONDARY, i.e. user space). It is kept free of
 * instrumentation (see get_put_user_noinstr_attributes). Returns 0 on
 * success; on a fault the EX_TABLE_UA_LOAD_MEM fixup supplies the
 * error code in rc and handles the destination buffer.
 *
 * __get_user_<type>() is the instrumented always-inline wrapper that
 * adds the KASAN/KMSAN instrumentation hook after the fetch.
 */
#define DEFINE_GET_USER(type)						\
static get_put_user_noinstr_attributes int				\
__get_user_##type##_noinstr(unsigned type *to,				\
			    unsigned type __user *from,			\
			    unsigned long size)				\
{									\
	union oac __oac_spec = {					\
		.oac2.as = PSW_BITS_AS_SECONDARY,			\
		.oac2.a = 1,						\
	};								\
	int rc;								\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	0(%[_to]),%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])	\
		EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])	\
		: [rc] "=&d" (rc), "=Q" (*(to))				\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val), [_to] "a" (to),		\
		  [_ksize] "K" (size)					\
		: "cc", "0");						\
	return rc;							\
}									\
									\
static __always_inline int						\
__get_user_##type(unsigned type *to, unsigned type __user *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __get_user_##type##_noinstr(to, from, size);		\
	instrument_get_user(*to);					\
	return rc;							\
}

DEFINE_GET_USER(char);
DEFINE_GET_USER(short);
DEFINE_GET_USER(int);
DEFINE_GET_USER(long);
207  
__get_user_fn(void * x,const void __user * ptr,unsigned long size)208  static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
209  {
210  	int rc;
211  
212  	switch (size) {
213  	case 1:
214  		rc = __get_user_char((unsigned char *)x,
215  				     (unsigned char __user *)ptr,
216  				     size);
217  		break;
218  	case 2:
219  		rc = __get_user_short((unsigned short *)x,
220  				      (unsigned short __user *)ptr,
221  				      size);
222  		break;
223  	case 4:
224  		rc = __get_user_int((unsigned int *)x,
225  				    (unsigned int __user *)ptr,
226  				    size);
227  		break;
228  	case 8:
229  		rc = __get_user_long((unsigned long *)x,
230  				     (unsigned long __user *)ptr,
231  				     size);
232  		break;
233  	default:
234  		__get_user_bad();
235  		break;
236  	}
237  	return rc;
238  }
239  
240  /*
241   * These are the main single-value transfer routines.  They automatically
242   * use the right size if we just have the right pointer type.
243   */
/*
 * Store a single value to user space, without an access_ok() check
 * and without might_fault() (use put_user() for the checked variant).
 * Only 1/2/4/8 byte objects are supported; any other size fails at
 * link time via __put_user_bad(). Evaluates to 0 on success or to the
 * negative error code left by __put_user_fn() (__pu_err is preset to
 * -EFAULT but always overwritten for supported sizes).
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})
263  
/* Same as __put_user(), plus the might_fault() debugging annotation. */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})
269  
/*
 * Fetch a single value from user space, without an access_ok() check
 * and without might_fault() (use get_user() for the checked variant).
 * The value is read into a correctly sized unsigned temporary and
 * then reinterpreted as the target type, so (x) is assigned from the
 * temporary even when an error is returned. Sizes other than 1/2/4/8
 * fail at link time via __get_user_bad(). Evaluates to 0 on success
 * or to the negative error code left by __get_user_fn().
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})
310  
/* Same as __get_user(), plus the might_fault() debugging annotation. */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
316  
317  /*
318   * Copy a null terminated string from userspace.
319   */
320  long __must_check strncpy_from_user(char *dst, const char __user *src, long count);
321  
322  long __must_check strnlen_user(const char __user *src, long count);
323  
324  /*
325   * Zero Userspace
326   */
327  unsigned long __must_check __clear_user(void __user *to, unsigned long size);
328  
/*
 * Zero @n bytes of user space at @to. May fault and sleep, hence the
 * might_fault() annotation. Returns the number of bytes that could
 * not be cleared (0 on full success), as returned by __clear_user().
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
334  
335  void *__s390_kernel_write(void *dst, const void *src, size_t size);
336  
s390_kernel_write(void * dst,const void * src,size_t size)337  static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
338  {
339  	if (__is_defined(__DECOMPRESSOR))
340  		return memcpy(dst, src, size);
341  	return __s390_kernel_write(dst, src, size);
342  }
343  
344  int __noreturn __put_kernel_bad(void);
345  
/*
 * Store @val to the kernel address @to with a single store
 * instruction @insn (stc/sth/st/stg, picked by the caller to match
 * the operand size). Evaluates to 0 on success; if the store faults,
 * the EX_TABLE_UA_STORE fixup supplies the error code in __rc.
 */
#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_to]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=d" (__rc), [_to] "+Q" (*(to))			\
		: [_val] "d" (val)					\
		: "cc");						\
	__rc;								\
})
361  
/*
 * Non-faulting kernel-space store: write *(type *)(src) to (dst)
 * using a size-matched store instruction, and jump to @err_label
 * instead of oopsing if the store raises an exception. Sizes other
 * than 1/2/4/8 fail at link time via __put_kernel_bad().
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	unsigned long __x = (unsigned long)(*((type *)(src)));		\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
387  
388  int __noreturn __get_kernel_bad(void);
389  
/*
 * Load a value from the kernel address @from into @val with a single
 * load instruction @insn (ic/lh/l/lg, picked by the caller to match
 * the operand size). Evaluates to 0 on success; if the load faults,
 * the EX_TABLE_UA_LOAD_REG fixup supplies the error code in __rc and
 * takes care of the target register.
 */
#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_from]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])		\
		EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])		\
		: [rc] "=d" (__rc), [_val] "=d" (val)			\
		: [_from] "Q" (*(from))					\
		: "cc");						\
	__rc;								\
})
405  
/*
 * Non-faulting kernel-space fetch: read *(type *)(src) into (dst)
 * using a size-matched load instruction via a correctly sized
 * unsigned temporary, and jump to @err_label instead of oopsing if
 * the load raises an exception. Sizes other than 1/2/4/8 fail at
 * link time via __get_kernel_bad().
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		unsigned char __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
446  
447  void __cmpxchg_user_key_called_with_bad_pointer(void);
448  
449  #define CMPXCHG_USER_KEY_MAX_LOOPS 128
450  
__cmpxchg_user_key(unsigned long address,void * uval,__uint128_t old,__uint128_t new,unsigned long key,int size)451  static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
452  					      __uint128_t old, __uint128_t new,
453  					      unsigned long key, int size)
454  {
455  	int rc = 0;
456  
457  	switch (size) {
458  	case 1: {
459  		unsigned int prev, shift, mask, _old, _new;
460  		unsigned long count;
461  
462  		shift = (3 ^ (address & 3)) << 3;
463  		address ^= address & 3;
464  		_old = ((unsigned int)old & 0xff) << shift;
465  		_new = ((unsigned int)new & 0xff) << shift;
466  		mask = ~(0xff << shift);
467  		asm volatile(
468  			"	spka	0(%[key])\n"
469  			"	sacf	256\n"
470  			"	llill	%[count],%[max_loops]\n"
471  			"0:	l	%[prev],%[address]\n"
472  			"1:	nr	%[prev],%[mask]\n"
473  			"	xilf	%[mask],0xffffffff\n"
474  			"	or	%[new],%[prev]\n"
475  			"	or	%[prev],%[tmp]\n"
476  			"2:	lr	%[tmp],%[prev]\n"
477  			"3:	cs	%[prev],%[new],%[address]\n"
478  			"4:	jnl	5f\n"
479  			"	xr	%[tmp],%[prev]\n"
480  			"	xr	%[new],%[tmp]\n"
481  			"	nr	%[tmp],%[mask]\n"
482  			"	jnz	5f\n"
483  			"	brct	%[count],2b\n"
484  			"5:	sacf	768\n"
485  			"	spka	%[default_key]\n"
486  			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
487  			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
488  			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
489  			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
490  			: [rc] "+&d" (rc),
491  			  [prev] "=&d" (prev),
492  			  [address] "+Q" (*(int *)address),
493  			  [tmp] "+&d" (_old),
494  			  [new] "+&d" (_new),
495  			  [mask] "+&d" (mask),
496  			  [count] "=a" (count)
497  			: [key] "%[count]" (key << 4),
498  			  [default_key] "J" (PAGE_DEFAULT_KEY),
499  			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
500  			: "memory", "cc");
501  		*(unsigned char *)uval = prev >> shift;
502  		if (!count)
503  			rc = -EAGAIN;
504  		return rc;
505  	}
506  	case 2: {
507  		unsigned int prev, shift, mask, _old, _new;
508  		unsigned long count;
509  
510  		shift = (2 ^ (address & 2)) << 3;
511  		address ^= address & 2;
512  		_old = ((unsigned int)old & 0xffff) << shift;
513  		_new = ((unsigned int)new & 0xffff) << shift;
514  		mask = ~(0xffff << shift);
515  		asm volatile(
516  			"	spka	0(%[key])\n"
517  			"	sacf	256\n"
518  			"	llill	%[count],%[max_loops]\n"
519  			"0:	l	%[prev],%[address]\n"
520  			"1:	nr	%[prev],%[mask]\n"
521  			"	xilf	%[mask],0xffffffff\n"
522  			"	or	%[new],%[prev]\n"
523  			"	or	%[prev],%[tmp]\n"
524  			"2:	lr	%[tmp],%[prev]\n"
525  			"3:	cs	%[prev],%[new],%[address]\n"
526  			"4:	jnl	5f\n"
527  			"	xr	%[tmp],%[prev]\n"
528  			"	xr	%[new],%[tmp]\n"
529  			"	nr	%[tmp],%[mask]\n"
530  			"	jnz	5f\n"
531  			"	brct	%[count],2b\n"
532  			"5:	sacf	768\n"
533  			"	spka	%[default_key]\n"
534  			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
535  			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
536  			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
537  			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
538  			: [rc] "+&d" (rc),
539  			  [prev] "=&d" (prev),
540  			  [address] "+Q" (*(int *)address),
541  			  [tmp] "+&d" (_old),
542  			  [new] "+&d" (_new),
543  			  [mask] "+&d" (mask),
544  			  [count] "=a" (count)
545  			: [key] "%[count]" (key << 4),
546  			  [default_key] "J" (PAGE_DEFAULT_KEY),
547  			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
548  			: "memory", "cc");
549  		*(unsigned short *)uval = prev >> shift;
550  		if (!count)
551  			rc = -EAGAIN;
552  		return rc;
553  	}
554  	case 4:	{
555  		unsigned int prev = old;
556  
557  		asm volatile(
558  			"	spka	0(%[key])\n"
559  			"	sacf	256\n"
560  			"0:	cs	%[prev],%[new],%[address]\n"
561  			"1:	sacf	768\n"
562  			"	spka	%[default_key]\n"
563  			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
564  			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
565  			: [rc] "+&d" (rc),
566  			  [prev] "+&d" (prev),
567  			  [address] "+Q" (*(int *)address)
568  			: [new] "d" ((unsigned int)new),
569  			  [key] "a" (key << 4),
570  			  [default_key] "J" (PAGE_DEFAULT_KEY)
571  			: "memory", "cc");
572  		*(unsigned int *)uval = prev;
573  		return rc;
574  	}
575  	case 8: {
576  		unsigned long prev = old;
577  
578  		asm volatile(
579  			"	spka	0(%[key])\n"
580  			"	sacf	256\n"
581  			"0:	csg	%[prev],%[new],%[address]\n"
582  			"1:	sacf	768\n"
583  			"	spka	%[default_key]\n"
584  			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
585  			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
586  			: [rc] "+&d" (rc),
587  			  [prev] "+&d" (prev),
588  			  [address] "+QS" (*(long *)address)
589  			: [new] "d" ((unsigned long)new),
590  			  [key] "a" (key << 4),
591  			  [default_key] "J" (PAGE_DEFAULT_KEY)
592  			: "memory", "cc");
593  		*(unsigned long *)uval = prev;
594  		return rc;
595  	}
596  	case 16: {
597  		__uint128_t prev = old;
598  
599  		asm volatile(
600  			"	spka	0(%[key])\n"
601  			"	sacf	256\n"
602  			"0:	cdsg	%[prev],%[new],%[address]\n"
603  			"1:	sacf	768\n"
604  			"	spka	%[default_key]\n"
605  			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
606  			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
607  			: [rc] "+&d" (rc),
608  			  [prev] "+&d" (prev),
609  			  [address] "+QS" (*(__int128_t *)address)
610  			: [new] "d" (new),
611  			  [key] "a" (key << 4),
612  			  [default_key] "J" (PAGE_DEFAULT_KEY)
613  			: "memory", "cc");
614  		*(__uint128_t *)uval = prev;
615  		return rc;
616  	}
617  	}
618  	__cmpxchg_user_key_called_with_bad_pointer();
619  	return rc;
620  }
621  
622  /**
623   * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
624   * @ptr: User space address of value to compare to @old and exchange with
625   *	 @new. Must be aligned to sizeof(*@ptr).
626   * @uval: Address where the old value of *@ptr is written to.
627   * @old: Old value. Compared to the content pointed to by @ptr in order to
628   *	 determine if the exchange occurs. The old value read from *@ptr is
629   *	 written to *@uval.
630   * @new: New value to place at *@ptr.
631   * @key: Access key to use for checking storage key protection.
632   *
633   * Perform a cmpxchg on a user space target, honoring storage key protection.
634   * @key alone determines how key checking is performed, neither
635   * storage-protection-override nor fetch-protection-override apply.
636   * The caller must compare *@uval and @old to determine if values have been
637   * exchanged. In case of an exception *@uval is set to zero.
638   *
639   * Return:     0: cmpxchg executed
640   *	       -EFAULT: an exception happened when trying to access *@ptr
641   *	       -EAGAIN: maxed out number of retries (byte and short only)
642   */
/*
 * See the kernel-doc above. The operand size is derived from @ptr and
 * BUILD_BUG_ON enforces that *@uval has the same width; __chk_user_ptr()
 * makes sparse verify that @ptr is a __user pointer.
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
654  
655  #endif /* __S390_UACCESS_H */
656