1  // SPDX-License-Identifier: GPL-2.0
2  
3  // Generated by scripts/atomic/gen-atomic-fallback.sh
4  // DO NOT MODIFY THIS FILE DIRECTLY
5  
6  #ifndef _LINUX_ATOMIC_FALLBACK_H
7  #define _LINUX_ATOMIC_FALLBACK_H
8  
9  #include <linux/compiler.h>
10  
11  #if defined(arch_xchg)
12  #define raw_xchg arch_xchg
13  #elif defined(arch_xchg_relaxed)
14  #define raw_xchg(...) \
15  	__atomic_op_fence(arch_xchg, __VA_ARGS__)
16  #else
17  extern void raw_xchg_not_implemented(void);
18  #define raw_xchg(...) raw_xchg_not_implemented()
19  #endif
20  
21  #if defined(arch_xchg_acquire)
22  #define raw_xchg_acquire arch_xchg_acquire
23  #elif defined(arch_xchg_relaxed)
24  #define raw_xchg_acquire(...) \
25  	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
26  #elif defined(arch_xchg)
27  #define raw_xchg_acquire arch_xchg
28  #else
29  extern void raw_xchg_acquire_not_implemented(void);
30  #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
31  #endif
32  
33  #if defined(arch_xchg_release)
34  #define raw_xchg_release arch_xchg_release
35  #elif defined(arch_xchg_relaxed)
36  #define raw_xchg_release(...) \
37  	__atomic_op_release(arch_xchg, __VA_ARGS__)
38  #elif defined(arch_xchg)
39  #define raw_xchg_release arch_xchg
40  #else
41  extern void raw_xchg_release_not_implemented(void);
42  #define raw_xchg_release(...) raw_xchg_release_not_implemented()
43  #endif
44  
45  #if defined(arch_xchg_relaxed)
46  #define raw_xchg_relaxed arch_xchg_relaxed
47  #elif defined(arch_xchg)
48  #define raw_xchg_relaxed arch_xchg
49  #else
50  extern void raw_xchg_relaxed_not_implemented(void);
51  #define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
52  #endif
53  
54  #if defined(arch_cmpxchg)
55  #define raw_cmpxchg arch_cmpxchg
56  #elif defined(arch_cmpxchg_relaxed)
57  #define raw_cmpxchg(...) \
58  	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
59  #else
60  extern void raw_cmpxchg_not_implemented(void);
61  #define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
62  #endif
63  
64  #if defined(arch_cmpxchg_acquire)
65  #define raw_cmpxchg_acquire arch_cmpxchg_acquire
66  #elif defined(arch_cmpxchg_relaxed)
67  #define raw_cmpxchg_acquire(...) \
68  	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
69  #elif defined(arch_cmpxchg)
70  #define raw_cmpxchg_acquire arch_cmpxchg
71  #else
72  extern void raw_cmpxchg_acquire_not_implemented(void);
73  #define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
74  #endif
75  
76  #if defined(arch_cmpxchg_release)
77  #define raw_cmpxchg_release arch_cmpxchg_release
78  #elif defined(arch_cmpxchg_relaxed)
79  #define raw_cmpxchg_release(...) \
80  	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
81  #elif defined(arch_cmpxchg)
82  #define raw_cmpxchg_release arch_cmpxchg
83  #else
84  extern void raw_cmpxchg_release_not_implemented(void);
85  #define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
86  #endif
87  
88  #if defined(arch_cmpxchg_relaxed)
89  #define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
90  #elif defined(arch_cmpxchg)
91  #define raw_cmpxchg_relaxed arch_cmpxchg
92  #else
93  extern void raw_cmpxchg_relaxed_not_implemented(void);
94  #define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
95  #endif
96  
97  #if defined(arch_cmpxchg64)
98  #define raw_cmpxchg64 arch_cmpxchg64
99  #elif defined(arch_cmpxchg64_relaxed)
100  #define raw_cmpxchg64(...) \
101  	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
102  #else
103  extern void raw_cmpxchg64_not_implemented(void);
104  #define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
105  #endif
106  
107  #if defined(arch_cmpxchg64_acquire)
108  #define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
109  #elif defined(arch_cmpxchg64_relaxed)
110  #define raw_cmpxchg64_acquire(...) \
111  	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
112  #elif defined(arch_cmpxchg64)
113  #define raw_cmpxchg64_acquire arch_cmpxchg64
114  #else
115  extern void raw_cmpxchg64_acquire_not_implemented(void);
116  #define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
117  #endif
118  
119  #if defined(arch_cmpxchg64_release)
120  #define raw_cmpxchg64_release arch_cmpxchg64_release
121  #elif defined(arch_cmpxchg64_relaxed)
122  #define raw_cmpxchg64_release(...) \
123  	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
124  #elif defined(arch_cmpxchg64)
125  #define raw_cmpxchg64_release arch_cmpxchg64
126  #else
127  extern void raw_cmpxchg64_release_not_implemented(void);
128  #define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
129  #endif
130  
131  #if defined(arch_cmpxchg64_relaxed)
132  #define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
133  #elif defined(arch_cmpxchg64)
134  #define raw_cmpxchg64_relaxed arch_cmpxchg64
135  #else
136  extern void raw_cmpxchg64_relaxed_not_implemented(void);
137  #define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
138  #endif
139  
140  #if defined(arch_cmpxchg128)
141  #define raw_cmpxchg128 arch_cmpxchg128
142  #elif defined(arch_cmpxchg128_relaxed)
143  #define raw_cmpxchg128(...) \
144  	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
145  #else
146  extern void raw_cmpxchg128_not_implemented(void);
147  #define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
148  #endif
149  
150  #if defined(arch_cmpxchg128_acquire)
151  #define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
152  #elif defined(arch_cmpxchg128_relaxed)
153  #define raw_cmpxchg128_acquire(...) \
154  	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
155  #elif defined(arch_cmpxchg128)
156  #define raw_cmpxchg128_acquire arch_cmpxchg128
157  #else
158  extern void raw_cmpxchg128_acquire_not_implemented(void);
159  #define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
160  #endif
161  
162  #if defined(arch_cmpxchg128_release)
163  #define raw_cmpxchg128_release arch_cmpxchg128_release
164  #elif defined(arch_cmpxchg128_relaxed)
165  #define raw_cmpxchg128_release(...) \
166  	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
167  #elif defined(arch_cmpxchg128)
168  #define raw_cmpxchg128_release arch_cmpxchg128
169  #else
170  extern void raw_cmpxchg128_release_not_implemented(void);
171  #define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
172  #endif
173  
174  #if defined(arch_cmpxchg128_relaxed)
175  #define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
176  #elif defined(arch_cmpxchg128)
177  #define raw_cmpxchg128_relaxed arch_cmpxchg128
178  #else
179  extern void raw_cmpxchg128_relaxed_not_implemented(void);
180  #define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
181  #endif
182  
183  #if defined(arch_try_cmpxchg)
184  #define raw_try_cmpxchg arch_try_cmpxchg
185  #elif defined(arch_try_cmpxchg_relaxed)
186  #define raw_try_cmpxchg(...) \
187  	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
188  #else
189  #define raw_try_cmpxchg(_ptr, _oldp, _new) \
190  ({ \
191  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
192  	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
193  	if (unlikely(___r != ___o)) \
194  		*___op = ___r; \
195  	likely(___r == ___o); \
196  })
197  #endif
198  
199  #if defined(arch_try_cmpxchg_acquire)
200  #define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
201  #elif defined(arch_try_cmpxchg_relaxed)
202  #define raw_try_cmpxchg_acquire(...) \
203  	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
204  #elif defined(arch_try_cmpxchg)
205  #define raw_try_cmpxchg_acquire arch_try_cmpxchg
206  #else
207  #define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
208  ({ \
209  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
210  	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
211  	if (unlikely(___r != ___o)) \
212  		*___op = ___r; \
213  	likely(___r == ___o); \
214  })
215  #endif
216  
217  #if defined(arch_try_cmpxchg_release)
218  #define raw_try_cmpxchg_release arch_try_cmpxchg_release
219  #elif defined(arch_try_cmpxchg_relaxed)
220  #define raw_try_cmpxchg_release(...) \
221  	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
222  #elif defined(arch_try_cmpxchg)
223  #define raw_try_cmpxchg_release arch_try_cmpxchg
224  #else
225  #define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
226  ({ \
227  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
228  	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
229  	if (unlikely(___r != ___o)) \
230  		*___op = ___r; \
231  	likely(___r == ___o); \
232  })
233  #endif
234  
235  #if defined(arch_try_cmpxchg_relaxed)
236  #define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
237  #elif defined(arch_try_cmpxchg)
238  #define raw_try_cmpxchg_relaxed arch_try_cmpxchg
239  #else
240  #define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
241  ({ \
242  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
243  	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
244  	if (unlikely(___r != ___o)) \
245  		*___op = ___r; \
246  	likely(___r == ___o); \
247  })
248  #endif
249  
250  #if defined(arch_try_cmpxchg64)
251  #define raw_try_cmpxchg64 arch_try_cmpxchg64
252  #elif defined(arch_try_cmpxchg64_relaxed)
253  #define raw_try_cmpxchg64(...) \
254  	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
255  #else
256  #define raw_try_cmpxchg64(_ptr, _oldp, _new) \
257  ({ \
258  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
259  	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
260  	if (unlikely(___r != ___o)) \
261  		*___op = ___r; \
262  	likely(___r == ___o); \
263  })
264  #endif
265  
266  #if defined(arch_try_cmpxchg64_acquire)
267  #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
268  #elif defined(arch_try_cmpxchg64_relaxed)
269  #define raw_try_cmpxchg64_acquire(...) \
270  	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
271  #elif defined(arch_try_cmpxchg64)
272  #define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
273  #else
274  #define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
275  ({ \
276  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
277  	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
278  	if (unlikely(___r != ___o)) \
279  		*___op = ___r; \
280  	likely(___r == ___o); \
281  })
282  #endif
283  
284  #if defined(arch_try_cmpxchg64_release)
285  #define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
286  #elif defined(arch_try_cmpxchg64_relaxed)
287  #define raw_try_cmpxchg64_release(...) \
288  	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
289  #elif defined(arch_try_cmpxchg64)
290  #define raw_try_cmpxchg64_release arch_try_cmpxchg64
291  #else
292  #define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
293  ({ \
294  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
295  	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
296  	if (unlikely(___r != ___o)) \
297  		*___op = ___r; \
298  	likely(___r == ___o); \
299  })
300  #endif
301  
302  #if defined(arch_try_cmpxchg64_relaxed)
303  #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
304  #elif defined(arch_try_cmpxchg64)
305  #define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
306  #else
307  #define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
308  ({ \
309  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
310  	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
311  	if (unlikely(___r != ___o)) \
312  		*___op = ___r; \
313  	likely(___r == ___o); \
314  })
315  #endif
316  
317  #if defined(arch_try_cmpxchg128)
318  #define raw_try_cmpxchg128 arch_try_cmpxchg128
319  #elif defined(arch_try_cmpxchg128_relaxed)
320  #define raw_try_cmpxchg128(...) \
321  	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
322  #else
323  #define raw_try_cmpxchg128(_ptr, _oldp, _new) \
324  ({ \
325  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
326  	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
327  	if (unlikely(___r != ___o)) \
328  		*___op = ___r; \
329  	likely(___r == ___o); \
330  })
331  #endif
332  
333  #if defined(arch_try_cmpxchg128_acquire)
334  #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
335  #elif defined(arch_try_cmpxchg128_relaxed)
336  #define raw_try_cmpxchg128_acquire(...) \
337  	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
338  #elif defined(arch_try_cmpxchg128)
339  #define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
340  #else
341  #define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
342  ({ \
343  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
344  	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
345  	if (unlikely(___r != ___o)) \
346  		*___op = ___r; \
347  	likely(___r == ___o); \
348  })
349  #endif
350  
351  #if defined(arch_try_cmpxchg128_release)
352  #define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
353  #elif defined(arch_try_cmpxchg128_relaxed)
354  #define raw_try_cmpxchg128_release(...) \
355  	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
356  #elif defined(arch_try_cmpxchg128)
357  #define raw_try_cmpxchg128_release arch_try_cmpxchg128
358  #else
359  #define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
360  ({ \
361  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
362  	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
363  	if (unlikely(___r != ___o)) \
364  		*___op = ___r; \
365  	likely(___r == ___o); \
366  })
367  #endif
368  
369  #if defined(arch_try_cmpxchg128_relaxed)
370  #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
371  #elif defined(arch_try_cmpxchg128)
372  #define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
373  #else
374  #define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
375  ({ \
376  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
377  	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
378  	if (unlikely(___r != ___o)) \
379  		*___op = ___r; \
380  	likely(___r == ___o); \
381  })
382  #endif
383  
384  #define raw_cmpxchg_local arch_cmpxchg_local
385  
386  #ifdef arch_try_cmpxchg_local
387  #define raw_try_cmpxchg_local arch_try_cmpxchg_local
388  #else
389  #define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
390  ({ \
391  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
392  	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
393  	if (unlikely(___r != ___o)) \
394  		*___op = ___r; \
395  	likely(___r == ___o); \
396  })
397  #endif
398  
399  #define raw_cmpxchg64_local arch_cmpxchg64_local
400  
401  #ifdef arch_try_cmpxchg64_local
402  #define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
403  #else
404  #define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
405  ({ \
406  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
407  	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
408  	if (unlikely(___r != ___o)) \
409  		*___op = ___r; \
410  	likely(___r == ___o); \
411  })
412  #endif
413  
414  #define raw_cmpxchg128_local arch_cmpxchg128_local
415  
416  #ifdef arch_try_cmpxchg128_local
417  #define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
418  #else
419  #define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
420  ({ \
421  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
422  	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
423  	if (unlikely(___r != ___o)) \
424  		*___op = ___r; \
425  	likely(___r == ___o); \
426  })
427  #endif
428  
429  #define raw_sync_cmpxchg arch_sync_cmpxchg
430  
431  #ifdef arch_sync_try_cmpxchg
432  #define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
433  #else
434  #define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
435  ({ \
436  	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
437  	___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
438  	if (unlikely(___r != ___o)) \
439  		*___op = ___r; \
440  	likely(___r == ___o); \
441  })
442  #endif
443  
444  /**
445   * raw_atomic_read() - atomic load with relaxed ordering
446   * @v: pointer to atomic_t
447   *
448   * Atomically loads the value of @v with relaxed ordering.
449   *
450   * Safe to use in noinstr code; prefer atomic_read() elsewhere.
451   *
452   * Return: The value loaded from @v.
453   */
454  static __always_inline int
raw_atomic_read(const atomic_t * v)455  raw_atomic_read(const atomic_t *v)
456  {
457  	return arch_atomic_read(v);
458  }
459  
460  /**
461   * raw_atomic_read_acquire() - atomic load with acquire ordering
462   * @v: pointer to atomic_t
463   *
464   * Atomically loads the value of @v with acquire ordering.
465   *
466   * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
467   *
468   * Return: The value loaded from @v.
469   */
470  static __always_inline int
raw_atomic_read_acquire(const atomic_t * v)471  raw_atomic_read_acquire(const atomic_t *v)
472  {
473  #if defined(arch_atomic_read_acquire)
474  	return arch_atomic_read_acquire(v);
475  #else
476  	int ret;
477  
478  	if (__native_word(atomic_t)) {
479  		ret = smp_load_acquire(&(v)->counter);
480  	} else {
481  		ret = raw_atomic_read(v);
482  		__atomic_acquire_fence();
483  	}
484  
485  	return ret;
486  #endif
487  }
488  
489  /**
490   * raw_atomic_set() - atomic set with relaxed ordering
491   * @v: pointer to atomic_t
492   * @i: int value to assign
493   *
494   * Atomically sets @v to @i with relaxed ordering.
495   *
496   * Safe to use in noinstr code; prefer atomic_set() elsewhere.
497   *
498   * Return: Nothing.
499   */
500  static __always_inline void
raw_atomic_set(atomic_t * v,int i)501  raw_atomic_set(atomic_t *v, int i)
502  {
503  	arch_atomic_set(v, i);
504  }
505  
506  /**
507   * raw_atomic_set_release() - atomic set with release ordering
508   * @v: pointer to atomic_t
509   * @i: int value to assign
510   *
511   * Atomically sets @v to @i with release ordering.
512   *
513   * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
514   *
515   * Return: Nothing.
516   */
517  static __always_inline void
raw_atomic_set_release(atomic_t * v,int i)518  raw_atomic_set_release(atomic_t *v, int i)
519  {
520  #if defined(arch_atomic_set_release)
521  	arch_atomic_set_release(v, i);
522  #else
523  	if (__native_word(atomic_t)) {
524  		smp_store_release(&(v)->counter, i);
525  	} else {
526  		__atomic_release_fence();
527  		raw_atomic_set(v, i);
528  	}
529  #endif
530  }
531  
532  /**
533   * raw_atomic_add() - atomic add with relaxed ordering
534   * @i: int value to add
535   * @v: pointer to atomic_t
536   *
537   * Atomically updates @v to (@v + @i) with relaxed ordering.
538   *
539   * Safe to use in noinstr code; prefer atomic_add() elsewhere.
540   *
541   * Return: Nothing.
542   */
543  static __always_inline void
raw_atomic_add(int i,atomic_t * v)544  raw_atomic_add(int i, atomic_t *v)
545  {
546  	arch_atomic_add(i, v);
547  }
548  
549  /**
550   * raw_atomic_add_return() - atomic add with full ordering
551   * @i: int value to add
552   * @v: pointer to atomic_t
553   *
554   * Atomically updates @v to (@v + @i) with full ordering.
555   *
556   * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
557   *
558   * Return: The updated value of @v.
559   */
560  static __always_inline int
raw_atomic_add_return(int i,atomic_t * v)561  raw_atomic_add_return(int i, atomic_t *v)
562  {
563  #if defined(arch_atomic_add_return)
564  	return arch_atomic_add_return(i, v);
565  #elif defined(arch_atomic_add_return_relaxed)
566  	int ret;
567  	__atomic_pre_full_fence();
568  	ret = arch_atomic_add_return_relaxed(i, v);
569  	__atomic_post_full_fence();
570  	return ret;
571  #else
572  #error "Unable to define raw_atomic_add_return"
573  #endif
574  }
575  
576  /**
577   * raw_atomic_add_return_acquire() - atomic add with acquire ordering
578   * @i: int value to add
579   * @v: pointer to atomic_t
580   *
581   * Atomically updates @v to (@v + @i) with acquire ordering.
582   *
583   * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
584   *
585   * Return: The updated value of @v.
586   */
587  static __always_inline int
raw_atomic_add_return_acquire(int i,atomic_t * v)588  raw_atomic_add_return_acquire(int i, atomic_t *v)
589  {
590  #if defined(arch_atomic_add_return_acquire)
591  	return arch_atomic_add_return_acquire(i, v);
592  #elif defined(arch_atomic_add_return_relaxed)
593  	int ret = arch_atomic_add_return_relaxed(i, v);
594  	__atomic_acquire_fence();
595  	return ret;
596  #elif defined(arch_atomic_add_return)
597  	return arch_atomic_add_return(i, v);
598  #else
599  #error "Unable to define raw_atomic_add_return_acquire"
600  #endif
601  }
602  
603  /**
604   * raw_atomic_add_return_release() - atomic add with release ordering
605   * @i: int value to add
606   * @v: pointer to atomic_t
607   *
608   * Atomically updates @v to (@v + @i) with release ordering.
609   *
610   * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
611   *
612   * Return: The updated value of @v.
613   */
614  static __always_inline int
raw_atomic_add_return_release(int i,atomic_t * v)615  raw_atomic_add_return_release(int i, atomic_t *v)
616  {
617  #if defined(arch_atomic_add_return_release)
618  	return arch_atomic_add_return_release(i, v);
619  #elif defined(arch_atomic_add_return_relaxed)
620  	__atomic_release_fence();
621  	return arch_atomic_add_return_relaxed(i, v);
622  #elif defined(arch_atomic_add_return)
623  	return arch_atomic_add_return(i, v);
624  #else
625  #error "Unable to define raw_atomic_add_return_release"
626  #endif
627  }
628  
629  /**
630   * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
631   * @i: int value to add
632   * @v: pointer to atomic_t
633   *
634   * Atomically updates @v to (@v + @i) with relaxed ordering.
635   *
636   * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
637   *
638   * Return: The updated value of @v.
639   */
640  static __always_inline int
raw_atomic_add_return_relaxed(int i,atomic_t * v)641  raw_atomic_add_return_relaxed(int i, atomic_t *v)
642  {
643  #if defined(arch_atomic_add_return_relaxed)
644  	return arch_atomic_add_return_relaxed(i, v);
645  #elif defined(arch_atomic_add_return)
646  	return arch_atomic_add_return(i, v);
647  #else
648  #error "Unable to define raw_atomic_add_return_relaxed"
649  #endif
650  }
651  
652  /**
653   * raw_atomic_fetch_add() - atomic add with full ordering
654   * @i: int value to add
655   * @v: pointer to atomic_t
656   *
657   * Atomically updates @v to (@v + @i) with full ordering.
658   *
659   * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
660   *
661   * Return: The original value of @v.
662   */
663  static __always_inline int
raw_atomic_fetch_add(int i,atomic_t * v)664  raw_atomic_fetch_add(int i, atomic_t *v)
665  {
666  #if defined(arch_atomic_fetch_add)
667  	return arch_atomic_fetch_add(i, v);
668  #elif defined(arch_atomic_fetch_add_relaxed)
669  	int ret;
670  	__atomic_pre_full_fence();
671  	ret = arch_atomic_fetch_add_relaxed(i, v);
672  	__atomic_post_full_fence();
673  	return ret;
674  #else
675  #error "Unable to define raw_atomic_fetch_add"
676  #endif
677  }
678  
679  /**
680   * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
681   * @i: int value to add
682   * @v: pointer to atomic_t
683   *
684   * Atomically updates @v to (@v + @i) with acquire ordering.
685   *
686   * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
687   *
688   * Return: The original value of @v.
689   */
690  static __always_inline int
raw_atomic_fetch_add_acquire(int i,atomic_t * v)691  raw_atomic_fetch_add_acquire(int i, atomic_t *v)
692  {
693  #if defined(arch_atomic_fetch_add_acquire)
694  	return arch_atomic_fetch_add_acquire(i, v);
695  #elif defined(arch_atomic_fetch_add_relaxed)
696  	int ret = arch_atomic_fetch_add_relaxed(i, v);
697  	__atomic_acquire_fence();
698  	return ret;
699  #elif defined(arch_atomic_fetch_add)
700  	return arch_atomic_fetch_add(i, v);
701  #else
702  #error "Unable to define raw_atomic_fetch_add_acquire"
703  #endif
704  }
705  
706  /**
707   * raw_atomic_fetch_add_release() - atomic add with release ordering
708   * @i: int value to add
709   * @v: pointer to atomic_t
710   *
711   * Atomically updates @v to (@v + @i) with release ordering.
712   *
713   * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
714   *
715   * Return: The original value of @v.
716   */
717  static __always_inline int
raw_atomic_fetch_add_release(int i,atomic_t * v)718  raw_atomic_fetch_add_release(int i, atomic_t *v)
719  {
720  #if defined(arch_atomic_fetch_add_release)
721  	return arch_atomic_fetch_add_release(i, v);
722  #elif defined(arch_atomic_fetch_add_relaxed)
723  	__atomic_release_fence();
724  	return arch_atomic_fetch_add_relaxed(i, v);
725  #elif defined(arch_atomic_fetch_add)
726  	return arch_atomic_fetch_add(i, v);
727  #else
728  #error "Unable to define raw_atomic_fetch_add_release"
729  #endif
730  }
731  
732  /**
733   * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
734   * @i: int value to add
735   * @v: pointer to atomic_t
736   *
737   * Atomically updates @v to (@v + @i) with relaxed ordering.
738   *
739   * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
740   *
741   * Return: The original value of @v.
742   */
743  static __always_inline int
raw_atomic_fetch_add_relaxed(int i,atomic_t * v)744  raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
745  {
746  #if defined(arch_atomic_fetch_add_relaxed)
747  	return arch_atomic_fetch_add_relaxed(i, v);
748  #elif defined(arch_atomic_fetch_add)
749  	return arch_atomic_fetch_add(i, v);
750  #else
751  #error "Unable to define raw_atomic_fetch_add_relaxed"
752  #endif
753  }
754  
755  /**
756   * raw_atomic_sub() - atomic subtract with relaxed ordering
757   * @i: int value to subtract
758   * @v: pointer to atomic_t
759   *
760   * Atomically updates @v to (@v - @i) with relaxed ordering.
761   *
762   * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
763   *
764   * Return: Nothing.
765   */
766  static __always_inline void
raw_atomic_sub(int i,atomic_t * v)767  raw_atomic_sub(int i, atomic_t *v)
768  {
769  	arch_atomic_sub(i, v);
770  }
771  
772  /**
773   * raw_atomic_sub_return() - atomic subtract with full ordering
774   * @i: int value to subtract
775   * @v: pointer to atomic_t
776   *
777   * Atomically updates @v to (@v - @i) with full ordering.
778   *
779   * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
780   *
781   * Return: The updated value of @v.
782   */
783  static __always_inline int
raw_atomic_sub_return(int i,atomic_t * v)784  raw_atomic_sub_return(int i, atomic_t *v)
785  {
786  #if defined(arch_atomic_sub_return)
787  	return arch_atomic_sub_return(i, v);
788  #elif defined(arch_atomic_sub_return_relaxed)
789  	int ret;
790  	__atomic_pre_full_fence();
791  	ret = arch_atomic_sub_return_relaxed(i, v);
792  	__atomic_post_full_fence();
793  	return ret;
794  #else
795  #error "Unable to define raw_atomic_sub_return"
796  #endif
797  }
798  
799  /**
800   * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
801   * @i: int value to subtract
802   * @v: pointer to atomic_t
803   *
804   * Atomically updates @v to (@v - @i) with acquire ordering.
805   *
806   * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
807   *
808   * Return: The updated value of @v.
809   */
810  static __always_inline int
raw_atomic_sub_return_acquire(int i,atomic_t * v)811  raw_atomic_sub_return_acquire(int i, atomic_t *v)
812  {
813  #if defined(arch_atomic_sub_return_acquire)
814  	return arch_atomic_sub_return_acquire(i, v);
815  #elif defined(arch_atomic_sub_return_relaxed)
816  	int ret = arch_atomic_sub_return_relaxed(i, v);
817  	__atomic_acquire_fence();
818  	return ret;
819  #elif defined(arch_atomic_sub_return)
820  	return arch_atomic_sub_return(i, v);
821  #else
822  #error "Unable to define raw_atomic_sub_return_acquire"
823  #endif
824  }
825  
826  /**
827   * raw_atomic_sub_return_release() - atomic subtract with release ordering
828   * @i: int value to subtract
829   * @v: pointer to atomic_t
830   *
831   * Atomically updates @v to (@v - @i) with release ordering.
832   *
833   * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
834   *
835   * Return: The updated value of @v.
836   */
837  static __always_inline int
raw_atomic_sub_return_release(int i,atomic_t * v)838  raw_atomic_sub_return_release(int i, atomic_t *v)
839  {
840  #if defined(arch_atomic_sub_return_release)
841  	return arch_atomic_sub_return_release(i, v);
842  #elif defined(arch_atomic_sub_return_relaxed)
843  	__atomic_release_fence();
844  	return arch_atomic_sub_return_relaxed(i, v);
845  #elif defined(arch_atomic_sub_return)
846  	return arch_atomic_sub_return(i, v);
847  #else
848  #error "Unable to define raw_atomic_sub_return_release"
849  #endif
850  }
851  
852  /**
853   * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
854   * @i: int value to subtract
855   * @v: pointer to atomic_t
856   *
857   * Atomically updates @v to (@v - @i) with relaxed ordering.
858   *
859   * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
860   *
861   * Return: The updated value of @v.
862   */
863  static __always_inline int
raw_atomic_sub_return_relaxed(int i,atomic_t * v)864  raw_atomic_sub_return_relaxed(int i, atomic_t *v)
865  {
866  #if defined(arch_atomic_sub_return_relaxed)
867  	return arch_atomic_sub_return_relaxed(i, v);
868  #elif defined(arch_atomic_sub_return)
869  	return arch_atomic_sub_return(i, v);
870  #else
871  #error "Unable to define raw_atomic_sub_return_relaxed"
872  #endif
873  }
874  
875  /**
876   * raw_atomic_fetch_sub() - atomic subtract with full ordering
877   * @i: int value to subtract
878   * @v: pointer to atomic_t
879   *
880   * Atomically updates @v to (@v - @i) with full ordering.
881   *
882   * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
883   *
884   * Return: The original value of @v.
885   */
886  static __always_inline int
raw_atomic_fetch_sub(int i,atomic_t * v)887  raw_atomic_fetch_sub(int i, atomic_t *v)
888  {
889  #if defined(arch_atomic_fetch_sub)
890  	return arch_atomic_fetch_sub(i, v);
891  #elif defined(arch_atomic_fetch_sub_relaxed)
892  	int ret;
893  	__atomic_pre_full_fence();
894  	ret = arch_atomic_fetch_sub_relaxed(i, v);
895  	__atomic_post_full_fence();
896  	return ret;
897  #else
898  #error "Unable to define raw_atomic_fetch_sub"
899  #endif
900  }
901  
902  /**
903   * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
904   * @i: int value to subtract
905   * @v: pointer to atomic_t
906   *
907   * Atomically updates @v to (@v - @i) with acquire ordering.
908   *
909   * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
910   *
911   * Return: The original value of @v.
912   */
913  static __always_inline int
raw_atomic_fetch_sub_acquire(int i,atomic_t * v)914  raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
915  {
916  #if defined(arch_atomic_fetch_sub_acquire)
917  	return arch_atomic_fetch_sub_acquire(i, v);
918  #elif defined(arch_atomic_fetch_sub_relaxed)
919  	int ret = arch_atomic_fetch_sub_relaxed(i, v);
920  	__atomic_acquire_fence();
921  	return ret;
922  #elif defined(arch_atomic_fetch_sub)
923  	return arch_atomic_fetch_sub(i, v);
924  #else
925  #error "Unable to define raw_atomic_fetch_sub_acquire"
926  #endif
927  }
928  
929  /**
930   * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
931   * @i: int value to subtract
932   * @v: pointer to atomic_t
933   *
934   * Atomically updates @v to (@v - @i) with release ordering.
935   *
936   * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
937   *
938   * Return: The original value of @v.
939   */
940  static __always_inline int
raw_atomic_fetch_sub_release(int i,atomic_t * v)941  raw_atomic_fetch_sub_release(int i, atomic_t *v)
942  {
943  #if defined(arch_atomic_fetch_sub_release)
944  	return arch_atomic_fetch_sub_release(i, v);
945  #elif defined(arch_atomic_fetch_sub_relaxed)
946  	__atomic_release_fence();
947  	return arch_atomic_fetch_sub_relaxed(i, v);
948  #elif defined(arch_atomic_fetch_sub)
949  	return arch_atomic_fetch_sub(i, v);
950  #else
951  #error "Unable to define raw_atomic_fetch_sub_release"
952  #endif
953  }
954  
955  /**
956   * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
957   * @i: int value to subtract
958   * @v: pointer to atomic_t
959   *
960   * Atomically updates @v to (@v - @i) with relaxed ordering.
961   *
962   * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
963   *
964   * Return: The original value of @v.
965   */
966  static __always_inline int
raw_atomic_fetch_sub_relaxed(int i,atomic_t * v)967  raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
968  {
969  #if defined(arch_atomic_fetch_sub_relaxed)
970  	return arch_atomic_fetch_sub_relaxed(i, v);
971  #elif defined(arch_atomic_fetch_sub)
972  	return arch_atomic_fetch_sub(i, v);
973  #else
974  #error "Unable to define raw_atomic_fetch_sub_relaxed"
975  #endif
976  }
977  
978  /**
979   * raw_atomic_inc() - atomic increment with relaxed ordering
980   * @v: pointer to atomic_t
981   *
982   * Atomically updates @v to (@v + 1) with relaxed ordering.
983   *
984   * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
985   *
986   * Return: Nothing.
987   */
988  static __always_inline void
raw_atomic_inc(atomic_t * v)989  raw_atomic_inc(atomic_t *v)
990  {
991  #if defined(arch_atomic_inc)
992  	arch_atomic_inc(v);
993  #else
994  	raw_atomic_add(1, v);
995  #endif
996  }
997  
998  /**
999   * raw_atomic_inc_return() - atomic increment with full ordering
1000   * @v: pointer to atomic_t
1001   *
1002   * Atomically updates @v to (@v + 1) with full ordering.
1003   *
1004   * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
1005   *
1006   * Return: The updated value of @v.
1007   */
1008  static __always_inline int
raw_atomic_inc_return(atomic_t * v)1009  raw_atomic_inc_return(atomic_t *v)
1010  {
1011  #if defined(arch_atomic_inc_return)
1012  	return arch_atomic_inc_return(v);
1013  #elif defined(arch_atomic_inc_return_relaxed)
1014  	int ret;
1015  	__atomic_pre_full_fence();
1016  	ret = arch_atomic_inc_return_relaxed(v);
1017  	__atomic_post_full_fence();
1018  	return ret;
1019  #else
1020  	return raw_atomic_add_return(1, v);
1021  #endif
1022  }
1023  
1024  /**
1025   * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
1026   * @v: pointer to atomic_t
1027   *
1028   * Atomically updates @v to (@v + 1) with acquire ordering.
1029   *
1030   * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
1031   *
1032   * Return: The updated value of @v.
1033   */
1034  static __always_inline int
raw_atomic_inc_return_acquire(atomic_t * v)1035  raw_atomic_inc_return_acquire(atomic_t *v)
1036  {
1037  #if defined(arch_atomic_inc_return_acquire)
1038  	return arch_atomic_inc_return_acquire(v);
1039  #elif defined(arch_atomic_inc_return_relaxed)
1040  	int ret = arch_atomic_inc_return_relaxed(v);
1041  	__atomic_acquire_fence();
1042  	return ret;
1043  #elif defined(arch_atomic_inc_return)
1044  	return arch_atomic_inc_return(v);
1045  #else
1046  	return raw_atomic_add_return_acquire(1, v);
1047  #endif
1048  }
1049  
1050  /**
1051   * raw_atomic_inc_return_release() - atomic increment with release ordering
1052   * @v: pointer to atomic_t
1053   *
1054   * Atomically updates @v to (@v + 1) with release ordering.
1055   *
1056   * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
1057   *
1058   * Return: The updated value of @v.
1059   */
1060  static __always_inline int
raw_atomic_inc_return_release(atomic_t * v)1061  raw_atomic_inc_return_release(atomic_t *v)
1062  {
1063  #if defined(arch_atomic_inc_return_release)
1064  	return arch_atomic_inc_return_release(v);
1065  #elif defined(arch_atomic_inc_return_relaxed)
1066  	__atomic_release_fence();
1067  	return arch_atomic_inc_return_relaxed(v);
1068  #elif defined(arch_atomic_inc_return)
1069  	return arch_atomic_inc_return(v);
1070  #else
1071  	return raw_atomic_add_return_release(1, v);
1072  #endif
1073  }
1074  
1075  /**
1076   * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
1077   * @v: pointer to atomic_t
1078   *
1079   * Atomically updates @v to (@v + 1) with relaxed ordering.
1080   *
1081   * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
1082   *
1083   * Return: The updated value of @v.
1084   */
1085  static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t * v)1086  raw_atomic_inc_return_relaxed(atomic_t *v)
1087  {
1088  #if defined(arch_atomic_inc_return_relaxed)
1089  	return arch_atomic_inc_return_relaxed(v);
1090  #elif defined(arch_atomic_inc_return)
1091  	return arch_atomic_inc_return(v);
1092  #else
1093  	return raw_atomic_add_return_relaxed(1, v);
1094  #endif
1095  }
1096  
1097  /**
1098   * raw_atomic_fetch_inc() - atomic increment with full ordering
1099   * @v: pointer to atomic_t
1100   *
1101   * Atomically updates @v to (@v + 1) with full ordering.
1102   *
1103   * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
1104   *
1105   * Return: The original value of @v.
1106   */
1107  static __always_inline int
raw_atomic_fetch_inc(atomic_t * v)1108  raw_atomic_fetch_inc(atomic_t *v)
1109  {
1110  #if defined(arch_atomic_fetch_inc)
1111  	return arch_atomic_fetch_inc(v);
1112  #elif defined(arch_atomic_fetch_inc_relaxed)
1113  	int ret;
1114  	__atomic_pre_full_fence();
1115  	ret = arch_atomic_fetch_inc_relaxed(v);
1116  	__atomic_post_full_fence();
1117  	return ret;
1118  #else
1119  	return raw_atomic_fetch_add(1, v);
1120  #endif
1121  }
1122  
1123  /**
1124   * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
1125   * @v: pointer to atomic_t
1126   *
1127   * Atomically updates @v to (@v + 1) with acquire ordering.
1128   *
1129   * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
1130   *
1131   * Return: The original value of @v.
1132   */
1133  static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t * v)1134  raw_atomic_fetch_inc_acquire(atomic_t *v)
1135  {
1136  #if defined(arch_atomic_fetch_inc_acquire)
1137  	return arch_atomic_fetch_inc_acquire(v);
1138  #elif defined(arch_atomic_fetch_inc_relaxed)
1139  	int ret = arch_atomic_fetch_inc_relaxed(v);
1140  	__atomic_acquire_fence();
1141  	return ret;
1142  #elif defined(arch_atomic_fetch_inc)
1143  	return arch_atomic_fetch_inc(v);
1144  #else
1145  	return raw_atomic_fetch_add_acquire(1, v);
1146  #endif
1147  }
1148  
1149  /**
1150   * raw_atomic_fetch_inc_release() - atomic increment with release ordering
1151   * @v: pointer to atomic_t
1152   *
1153   * Atomically updates @v to (@v + 1) with release ordering.
1154   *
1155   * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
1156   *
1157   * Return: The original value of @v.
1158   */
1159  static __always_inline int
raw_atomic_fetch_inc_release(atomic_t * v)1160  raw_atomic_fetch_inc_release(atomic_t *v)
1161  {
1162  #if defined(arch_atomic_fetch_inc_release)
1163  	return arch_atomic_fetch_inc_release(v);
1164  #elif defined(arch_atomic_fetch_inc_relaxed)
1165  	__atomic_release_fence();
1166  	return arch_atomic_fetch_inc_relaxed(v);
1167  #elif defined(arch_atomic_fetch_inc)
1168  	return arch_atomic_fetch_inc(v);
1169  #else
1170  	return raw_atomic_fetch_add_release(1, v);
1171  #endif
1172  }
1173  
1174  /**
1175   * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
1176   * @v: pointer to atomic_t
1177   *
1178   * Atomically updates @v to (@v + 1) with relaxed ordering.
1179   *
1180   * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
1181   *
1182   * Return: The original value of @v.
1183   */
1184  static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t * v)1185  raw_atomic_fetch_inc_relaxed(atomic_t *v)
1186  {
1187  #if defined(arch_atomic_fetch_inc_relaxed)
1188  	return arch_atomic_fetch_inc_relaxed(v);
1189  #elif defined(arch_atomic_fetch_inc)
1190  	return arch_atomic_fetch_inc(v);
1191  #else
1192  	return raw_atomic_fetch_add_relaxed(1, v);
1193  #endif
1194  }
1195  
1196  /**
1197   * raw_atomic_dec() - atomic decrement with relaxed ordering
1198   * @v: pointer to atomic_t
1199   *
1200   * Atomically updates @v to (@v - 1) with relaxed ordering.
1201   *
1202   * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
1203   *
1204   * Return: Nothing.
1205   */
1206  static __always_inline void
raw_atomic_dec(atomic_t * v)1207  raw_atomic_dec(atomic_t *v)
1208  {
1209  #if defined(arch_atomic_dec)
1210  	arch_atomic_dec(v);
1211  #else
1212  	raw_atomic_sub(1, v);
1213  #endif
1214  }
1215  
1216  /**
1217   * raw_atomic_dec_return() - atomic decrement with full ordering
1218   * @v: pointer to atomic_t
1219   *
1220   * Atomically updates @v to (@v - 1) with full ordering.
1221   *
1222   * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
1223   *
1224   * Return: The updated value of @v.
1225   */
1226  static __always_inline int
raw_atomic_dec_return(atomic_t * v)1227  raw_atomic_dec_return(atomic_t *v)
1228  {
1229  #if defined(arch_atomic_dec_return)
1230  	return arch_atomic_dec_return(v);
1231  #elif defined(arch_atomic_dec_return_relaxed)
1232  	int ret;
1233  	__atomic_pre_full_fence();
1234  	ret = arch_atomic_dec_return_relaxed(v);
1235  	__atomic_post_full_fence();
1236  	return ret;
1237  #else
1238  	return raw_atomic_sub_return(1, v);
1239  #endif
1240  }
1241  
1242  /**
1243   * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
1244   * @v: pointer to atomic_t
1245   *
1246   * Atomically updates @v to (@v - 1) with acquire ordering.
1247   *
1248   * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
1249   *
1250   * Return: The updated value of @v.
1251   */
1252  static __always_inline int
raw_atomic_dec_return_acquire(atomic_t * v)1253  raw_atomic_dec_return_acquire(atomic_t *v)
1254  {
1255  #if defined(arch_atomic_dec_return_acquire)
1256  	return arch_atomic_dec_return_acquire(v);
1257  #elif defined(arch_atomic_dec_return_relaxed)
1258  	int ret = arch_atomic_dec_return_relaxed(v);
1259  	__atomic_acquire_fence();
1260  	return ret;
1261  #elif defined(arch_atomic_dec_return)
1262  	return arch_atomic_dec_return(v);
1263  #else
1264  	return raw_atomic_sub_return_acquire(1, v);
1265  #endif
1266  }
1267  
1268  /**
1269   * raw_atomic_dec_return_release() - atomic decrement with release ordering
1270   * @v: pointer to atomic_t
1271   *
1272   * Atomically updates @v to (@v - 1) with release ordering.
1273   *
1274   * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
1275   *
1276   * Return: The updated value of @v.
1277   */
1278  static __always_inline int
raw_atomic_dec_return_release(atomic_t * v)1279  raw_atomic_dec_return_release(atomic_t *v)
1280  {
1281  #if defined(arch_atomic_dec_return_release)
1282  	return arch_atomic_dec_return_release(v);
1283  #elif defined(arch_atomic_dec_return_relaxed)
1284  	__atomic_release_fence();
1285  	return arch_atomic_dec_return_relaxed(v);
1286  #elif defined(arch_atomic_dec_return)
1287  	return arch_atomic_dec_return(v);
1288  #else
1289  	return raw_atomic_sub_return_release(1, v);
1290  #endif
1291  }
1292  
1293  /**
1294   * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
1295   * @v: pointer to atomic_t
1296   *
1297   * Atomically updates @v to (@v - 1) with relaxed ordering.
1298   *
1299   * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
1300   *
1301   * Return: The updated value of @v.
1302   */
1303  static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t * v)1304  raw_atomic_dec_return_relaxed(atomic_t *v)
1305  {
1306  #if defined(arch_atomic_dec_return_relaxed)
1307  	return arch_atomic_dec_return_relaxed(v);
1308  #elif defined(arch_atomic_dec_return)
1309  	return arch_atomic_dec_return(v);
1310  #else
1311  	return raw_atomic_sub_return_relaxed(1, v);
1312  #endif
1313  }
1314  
1315  /**
1316   * raw_atomic_fetch_dec() - atomic decrement with full ordering
1317   * @v: pointer to atomic_t
1318   *
1319   * Atomically updates @v to (@v - 1) with full ordering.
1320   *
1321   * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
1322   *
1323   * Return: The original value of @v.
1324   */
1325  static __always_inline int
raw_atomic_fetch_dec(atomic_t * v)1326  raw_atomic_fetch_dec(atomic_t *v)
1327  {
1328  #if defined(arch_atomic_fetch_dec)
1329  	return arch_atomic_fetch_dec(v);
1330  #elif defined(arch_atomic_fetch_dec_relaxed)
1331  	int ret;
1332  	__atomic_pre_full_fence();
1333  	ret = arch_atomic_fetch_dec_relaxed(v);
1334  	__atomic_post_full_fence();
1335  	return ret;
1336  #else
1337  	return raw_atomic_fetch_sub(1, v);
1338  #endif
1339  }
1340  
1341  /**
1342   * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
1343   * @v: pointer to atomic_t
1344   *
1345   * Atomically updates @v to (@v - 1) with acquire ordering.
1346   *
1347   * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
1348   *
1349   * Return: The original value of @v.
1350   */
1351  static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t * v)1352  raw_atomic_fetch_dec_acquire(atomic_t *v)
1353  {
1354  #if defined(arch_atomic_fetch_dec_acquire)
1355  	return arch_atomic_fetch_dec_acquire(v);
1356  #elif defined(arch_atomic_fetch_dec_relaxed)
1357  	int ret = arch_atomic_fetch_dec_relaxed(v);
1358  	__atomic_acquire_fence();
1359  	return ret;
1360  #elif defined(arch_atomic_fetch_dec)
1361  	return arch_atomic_fetch_dec(v);
1362  #else
1363  	return raw_atomic_fetch_sub_acquire(1, v);
1364  #endif
1365  }
1366  
1367  /**
1368   * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
1369   * @v: pointer to atomic_t
1370   *
1371   * Atomically updates @v to (@v - 1) with release ordering.
1372   *
1373   * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
1374   *
1375   * Return: The original value of @v.
1376   */
1377  static __always_inline int
raw_atomic_fetch_dec_release(atomic_t * v)1378  raw_atomic_fetch_dec_release(atomic_t *v)
1379  {
1380  #if defined(arch_atomic_fetch_dec_release)
1381  	return arch_atomic_fetch_dec_release(v);
1382  #elif defined(arch_atomic_fetch_dec_relaxed)
1383  	__atomic_release_fence();
1384  	return arch_atomic_fetch_dec_relaxed(v);
1385  #elif defined(arch_atomic_fetch_dec)
1386  	return arch_atomic_fetch_dec(v);
1387  #else
1388  	return raw_atomic_fetch_sub_release(1, v);
1389  #endif
1390  }
1391  
1392  /**
1393   * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
1394   * @v: pointer to atomic_t
1395   *
1396   * Atomically updates @v to (@v - 1) with relaxed ordering.
1397   *
1398   * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
1399   *
1400   * Return: The original value of @v.
1401   */
1402  static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t * v)1403  raw_atomic_fetch_dec_relaxed(atomic_t *v)
1404  {
1405  #if defined(arch_atomic_fetch_dec_relaxed)
1406  	return arch_atomic_fetch_dec_relaxed(v);
1407  #elif defined(arch_atomic_fetch_dec)
1408  	return arch_atomic_fetch_dec(v);
1409  #else
1410  	return raw_atomic_fetch_sub_relaxed(1, v);
1411  #endif
1412  }
1413  
1414  /**
1415   * raw_atomic_and() - atomic bitwise AND with relaxed ordering
1416   * @i: int value
1417   * @v: pointer to atomic_t
1418   *
1419   * Atomically updates @v to (@v & @i) with relaxed ordering.
1420   *
1421   * Safe to use in noinstr code; prefer atomic_and() elsewhere.
1422   *
1423   * Return: Nothing.
1424   */
1425  static __always_inline void
raw_atomic_and(int i,atomic_t * v)1426  raw_atomic_and(int i, atomic_t *v)
1427  {
1428  	arch_atomic_and(i, v);
1429  }
1430  
1431  /**
1432   * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
1433   * @i: int value
1434   * @v: pointer to atomic_t
1435   *
1436   * Atomically updates @v to (@v & @i) with full ordering.
1437   *
1438   * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
1439   *
1440   * Return: The original value of @v.
1441   */
1442  static __always_inline int
raw_atomic_fetch_and(int i,atomic_t * v)1443  raw_atomic_fetch_and(int i, atomic_t *v)
1444  {
1445  #if defined(arch_atomic_fetch_and)
1446  	return arch_atomic_fetch_and(i, v);
1447  #elif defined(arch_atomic_fetch_and_relaxed)
1448  	int ret;
1449  	__atomic_pre_full_fence();
1450  	ret = arch_atomic_fetch_and_relaxed(i, v);
1451  	__atomic_post_full_fence();
1452  	return ret;
1453  #else
1454  #error "Unable to define raw_atomic_fetch_and"
1455  #endif
1456  }
1457  
1458  /**
1459   * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
1460   * @i: int value
1461   * @v: pointer to atomic_t
1462   *
1463   * Atomically updates @v to (@v & @i) with acquire ordering.
1464   *
1465   * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
1466   *
1467   * Return: The original value of @v.
1468   */
1469  static __always_inline int
raw_atomic_fetch_and_acquire(int i,atomic_t * v)1470  raw_atomic_fetch_and_acquire(int i, atomic_t *v)
1471  {
1472  #if defined(arch_atomic_fetch_and_acquire)
1473  	return arch_atomic_fetch_and_acquire(i, v);
1474  #elif defined(arch_atomic_fetch_and_relaxed)
1475  	int ret = arch_atomic_fetch_and_relaxed(i, v);
1476  	__atomic_acquire_fence();
1477  	return ret;
1478  #elif defined(arch_atomic_fetch_and)
1479  	return arch_atomic_fetch_and(i, v);
1480  #else
1481  #error "Unable to define raw_atomic_fetch_and_acquire"
1482  #endif
1483  }
1484  
1485  /**
1486   * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
1487   * @i: int value
1488   * @v: pointer to atomic_t
1489   *
1490   * Atomically updates @v to (@v & @i) with release ordering.
1491   *
1492   * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
1493   *
1494   * Return: The original value of @v.
1495   */
1496  static __always_inline int
raw_atomic_fetch_and_release(int i,atomic_t * v)1497  raw_atomic_fetch_and_release(int i, atomic_t *v)
1498  {
1499  #if defined(arch_atomic_fetch_and_release)
1500  	return arch_atomic_fetch_and_release(i, v);
1501  #elif defined(arch_atomic_fetch_and_relaxed)
1502  	__atomic_release_fence();
1503  	return arch_atomic_fetch_and_relaxed(i, v);
1504  #elif defined(arch_atomic_fetch_and)
1505  	return arch_atomic_fetch_and(i, v);
1506  #else
1507  #error "Unable to define raw_atomic_fetch_and_release"
1508  #endif
1509  }
1510  
1511  /**
1512   * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
1513   * @i: int value
1514   * @v: pointer to atomic_t
1515   *
1516   * Atomically updates @v to (@v & @i) with relaxed ordering.
1517   *
1518   * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
1519   *
1520   * Return: The original value of @v.
1521   */
1522  static __always_inline int
raw_atomic_fetch_and_relaxed(int i,atomic_t * v)1523  raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
1524  {
1525  #if defined(arch_atomic_fetch_and_relaxed)
1526  	return arch_atomic_fetch_and_relaxed(i, v);
1527  #elif defined(arch_atomic_fetch_and)
1528  	return arch_atomic_fetch_and(i, v);
1529  #else
1530  #error "Unable to define raw_atomic_fetch_and_relaxed"
1531  #endif
1532  }
1533  
1534  /**
1535   * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
1536   * @i: int value
1537   * @v: pointer to atomic_t
1538   *
1539   * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1540   *
1541   * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
1542   *
1543   * Return: Nothing.
1544   */
1545  static __always_inline void
raw_atomic_andnot(int i,atomic_t * v)1546  raw_atomic_andnot(int i, atomic_t *v)
1547  {
1548  #if defined(arch_atomic_andnot)
1549  	arch_atomic_andnot(i, v);
1550  #else
1551  	raw_atomic_and(~i, v);
1552  #endif
1553  }
1554  
1555  /**
1556   * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
1557   * @i: int value
1558   * @v: pointer to atomic_t
1559   *
1560   * Atomically updates @v to (@v & ~@i) with full ordering.
1561   *
1562   * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
1563   *
1564   * Return: The original value of @v.
1565   */
1566  static __always_inline int
raw_atomic_fetch_andnot(int i,atomic_t * v)1567  raw_atomic_fetch_andnot(int i, atomic_t *v)
1568  {
1569  #if defined(arch_atomic_fetch_andnot)
1570  	return arch_atomic_fetch_andnot(i, v);
1571  #elif defined(arch_atomic_fetch_andnot_relaxed)
1572  	int ret;
1573  	__atomic_pre_full_fence();
1574  	ret = arch_atomic_fetch_andnot_relaxed(i, v);
1575  	__atomic_post_full_fence();
1576  	return ret;
1577  #else
1578  	return raw_atomic_fetch_and(~i, v);
1579  #endif
1580  }
1581  
1582  /**
1583   * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
1584   * @i: int value
1585   * @v: pointer to atomic_t
1586   *
1587   * Atomically updates @v to (@v & ~@i) with acquire ordering.
1588   *
1589   * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
1590   *
1591   * Return: The original value of @v.
1592   */
1593  static __always_inline int
raw_atomic_fetch_andnot_acquire(int i,atomic_t * v)1594  raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
1595  {
1596  #if defined(arch_atomic_fetch_andnot_acquire)
1597  	return arch_atomic_fetch_andnot_acquire(i, v);
1598  #elif defined(arch_atomic_fetch_andnot_relaxed)
1599  	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
1600  	__atomic_acquire_fence();
1601  	return ret;
1602  #elif defined(arch_atomic_fetch_andnot)
1603  	return arch_atomic_fetch_andnot(i, v);
1604  #else
1605  	return raw_atomic_fetch_and_acquire(~i, v);
1606  #endif
1607  }
1608  
1609  /**
1610   * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
1611   * @i: int value
1612   * @v: pointer to atomic_t
1613   *
1614   * Atomically updates @v to (@v & ~@i) with release ordering.
1615   *
1616   * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
1617   *
1618   * Return: The original value of @v.
1619   */
1620  static __always_inline int
raw_atomic_fetch_andnot_release(int i,atomic_t * v)1621  raw_atomic_fetch_andnot_release(int i, atomic_t *v)
1622  {
1623  #if defined(arch_atomic_fetch_andnot_release)
1624  	return arch_atomic_fetch_andnot_release(i, v);
1625  #elif defined(arch_atomic_fetch_andnot_relaxed)
1626  	__atomic_release_fence();
1627  	return arch_atomic_fetch_andnot_relaxed(i, v);
1628  #elif defined(arch_atomic_fetch_andnot)
1629  	return arch_atomic_fetch_andnot(i, v);
1630  #else
1631  	return raw_atomic_fetch_and_release(~i, v);
1632  #endif
1633  }
1634  
1635  /**
1636   * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
1637   * @i: int value
1638   * @v: pointer to atomic_t
1639   *
1640   * Atomically updates @v to (@v & ~@i) with relaxed ordering.
1641   *
1642   * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
1643   *
1644   * Return: The original value of @v.
1645   */
1646  static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i,atomic_t * v)1647  raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
1648  {
1649  #if defined(arch_atomic_fetch_andnot_relaxed)
1650  	return arch_atomic_fetch_andnot_relaxed(i, v);
1651  #elif defined(arch_atomic_fetch_andnot)
1652  	return arch_atomic_fetch_andnot(i, v);
1653  #else
1654  	return raw_atomic_fetch_and_relaxed(~i, v);
1655  #endif
1656  }
1657  
1658  /**
1659   * raw_atomic_or() - atomic bitwise OR with relaxed ordering
1660   * @i: int value
1661   * @v: pointer to atomic_t
1662   *
1663   * Atomically updates @v to (@v | @i) with relaxed ordering.
1664   *
1665   * Safe to use in noinstr code; prefer atomic_or() elsewhere.
1666   *
1667   * Return: Nothing.
1668   */
1669  static __always_inline void
raw_atomic_or(int i,atomic_t * v)1670  raw_atomic_or(int i, atomic_t *v)
1671  {
1672  	arch_atomic_or(i, v);
1673  }
1674  
1675  /**
1676   * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
1677   * @i: int value
1678   * @v: pointer to atomic_t
1679   *
1680   * Atomically updates @v to (@v | @i) with full ordering.
1681   *
1682   * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
1683   *
1684   * Return: The original value of @v.
1685   */
1686  static __always_inline int
raw_atomic_fetch_or(int i,atomic_t * v)1687  raw_atomic_fetch_or(int i, atomic_t *v)
1688  {
1689  #if defined(arch_atomic_fetch_or)
1690  	return arch_atomic_fetch_or(i, v);
1691  #elif defined(arch_atomic_fetch_or_relaxed)
1692  	int ret;
1693  	__atomic_pre_full_fence();
1694  	ret = arch_atomic_fetch_or_relaxed(i, v);
1695  	__atomic_post_full_fence();
1696  	return ret;
1697  #else
1698  #error "Unable to define raw_atomic_fetch_or"
1699  #endif
1700  }
1701  
1702  /**
1703   * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
1704   * @i: int value
1705   * @v: pointer to atomic_t
1706   *
1707   * Atomically updates @v to (@v | @i) with acquire ordering.
1708   *
1709   * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
1710   *
1711   * Return: The original value of @v.
1712   */
1713  static __always_inline int
raw_atomic_fetch_or_acquire(int i,atomic_t * v)1714  raw_atomic_fetch_or_acquire(int i, atomic_t *v)
1715  {
1716  #if defined(arch_atomic_fetch_or_acquire)
1717  	return arch_atomic_fetch_or_acquire(i, v);
1718  #elif defined(arch_atomic_fetch_or_relaxed)
1719  	int ret = arch_atomic_fetch_or_relaxed(i, v);
1720  	__atomic_acquire_fence();
1721  	return ret;
1722  #elif defined(arch_atomic_fetch_or)
1723  	return arch_atomic_fetch_or(i, v);
1724  #else
1725  #error "Unable to define raw_atomic_fetch_or_acquire"
1726  #endif
1727  }
1728  
1729  /**
1730   * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
1731   * @i: int value
1732   * @v: pointer to atomic_t
1733   *
1734   * Atomically updates @v to (@v | @i) with release ordering.
1735   *
1736   * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
1737   *
1738   * Return: The original value of @v.
1739   */
1740  static __always_inline int
raw_atomic_fetch_or_release(int i,atomic_t * v)1741  raw_atomic_fetch_or_release(int i, atomic_t *v)
1742  {
1743  #if defined(arch_atomic_fetch_or_release)
1744  	return arch_atomic_fetch_or_release(i, v);
1745  #elif defined(arch_atomic_fetch_or_relaxed)
1746  	__atomic_release_fence();
1747  	return arch_atomic_fetch_or_relaxed(i, v);
1748  #elif defined(arch_atomic_fetch_or)
1749  	return arch_atomic_fetch_or(i, v);
1750  #else
1751  #error "Unable to define raw_atomic_fetch_or_release"
1752  #endif
1753  }
1754  
1755  /**
1756   * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
1757   * @i: int value
1758   * @v: pointer to atomic_t
1759   *
1760   * Atomically updates @v to (@v | @i) with relaxed ordering.
1761   *
1762   * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
1763   *
1764   * Return: The original value of @v.
1765   */
1766  static __always_inline int
raw_atomic_fetch_or_relaxed(int i,atomic_t * v)1767  raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
1768  {
1769  #if defined(arch_atomic_fetch_or_relaxed)
1770  	return arch_atomic_fetch_or_relaxed(i, v);
1771  #elif defined(arch_atomic_fetch_or)
1772  	return arch_atomic_fetch_or(i, v);
1773  #else
1774  #error "Unable to define raw_atomic_fetch_or_relaxed"
1775  #endif
1776  }
1777  
1778  /**
1779   * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
1780   * @i: int value
1781   * @v: pointer to atomic_t
1782   *
1783   * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1784   *
1785   * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
1786   *
1787   * Return: Nothing.
1788   */
1789  static __always_inline void
raw_atomic_xor(int i,atomic_t * v)1790  raw_atomic_xor(int i, atomic_t *v)
1791  {
1792  	arch_atomic_xor(i, v);
1793  }
1794  
1795  /**
1796   * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
1797   * @i: int value
1798   * @v: pointer to atomic_t
1799   *
1800   * Atomically updates @v to (@v ^ @i) with full ordering.
1801   *
1802   * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
1803   *
1804   * Return: The original value of @v.
1805   */
1806  static __always_inline int
raw_atomic_fetch_xor(int i,atomic_t * v)1807  raw_atomic_fetch_xor(int i, atomic_t *v)
1808  {
1809  #if defined(arch_atomic_fetch_xor)
1810  	return arch_atomic_fetch_xor(i, v);
1811  #elif defined(arch_atomic_fetch_xor_relaxed)
1812  	int ret;
1813  	__atomic_pre_full_fence();
1814  	ret = arch_atomic_fetch_xor_relaxed(i, v);
1815  	__atomic_post_full_fence();
1816  	return ret;
1817  #else
1818  #error "Unable to define raw_atomic_fetch_xor"
1819  #endif
1820  }
1821  
1822  /**
1823   * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
1824   * @i: int value
1825   * @v: pointer to atomic_t
1826   *
1827   * Atomically updates @v to (@v ^ @i) with acquire ordering.
1828   *
1829   * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
1830   *
1831   * Return: The original value of @v.
1832   */
1833  static __always_inline int
raw_atomic_fetch_xor_acquire(int i,atomic_t * v)1834  raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
1835  {
1836  #if defined(arch_atomic_fetch_xor_acquire)
1837  	return arch_atomic_fetch_xor_acquire(i, v);
1838  #elif defined(arch_atomic_fetch_xor_relaxed)
1839  	int ret = arch_atomic_fetch_xor_relaxed(i, v);
1840  	__atomic_acquire_fence();
1841  	return ret;
1842  #elif defined(arch_atomic_fetch_xor)
1843  	return arch_atomic_fetch_xor(i, v);
1844  #else
1845  #error "Unable to define raw_atomic_fetch_xor_acquire"
1846  #endif
1847  }
1848  
1849  /**
1850   * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
1851   * @i: int value
1852   * @v: pointer to atomic_t
1853   *
1854   * Atomically updates @v to (@v ^ @i) with release ordering.
1855   *
1856   * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
1857   *
1858   * Return: The original value of @v.
1859   */
1860  static __always_inline int
raw_atomic_fetch_xor_release(int i,atomic_t * v)1861  raw_atomic_fetch_xor_release(int i, atomic_t *v)
1862  {
1863  #if defined(arch_atomic_fetch_xor_release)
1864  	return arch_atomic_fetch_xor_release(i, v);
1865  #elif defined(arch_atomic_fetch_xor_relaxed)
1866  	__atomic_release_fence();
1867  	return arch_atomic_fetch_xor_relaxed(i, v);
1868  #elif defined(arch_atomic_fetch_xor)
1869  	return arch_atomic_fetch_xor(i, v);
1870  #else
1871  #error "Unable to define raw_atomic_fetch_xor_release"
1872  #endif
1873  }
1874  
1875  /**
1876   * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
1877   * @i: int value
1878   * @v: pointer to atomic_t
1879   *
1880   * Atomically updates @v to (@v ^ @i) with relaxed ordering.
1881   *
1882   * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
1883   *
1884   * Return: The original value of @v.
1885   */
1886  static __always_inline int
raw_atomic_fetch_xor_relaxed(int i,atomic_t * v)1887  raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
1888  {
1889  #if defined(arch_atomic_fetch_xor_relaxed)
1890  	return arch_atomic_fetch_xor_relaxed(i, v);
1891  #elif defined(arch_atomic_fetch_xor)
1892  	return arch_atomic_fetch_xor(i, v);
1893  #else
1894  #error "Unable to define raw_atomic_fetch_xor_relaxed"
1895  #endif
1896  }
1897  
1898  /**
1899   * raw_atomic_xchg() - atomic exchange with full ordering
1900   * @v: pointer to atomic_t
1901   * @new: int value to assign
1902   *
1903   * Atomically updates @v to @new with full ordering.
1904   *
1905   * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
1906   *
1907   * Return: The original value of @v.
1908   */
1909  static __always_inline int
raw_atomic_xchg(atomic_t * v,int new)1910  raw_atomic_xchg(atomic_t *v, int new)
1911  {
1912  #if defined(arch_atomic_xchg)
1913  	return arch_atomic_xchg(v, new);
1914  #elif defined(arch_atomic_xchg_relaxed)
1915  	int ret;
1916  	__atomic_pre_full_fence();
1917  	ret = arch_atomic_xchg_relaxed(v, new);
1918  	__atomic_post_full_fence();
1919  	return ret;
1920  #else
1921  	return raw_xchg(&v->counter, new);
1922  #endif
1923  }
1924  
1925  /**
1926   * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
1927   * @v: pointer to atomic_t
1928   * @new: int value to assign
1929   *
1930   * Atomically updates @v to @new with acquire ordering.
1931   *
1932   * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
1933   *
1934   * Return: The original value of @v.
1935   */
1936  static __always_inline int
raw_atomic_xchg_acquire(atomic_t * v,int new)1937  raw_atomic_xchg_acquire(atomic_t *v, int new)
1938  {
1939  #if defined(arch_atomic_xchg_acquire)
1940  	return arch_atomic_xchg_acquire(v, new);
1941  #elif defined(arch_atomic_xchg_relaxed)
1942  	int ret = arch_atomic_xchg_relaxed(v, new);
1943  	__atomic_acquire_fence();
1944  	return ret;
1945  #elif defined(arch_atomic_xchg)
1946  	return arch_atomic_xchg(v, new);
1947  #else
1948  	return raw_xchg_acquire(&v->counter, new);
1949  #endif
1950  }
1951  
1952  /**
1953   * raw_atomic_xchg_release() - atomic exchange with release ordering
1954   * @v: pointer to atomic_t
1955   * @new: int value to assign
1956   *
1957   * Atomically updates @v to @new with release ordering.
1958   *
1959   * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
1960   *
1961   * Return: The original value of @v.
1962   */
1963  static __always_inline int
raw_atomic_xchg_release(atomic_t * v,int new)1964  raw_atomic_xchg_release(atomic_t *v, int new)
1965  {
1966  #if defined(arch_atomic_xchg_release)
1967  	return arch_atomic_xchg_release(v, new);
1968  #elif defined(arch_atomic_xchg_relaxed)
1969  	__atomic_release_fence();
1970  	return arch_atomic_xchg_relaxed(v, new);
1971  #elif defined(arch_atomic_xchg)
1972  	return arch_atomic_xchg(v, new);
1973  #else
1974  	return raw_xchg_release(&v->counter, new);
1975  #endif
1976  }
1977  
1978  /**
1979   * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
1980   * @v: pointer to atomic_t
1981   * @new: int value to assign
1982   *
1983   * Atomically updates @v to @new with relaxed ordering.
1984   *
1985   * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
1986   *
1987   * Return: The original value of @v.
1988   */
1989  static __always_inline int
raw_atomic_xchg_relaxed(atomic_t * v,int new)1990  raw_atomic_xchg_relaxed(atomic_t *v, int new)
1991  {
1992  #if defined(arch_atomic_xchg_relaxed)
1993  	return arch_atomic_xchg_relaxed(v, new);
1994  #elif defined(arch_atomic_xchg)
1995  	return arch_atomic_xchg(v, new);
1996  #else
1997  	return raw_xchg_relaxed(&v->counter, new);
1998  #endif
1999  }
2000  
2001  /**
2002   * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
2003   * @v: pointer to atomic_t
2004   * @old: int value to compare with
2005   * @new: int value to assign
2006   *
2007   * If (@v == @old), atomically updates @v to @new with full ordering.
2008   * Otherwise, @v is not modified and relaxed ordering is provided.
2009   *
2010   * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
2011   *
2012   * Return: The original value of @v.
2013   */
2014  static __always_inline int
raw_atomic_cmpxchg(atomic_t * v,int old,int new)2015  raw_atomic_cmpxchg(atomic_t *v, int old, int new)
2016  {
2017  #if defined(arch_atomic_cmpxchg)
2018  	return arch_atomic_cmpxchg(v, old, new);
2019  #elif defined(arch_atomic_cmpxchg_relaxed)
2020  	int ret;
2021  	__atomic_pre_full_fence();
2022  	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2023  	__atomic_post_full_fence();
2024  	return ret;
2025  #else
2026  	return raw_cmpxchg(&v->counter, old, new);
2027  #endif
2028  }
2029  
2030  /**
2031   * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2032   * @v: pointer to atomic_t
2033   * @old: int value to compare with
2034   * @new: int value to assign
2035   *
2036   * If (@v == @old), atomically updates @v to @new with acquire ordering.
2037   * Otherwise, @v is not modified and relaxed ordering is provided.
2038   *
2039   * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
2040   *
2041   * Return: The original value of @v.
2042   */
2043  static __always_inline int
raw_atomic_cmpxchg_acquire(atomic_t * v,int old,int new)2044  raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
2045  {
2046  #if defined(arch_atomic_cmpxchg_acquire)
2047  	return arch_atomic_cmpxchg_acquire(v, old, new);
2048  #elif defined(arch_atomic_cmpxchg_relaxed)
2049  	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
2050  	__atomic_acquire_fence();
2051  	return ret;
2052  #elif defined(arch_atomic_cmpxchg)
2053  	return arch_atomic_cmpxchg(v, old, new);
2054  #else
2055  	return raw_cmpxchg_acquire(&v->counter, old, new);
2056  #endif
2057  }
2058  
2059  /**
2060   * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
2061   * @v: pointer to atomic_t
2062   * @old: int value to compare with
2063   * @new: int value to assign
2064   *
2065   * If (@v == @old), atomically updates @v to @new with release ordering.
2066   * Otherwise, @v is not modified and relaxed ordering is provided.
2067   *
2068   * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
2069   *
2070   * Return: The original value of @v.
2071   */
2072  static __always_inline int
raw_atomic_cmpxchg_release(atomic_t * v,int old,int new)2073  raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
2074  {
2075  #if defined(arch_atomic_cmpxchg_release)
2076  	return arch_atomic_cmpxchg_release(v, old, new);
2077  #elif defined(arch_atomic_cmpxchg_relaxed)
2078  	__atomic_release_fence();
2079  	return arch_atomic_cmpxchg_relaxed(v, old, new);
2080  #elif defined(arch_atomic_cmpxchg)
2081  	return arch_atomic_cmpxchg(v, old, new);
2082  #else
2083  	return raw_cmpxchg_release(&v->counter, old, new);
2084  #endif
2085  }
2086  
2087  /**
2088   * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2089   * @v: pointer to atomic_t
2090   * @old: int value to compare with
2091   * @new: int value to assign
2092   *
2093   * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2094   * Otherwise, @v is not modified and relaxed ordering is provided.
2095   *
2096   * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
2097   *
2098   * Return: The original value of @v.
2099   */
2100  static __always_inline int
raw_atomic_cmpxchg_relaxed(atomic_t * v,int old,int new)2101  raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
2102  {
2103  #if defined(arch_atomic_cmpxchg_relaxed)
2104  	return arch_atomic_cmpxchg_relaxed(v, old, new);
2105  #elif defined(arch_atomic_cmpxchg)
2106  	return arch_atomic_cmpxchg(v, old, new);
2107  #else
2108  	return raw_cmpxchg_relaxed(&v->counter, old, new);
2109  #endif
2110  }
2111  
2112  /**
2113   * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2114   * @v: pointer to atomic_t
2115   * @old: pointer to int value to compare with
2116   * @new: int value to assign
2117   *
2118   * If (@v == @old), atomically updates @v to @new with full ordering.
2119   * Otherwise, @v is not modified, @old is updated to the current value of @v,
2120   * and relaxed ordering is provided.
2121   *
2122   * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2123   *
2124   * Return: @true if the exchange occured, @false otherwise.
2125   */
2126  static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t * v,int * old,int new)2127  raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2128  {
2129  #if defined(arch_atomic_try_cmpxchg)
2130  	return arch_atomic_try_cmpxchg(v, old, new);
2131  #elif defined(arch_atomic_try_cmpxchg_relaxed)
2132  	bool ret;
2133  	__atomic_pre_full_fence();
2134  	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2135  	__atomic_post_full_fence();
2136  	return ret;
2137  #else
2138  	int r, o = *old;
2139  	r = raw_atomic_cmpxchg(v, o, new);
2140  	if (unlikely(r != o))
2141  		*old = r;
2142  	return likely(r == o);
2143  #endif
2144  }
2145  
2146  /**
2147   * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
2148   * @v: pointer to atomic_t
2149   * @old: pointer to int value to compare with
2150   * @new: int value to assign
2151   *
2152   * If (@v == @old), atomically updates @v to @new with acquire ordering.
2153   * Otherwise, @v is not modified, @old is updated to the current value of @v,
2154   * and relaxed ordering is provided.
2155   *
2156   * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
2157   *
2158   * Return: @true if the exchange occured, @false otherwise.
2159   */
2160  static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t * v,int * old,int new)2161  raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
2162  {
2163  #if defined(arch_atomic_try_cmpxchg_acquire)
2164  	return arch_atomic_try_cmpxchg_acquire(v, old, new);
2165  #elif defined(arch_atomic_try_cmpxchg_relaxed)
2166  	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2167  	__atomic_acquire_fence();
2168  	return ret;
2169  #elif defined(arch_atomic_try_cmpxchg)
2170  	return arch_atomic_try_cmpxchg(v, old, new);
2171  #else
2172  	int r, o = *old;
2173  	r = raw_atomic_cmpxchg_acquire(v, o, new);
2174  	if (unlikely(r != o))
2175  		*old = r;
2176  	return likely(r == o);
2177  #endif
2178  }
2179  
2180  /**
2181   * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
2182   * @v: pointer to atomic_t
2183   * @old: pointer to int value to compare with
2184   * @new: int value to assign
2185   *
2186   * If (@v == @old), atomically updates @v to @new with release ordering.
2187   * Otherwise, @v is not modified, @old is updated to the current value of @v,
2188   * and relaxed ordering is provided.
2189   *
2190   * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
2191   *
2192   * Return: @true if the exchange occured, @false otherwise.
2193   */
2194  static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t * v,int * old,int new)2195  raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
2196  {
2197  #if defined(arch_atomic_try_cmpxchg_release)
2198  	return arch_atomic_try_cmpxchg_release(v, old, new);
2199  #elif defined(arch_atomic_try_cmpxchg_relaxed)
2200  	__atomic_release_fence();
2201  	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2202  #elif defined(arch_atomic_try_cmpxchg)
2203  	return arch_atomic_try_cmpxchg(v, old, new);
2204  #else
2205  	int r, o = *old;
2206  	r = raw_atomic_cmpxchg_release(v, o, new);
2207  	if (unlikely(r != o))
2208  		*old = r;
2209  	return likely(r == o);
2210  #endif
2211  }
2212  
2213  /**
2214   * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
2215   * @v: pointer to atomic_t
2216   * @old: pointer to int value to compare with
2217   * @new: int value to assign
2218   *
2219   * If (@v == @old), atomically updates @v to @new with relaxed ordering.
2220   * Otherwise, @v is not modified, @old is updated to the current value of @v,
2221   * and relaxed ordering is provided.
2222   *
2223   * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
2224   *
2225   * Return: @true if the exchange occured, @false otherwise.
2226   */
2227  static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t * v,int * old,int new)2228  raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
2229  {
2230  #if defined(arch_atomic_try_cmpxchg_relaxed)
2231  	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
2232  #elif defined(arch_atomic_try_cmpxchg)
2233  	return arch_atomic_try_cmpxchg(v, old, new);
2234  #else
2235  	int r, o = *old;
2236  	r = raw_atomic_cmpxchg_relaxed(v, o, new);
2237  	if (unlikely(r != o))
2238  		*old = r;
2239  	return likely(r == o);
2240  #endif
2241  }
2242  
2243  /**
2244   * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
2245   * @i: int value to subtract
2246   * @v: pointer to atomic_t
2247   *
2248   * Atomically updates @v to (@v - @i) with full ordering.
2249   *
2250   * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
2251   *
2252   * Return: @true if the resulting value of @v is zero, @false otherwise.
2253   */
2254  static __always_inline bool
raw_atomic_sub_and_test(int i,atomic_t * v)2255  raw_atomic_sub_and_test(int i, atomic_t *v)
2256  {
2257  #if defined(arch_atomic_sub_and_test)
2258  	return arch_atomic_sub_and_test(i, v);
2259  #else
2260  	return raw_atomic_sub_return(i, v) == 0;
2261  #endif
2262  }
2263  
2264  /**
2265   * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
2266   * @v: pointer to atomic_t
2267   *
2268   * Atomically updates @v to (@v - 1) with full ordering.
2269   *
2270   * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
2271   *
2272   * Return: @true if the resulting value of @v is zero, @false otherwise.
2273   */
2274  static __always_inline bool
raw_atomic_dec_and_test(atomic_t * v)2275  raw_atomic_dec_and_test(atomic_t *v)
2276  {
2277  #if defined(arch_atomic_dec_and_test)
2278  	return arch_atomic_dec_and_test(v);
2279  #else
2280  	return raw_atomic_dec_return(v) == 0;
2281  #endif
2282  }
2283  
2284  /**
2285   * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
2286   * @v: pointer to atomic_t
2287   *
2288   * Atomically updates @v to (@v + 1) with full ordering.
2289   *
2290   * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
2291   *
2292   * Return: @true if the resulting value of @v is zero, @false otherwise.
2293   */
2294  static __always_inline bool
raw_atomic_inc_and_test(atomic_t * v)2295  raw_atomic_inc_and_test(atomic_t *v)
2296  {
2297  #if defined(arch_atomic_inc_and_test)
2298  	return arch_atomic_inc_and_test(v);
2299  #else
2300  	return raw_atomic_inc_return(v) == 0;
2301  #endif
2302  }
2303  
2304  /**
2305   * raw_atomic_add_negative() - atomic add and test if negative with full ordering
2306   * @i: int value to add
2307   * @v: pointer to atomic_t
2308   *
2309   * Atomically updates @v to (@v + @i) with full ordering.
2310   *
2311   * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
2312   *
2313   * Return: @true if the resulting value of @v is negative, @false otherwise.
2314   */
2315  static __always_inline bool
raw_atomic_add_negative(int i,atomic_t * v)2316  raw_atomic_add_negative(int i, atomic_t *v)
2317  {
2318  #if defined(arch_atomic_add_negative)
2319  	return arch_atomic_add_negative(i, v);
2320  #elif defined(arch_atomic_add_negative_relaxed)
2321  	bool ret;
2322  	__atomic_pre_full_fence();
2323  	ret = arch_atomic_add_negative_relaxed(i, v);
2324  	__atomic_post_full_fence();
2325  	return ret;
2326  #else
2327  	return raw_atomic_add_return(i, v) < 0;
2328  #endif
2329  }
2330  
2331  /**
2332   * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
2333   * @i: int value to add
2334   * @v: pointer to atomic_t
2335   *
2336   * Atomically updates @v to (@v + @i) with acquire ordering.
2337   *
2338   * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
2339   *
2340   * Return: @true if the resulting value of @v is negative, @false otherwise.
2341   */
2342  static __always_inline bool
raw_atomic_add_negative_acquire(int i,atomic_t * v)2343  raw_atomic_add_negative_acquire(int i, atomic_t *v)
2344  {
2345  #if defined(arch_atomic_add_negative_acquire)
2346  	return arch_atomic_add_negative_acquire(i, v);
2347  #elif defined(arch_atomic_add_negative_relaxed)
2348  	bool ret = arch_atomic_add_negative_relaxed(i, v);
2349  	__atomic_acquire_fence();
2350  	return ret;
2351  #elif defined(arch_atomic_add_negative)
2352  	return arch_atomic_add_negative(i, v);
2353  #else
2354  	return raw_atomic_add_return_acquire(i, v) < 0;
2355  #endif
2356  }
2357  
2358  /**
2359   * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
2360   * @i: int value to add
2361   * @v: pointer to atomic_t
2362   *
2363   * Atomically updates @v to (@v + @i) with release ordering.
2364   *
2365   * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
2366   *
2367   * Return: @true if the resulting value of @v is negative, @false otherwise.
2368   */
2369  static __always_inline bool
raw_atomic_add_negative_release(int i,atomic_t * v)2370  raw_atomic_add_negative_release(int i, atomic_t *v)
2371  {
2372  #if defined(arch_atomic_add_negative_release)
2373  	return arch_atomic_add_negative_release(i, v);
2374  #elif defined(arch_atomic_add_negative_relaxed)
2375  	__atomic_release_fence();
2376  	return arch_atomic_add_negative_relaxed(i, v);
2377  #elif defined(arch_atomic_add_negative)
2378  	return arch_atomic_add_negative(i, v);
2379  #else
2380  	return raw_atomic_add_return_release(i, v) < 0;
2381  #endif
2382  }
2383  
2384  /**
2385   * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
2386   * @i: int value to add
2387   * @v: pointer to atomic_t
2388   *
2389   * Atomically updates @v to (@v + @i) with relaxed ordering.
2390   *
2391   * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
2392   *
2393   * Return: @true if the resulting value of @v is negative, @false otherwise.
2394   */
2395  static __always_inline bool
raw_atomic_add_negative_relaxed(int i,atomic_t * v)2396  raw_atomic_add_negative_relaxed(int i, atomic_t *v)
2397  {
2398  #if defined(arch_atomic_add_negative_relaxed)
2399  	return arch_atomic_add_negative_relaxed(i, v);
2400  #elif defined(arch_atomic_add_negative)
2401  	return arch_atomic_add_negative(i, v);
2402  #else
2403  	return raw_atomic_add_return_relaxed(i, v) < 0;
2404  #endif
2405  }
2406  
2407  /**
2408   * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
2409   * @v: pointer to atomic_t
2410   * @a: int value to add
2411   * @u: int value to compare with
2412   *
2413   * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2414   * Otherwise, @v is not modified and relaxed ordering is provided.
2415   *
2416   * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
2417   *
2418   * Return: The original value of @v.
2419   */
2420  static __always_inline int
raw_atomic_fetch_add_unless(atomic_t * v,int a,int u)2421  raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
2422  {
2423  #if defined(arch_atomic_fetch_add_unless)
2424  	return arch_atomic_fetch_add_unless(v, a, u);
2425  #else
2426  	int c = raw_atomic_read(v);
2427  
2428  	do {
2429  		if (unlikely(c == u))
2430  			break;
2431  	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));
2432  
2433  	return c;
2434  #endif
2435  }
2436  
2437  /**
2438   * raw_atomic_add_unless() - atomic add unless value with full ordering
2439   * @v: pointer to atomic_t
2440   * @a: int value to add
2441   * @u: int value to compare with
2442   *
2443   * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
2444   * Otherwise, @v is not modified and relaxed ordering is provided.
2445   *
2446   * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
2447   *
2448   * Return: @true if @v was updated, @false otherwise.
2449   */
2450  static __always_inline bool
raw_atomic_add_unless(atomic_t * v,int a,int u)2451  raw_atomic_add_unless(atomic_t *v, int a, int u)
2452  {
2453  #if defined(arch_atomic_add_unless)
2454  	return arch_atomic_add_unless(v, a, u);
2455  #else
2456  	return raw_atomic_fetch_add_unless(v, a, u) != u;
2457  #endif
2458  }
2459  
2460  /**
2461   * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
2462   * @v: pointer to atomic_t
2463   *
2464   * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
2465   * Otherwise, @v is not modified and relaxed ordering is provided.
2466   *
2467   * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
2468   *
2469   * Return: @true if @v was updated, @false otherwise.
2470   */
2471  static __always_inline bool
raw_atomic_inc_not_zero(atomic_t * v)2472  raw_atomic_inc_not_zero(atomic_t *v)
2473  {
2474  #if defined(arch_atomic_inc_not_zero)
2475  	return arch_atomic_inc_not_zero(v);
2476  #else
2477  	return raw_atomic_add_unless(v, 1, 0);
2478  #endif
2479  }
2480  
2481  /**
2482   * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
2483   * @v: pointer to atomic_t
2484   *
2485   * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
2486   * Otherwise, @v is not modified and relaxed ordering is provided.
2487   *
2488   * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
2489   *
2490   * Return: @true if @v was updated, @false otherwise.
2491   */
2492  static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t * v)2493  raw_atomic_inc_unless_negative(atomic_t *v)
2494  {
2495  #if defined(arch_atomic_inc_unless_negative)
2496  	return arch_atomic_inc_unless_negative(v);
2497  #else
2498  	int c = raw_atomic_read(v);
2499  
2500  	do {
2501  		if (unlikely(c < 0))
2502  			return false;
2503  	} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
2504  
2505  	return true;
2506  #endif
2507  }
2508  
2509  /**
2510   * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
2511   * @v: pointer to atomic_t
2512   *
2513   * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
2514   * Otherwise, @v is not modified and relaxed ordering is provided.
2515   *
2516   * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
2517   *
2518   * Return: @true if @v was updated, @false otherwise.
2519   */
2520  static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t * v)2521  raw_atomic_dec_unless_positive(atomic_t *v)
2522  {
2523  #if defined(arch_atomic_dec_unless_positive)
2524  	return arch_atomic_dec_unless_positive(v);
2525  #else
2526  	int c = raw_atomic_read(v);
2527  
2528  	do {
2529  		if (unlikely(c > 0))
2530  			return false;
2531  	} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
2532  
2533  	return true;
2534  #endif
2535  }
2536  
2537  /**
2538   * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
2539   * @v: pointer to atomic_t
2540   *
2541   * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
2542   * Otherwise, @v is not modified and relaxed ordering is provided.
2543   *
2544   * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
2545   *
2546   * Return: The old value of (@v - 1), regardless of whether @v was updated.
2547   */
2548  static __always_inline int
raw_atomic_dec_if_positive(atomic_t * v)2549  raw_atomic_dec_if_positive(atomic_t *v)
2550  {
2551  #if defined(arch_atomic_dec_if_positive)
2552  	return arch_atomic_dec_if_positive(v);
2553  #else
2554  	int dec, c = raw_atomic_read(v);
2555  
2556  	do {
2557  		dec = c - 1;
2558  		if (unlikely(dec < 0))
2559  			break;
2560  	} while (!raw_atomic_try_cmpxchg(v, &c, dec));
2561  
2562  	return dec;
2563  #endif
2564  }
2565  
2566  #ifdef CONFIG_GENERIC_ATOMIC64
2567  #include <asm-generic/atomic64.h>
2568  #endif
2569  
2570  /**
2571   * raw_atomic64_read() - atomic load with relaxed ordering
2572   * @v: pointer to atomic64_t
2573   *
2574   * Atomically loads the value of @v with relaxed ordering.
2575   *
2576   * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
2577   *
2578   * Return: The value loaded from @v.
2579   */
2580  static __always_inline s64
raw_atomic64_read(const atomic64_t * v)2581  raw_atomic64_read(const atomic64_t *v)
2582  {
2583  	return arch_atomic64_read(v);
2584  }
2585  
2586  /**
2587   * raw_atomic64_read_acquire() - atomic load with acquire ordering
2588   * @v: pointer to atomic64_t
2589   *
2590   * Atomically loads the value of @v with acquire ordering.
2591   *
2592   * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
2593   *
2594   * Return: The value loaded from @v.
2595   */
2596  static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t * v)2597  raw_atomic64_read_acquire(const atomic64_t *v)
2598  {
2599  #if defined(arch_atomic64_read_acquire)
2600  	return arch_atomic64_read_acquire(v);
2601  #else
2602  	s64 ret;
2603  
2604  	if (__native_word(atomic64_t)) {
2605  		ret = smp_load_acquire(&(v)->counter);
2606  	} else {
2607  		ret = raw_atomic64_read(v);
2608  		__atomic_acquire_fence();
2609  	}
2610  
2611  	return ret;
2612  #endif
2613  }
2614  
2615  /**
2616   * raw_atomic64_set() - atomic set with relaxed ordering
2617   * @v: pointer to atomic64_t
2618   * @i: s64 value to assign
2619   *
2620   * Atomically sets @v to @i with relaxed ordering.
2621   *
2622   * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
2623   *
2624   * Return: Nothing.
2625   */
2626  static __always_inline void
raw_atomic64_set(atomic64_t * v,s64 i)2627  raw_atomic64_set(atomic64_t *v, s64 i)
2628  {
2629  	arch_atomic64_set(v, i);
2630  }
2631  
2632  /**
2633   * raw_atomic64_set_release() - atomic set with release ordering
2634   * @v: pointer to atomic64_t
2635   * @i: s64 value to assign
2636   *
2637   * Atomically sets @v to @i with release ordering.
2638   *
2639   * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
2640   *
2641   * Return: Nothing.
2642   */
2643  static __always_inline void
raw_atomic64_set_release(atomic64_t * v,s64 i)2644  raw_atomic64_set_release(atomic64_t *v, s64 i)
2645  {
2646  #if defined(arch_atomic64_set_release)
2647  	arch_atomic64_set_release(v, i);
2648  #else
2649  	if (__native_word(atomic64_t)) {
2650  		smp_store_release(&(v)->counter, i);
2651  	} else {
2652  		__atomic_release_fence();
2653  		raw_atomic64_set(v, i);
2654  	}
2655  #endif
2656  }
2657  
2658  /**
2659   * raw_atomic64_add() - atomic add with relaxed ordering
2660   * @i: s64 value to add
2661   * @v: pointer to atomic64_t
2662   *
2663   * Atomically updates @v to (@v + @i) with relaxed ordering.
2664   *
2665   * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
2666   *
2667   * Return: Nothing.
2668   */
2669  static __always_inline void
raw_atomic64_add(s64 i,atomic64_t * v)2670  raw_atomic64_add(s64 i, atomic64_t *v)
2671  {
2672  	arch_atomic64_add(i, v);
2673  }
2674  
2675  /**
2676   * raw_atomic64_add_return() - atomic add with full ordering
2677   * @i: s64 value to add
2678   * @v: pointer to atomic64_t
2679   *
2680   * Atomically updates @v to (@v + @i) with full ordering.
2681   *
2682   * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
2683   *
2684   * Return: The updated value of @v.
2685   */
2686  static __always_inline s64
raw_atomic64_add_return(s64 i,atomic64_t * v)2687  raw_atomic64_add_return(s64 i, atomic64_t *v)
2688  {
2689  #if defined(arch_atomic64_add_return)
2690  	return arch_atomic64_add_return(i, v);
2691  #elif defined(arch_atomic64_add_return_relaxed)
2692  	s64 ret;
2693  	__atomic_pre_full_fence();
2694  	ret = arch_atomic64_add_return_relaxed(i, v);
2695  	__atomic_post_full_fence();
2696  	return ret;
2697  #else
2698  #error "Unable to define raw_atomic64_add_return"
2699  #endif
2700  }
2701  
2702  /**
2703   * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
2704   * @i: s64 value to add
2705   * @v: pointer to atomic64_t
2706   *
2707   * Atomically updates @v to (@v + @i) with acquire ordering.
2708   *
2709   * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
2710   *
2711   * Return: The updated value of @v.
2712   */
2713  static __always_inline s64
raw_atomic64_add_return_acquire(s64 i,atomic64_t * v)2714  raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
2715  {
2716  #if defined(arch_atomic64_add_return_acquire)
2717  	return arch_atomic64_add_return_acquire(i, v);
2718  #elif defined(arch_atomic64_add_return_relaxed)
2719  	s64 ret = arch_atomic64_add_return_relaxed(i, v);
2720  	__atomic_acquire_fence();
2721  	return ret;
2722  #elif defined(arch_atomic64_add_return)
2723  	return arch_atomic64_add_return(i, v);
2724  #else
2725  #error "Unable to define raw_atomic64_add_return_acquire"
2726  #endif
2727  }
2728  
2729  /**
2730   * raw_atomic64_add_return_release() - atomic add with release ordering
2731   * @i: s64 value to add
2732   * @v: pointer to atomic64_t
2733   *
2734   * Atomically updates @v to (@v + @i) with release ordering.
2735   *
2736   * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
2737   *
2738   * Return: The updated value of @v.
2739   */
2740  static __always_inline s64
raw_atomic64_add_return_release(s64 i,atomic64_t * v)2741  raw_atomic64_add_return_release(s64 i, atomic64_t *v)
2742  {
2743  #if defined(arch_atomic64_add_return_release)
2744  	return arch_atomic64_add_return_release(i, v);
2745  #elif defined(arch_atomic64_add_return_relaxed)
2746  	__atomic_release_fence();
2747  	return arch_atomic64_add_return_relaxed(i, v);
2748  #elif defined(arch_atomic64_add_return)
2749  	return arch_atomic64_add_return(i, v);
2750  #else
2751  #error "Unable to define raw_atomic64_add_return_release"
2752  #endif
2753  }
2754  
2755  /**
2756   * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
2757   * @i: s64 value to add
2758   * @v: pointer to atomic64_t
2759   *
2760   * Atomically updates @v to (@v + @i) with relaxed ordering.
2761   *
2762   * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
2763   *
2764   * Return: The updated value of @v.
2765   */
2766  static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i,atomic64_t * v)2767  raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
2768  {
2769  #if defined(arch_atomic64_add_return_relaxed)
2770  	return arch_atomic64_add_return_relaxed(i, v);
2771  #elif defined(arch_atomic64_add_return)
2772  	return arch_atomic64_add_return(i, v);
2773  #else
2774  #error "Unable to define raw_atomic64_add_return_relaxed"
2775  #endif
2776  }
2777  
2778  /**
2779   * raw_atomic64_fetch_add() - atomic add with full ordering
2780   * @i: s64 value to add
2781   * @v: pointer to atomic64_t
2782   *
2783   * Atomically updates @v to (@v + @i) with full ordering.
2784   *
2785   * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
2786   *
2787   * Return: The original value of @v.
2788   */
2789  static __always_inline s64
raw_atomic64_fetch_add(s64 i,atomic64_t * v)2790  raw_atomic64_fetch_add(s64 i, atomic64_t *v)
2791  {
2792  #if defined(arch_atomic64_fetch_add)
2793  	return arch_atomic64_fetch_add(i, v);
2794  #elif defined(arch_atomic64_fetch_add_relaxed)
2795  	s64 ret;
2796  	__atomic_pre_full_fence();
2797  	ret = arch_atomic64_fetch_add_relaxed(i, v);
2798  	__atomic_post_full_fence();
2799  	return ret;
2800  #else
2801  #error "Unable to define raw_atomic64_fetch_add"
2802  #endif
2803  }
2804  
2805  /**
2806   * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
2807   * @i: s64 value to add
2808   * @v: pointer to atomic64_t
2809   *
2810   * Atomically updates @v to (@v + @i) with acquire ordering.
2811   *
2812   * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
2813   *
2814   * Return: The original value of @v.
2815   */
2816  static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i,atomic64_t * v)2817  raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
2818  {
2819  #if defined(arch_atomic64_fetch_add_acquire)
2820  	return arch_atomic64_fetch_add_acquire(i, v);
2821  #elif defined(arch_atomic64_fetch_add_relaxed)
2822  	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
2823  	__atomic_acquire_fence();
2824  	return ret;
2825  #elif defined(arch_atomic64_fetch_add)
2826  	return arch_atomic64_fetch_add(i, v);
2827  #else
2828  #error "Unable to define raw_atomic64_fetch_add_acquire"
2829  #endif
2830  }
2831  
2832  /**
2833   * raw_atomic64_fetch_add_release() - atomic add with release ordering
2834   * @i: s64 value to add
2835   * @v: pointer to atomic64_t
2836   *
2837   * Atomically updates @v to (@v + @i) with release ordering.
2838   *
2839   * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
2840   *
2841   * Return: The original value of @v.
2842   */
2843  static __always_inline s64
raw_atomic64_fetch_add_release(s64 i,atomic64_t * v)2844  raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
2845  {
2846  #if defined(arch_atomic64_fetch_add_release)
2847  	return arch_atomic64_fetch_add_release(i, v);
2848  #elif defined(arch_atomic64_fetch_add_relaxed)
2849  	__atomic_release_fence();
2850  	return arch_atomic64_fetch_add_relaxed(i, v);
2851  #elif defined(arch_atomic64_fetch_add)
2852  	return arch_atomic64_fetch_add(i, v);
2853  #else
2854  #error "Unable to define raw_atomic64_fetch_add_release"
2855  #endif
2856  }
2857  
2858  /**
2859   * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
2860   * @i: s64 value to add
2861   * @v: pointer to atomic64_t
2862   *
2863   * Atomically updates @v to (@v + @i) with relaxed ordering.
2864   *
2865   * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
2866   *
2867   * Return: The original value of @v.
2868   */
2869  static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i,atomic64_t * v)2870  raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
2871  {
2872  #if defined(arch_atomic64_fetch_add_relaxed)
2873  	return arch_atomic64_fetch_add_relaxed(i, v);
2874  #elif defined(arch_atomic64_fetch_add)
2875  	return arch_atomic64_fetch_add(i, v);
2876  #else
2877  #error "Unable to define raw_atomic64_fetch_add_relaxed"
2878  #endif
2879  }
2880  
2881  /**
2882   * raw_atomic64_sub() - atomic subtract with relaxed ordering
2883   * @i: s64 value to subtract
2884   * @v: pointer to atomic64_t
2885   *
2886   * Atomically updates @v to (@v - @i) with relaxed ordering.
2887   *
2888   * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
2889   *
2890   * Return: Nothing.
2891   */
2892  static __always_inline void
raw_atomic64_sub(s64 i,atomic64_t * v)2893  raw_atomic64_sub(s64 i, atomic64_t *v)
2894  {
2895  	arch_atomic64_sub(i, v);
2896  }
2897  
2898  /**
2899   * raw_atomic64_sub_return() - atomic subtract with full ordering
2900   * @i: s64 value to subtract
2901   * @v: pointer to atomic64_t
2902   *
2903   * Atomically updates @v to (@v - @i) with full ordering.
2904   *
2905   * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
2906   *
2907   * Return: The updated value of @v.
2908   */
2909  static __always_inline s64
raw_atomic64_sub_return(s64 i,atomic64_t * v)2910  raw_atomic64_sub_return(s64 i, atomic64_t *v)
2911  {
2912  #if defined(arch_atomic64_sub_return)
2913  	return arch_atomic64_sub_return(i, v);
2914  #elif defined(arch_atomic64_sub_return_relaxed)
2915  	s64 ret;
2916  	__atomic_pre_full_fence();
2917  	ret = arch_atomic64_sub_return_relaxed(i, v);
2918  	__atomic_post_full_fence();
2919  	return ret;
2920  #else
2921  #error "Unable to define raw_atomic64_sub_return"
2922  #endif
2923  }
2924  
2925  /**
2926   * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
2927   * @i: s64 value to subtract
2928   * @v: pointer to atomic64_t
2929   *
2930   * Atomically updates @v to (@v - @i) with acquire ordering.
2931   *
2932   * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
2933   *
2934   * Return: The updated value of @v.
2935   */
2936  static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i,atomic64_t * v)2937  raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
2938  {
2939  #if defined(arch_atomic64_sub_return_acquire)
2940  	return arch_atomic64_sub_return_acquire(i, v);
2941  #elif defined(arch_atomic64_sub_return_relaxed)
2942  	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
2943  	__atomic_acquire_fence();
2944  	return ret;
2945  #elif defined(arch_atomic64_sub_return)
2946  	return arch_atomic64_sub_return(i, v);
2947  #else
2948  #error "Unable to define raw_atomic64_sub_return_acquire"
2949  #endif
2950  }
2951  
2952  /**
2953   * raw_atomic64_sub_return_release() - atomic subtract with release ordering
2954   * @i: s64 value to subtract
2955   * @v: pointer to atomic64_t
2956   *
2957   * Atomically updates @v to (@v - @i) with release ordering.
2958   *
2959   * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
2960   *
2961   * Return: The updated value of @v.
2962   */
2963  static __always_inline s64
raw_atomic64_sub_return_release(s64 i,atomic64_t * v)2964  raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
2965  {
2966  #if defined(arch_atomic64_sub_return_release)
2967  	return arch_atomic64_sub_return_release(i, v);
2968  #elif defined(arch_atomic64_sub_return_relaxed)
2969  	__atomic_release_fence();
2970  	return arch_atomic64_sub_return_relaxed(i, v);
2971  #elif defined(arch_atomic64_sub_return)
2972  	return arch_atomic64_sub_return(i, v);
2973  #else
2974  #error "Unable to define raw_atomic64_sub_return_release"
2975  #endif
2976  }
2977  
2978  /**
2979   * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
2980   * @i: s64 value to subtract
2981   * @v: pointer to atomic64_t
2982   *
2983   * Atomically updates @v to (@v - @i) with relaxed ordering.
2984   *
2985   * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
2986   *
2987   * Return: The updated value of @v.
2988   */
2989  static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i,atomic64_t * v)2990  raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
2991  {
2992  #if defined(arch_atomic64_sub_return_relaxed)
2993  	return arch_atomic64_sub_return_relaxed(i, v);
2994  #elif defined(arch_atomic64_sub_return)
2995  	return arch_atomic64_sub_return(i, v);
2996  #else
2997  #error "Unable to define raw_atomic64_sub_return_relaxed"
2998  #endif
2999  }
3000  
3001  /**
3002   * raw_atomic64_fetch_sub() - atomic subtract with full ordering
3003   * @i: s64 value to subtract
3004   * @v: pointer to atomic64_t
3005   *
3006   * Atomically updates @v to (@v - @i) with full ordering.
3007   *
3008   * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
3009   *
3010   * Return: The original value of @v.
3011   */
3012  static __always_inline s64
raw_atomic64_fetch_sub(s64 i,atomic64_t * v)3013  raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
3014  {
3015  #if defined(arch_atomic64_fetch_sub)
3016  	return arch_atomic64_fetch_sub(i, v);
3017  #elif defined(arch_atomic64_fetch_sub_relaxed)
3018  	s64 ret;
3019  	__atomic_pre_full_fence();
3020  	ret = arch_atomic64_fetch_sub_relaxed(i, v);
3021  	__atomic_post_full_fence();
3022  	return ret;
3023  #else
3024  #error "Unable to define raw_atomic64_fetch_sub"
3025  #endif
3026  }
3027  
3028  /**
3029   * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
3030   * @i: s64 value to subtract
3031   * @v: pointer to atomic64_t
3032   *
3033   * Atomically updates @v to (@v - @i) with acquire ordering.
3034   *
3035   * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
3036   *
3037   * Return: The original value of @v.
3038   */
3039  static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i,atomic64_t * v)3040  raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
3041  {
3042  #if defined(arch_atomic64_fetch_sub_acquire)
3043  	return arch_atomic64_fetch_sub_acquire(i, v);
3044  #elif defined(arch_atomic64_fetch_sub_relaxed)
3045  	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
3046  	__atomic_acquire_fence();
3047  	return ret;
3048  #elif defined(arch_atomic64_fetch_sub)
3049  	return arch_atomic64_fetch_sub(i, v);
3050  #else
3051  #error "Unable to define raw_atomic64_fetch_sub_acquire"
3052  #endif
3053  }
3054  
3055  /**
3056   * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
3057   * @i: s64 value to subtract
3058   * @v: pointer to atomic64_t
3059   *
3060   * Atomically updates @v to (@v - @i) with release ordering.
3061   *
3062   * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
3063   *
3064   * Return: The original value of @v.
3065   */
3066  static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i,atomic64_t * v)3067  raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
3068  {
3069  #if defined(arch_atomic64_fetch_sub_release)
3070  	return arch_atomic64_fetch_sub_release(i, v);
3071  #elif defined(arch_atomic64_fetch_sub_relaxed)
3072  	__atomic_release_fence();
3073  	return arch_atomic64_fetch_sub_relaxed(i, v);
3074  #elif defined(arch_atomic64_fetch_sub)
3075  	return arch_atomic64_fetch_sub(i, v);
3076  #else
3077  #error "Unable to define raw_atomic64_fetch_sub_release"
3078  #endif
3079  }
3080  
3081  /**
3082   * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
3083   * @i: s64 value to subtract
3084   * @v: pointer to atomic64_t
3085   *
3086   * Atomically updates @v to (@v - @i) with relaxed ordering.
3087   *
3088   * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
3089   *
3090   * Return: The original value of @v.
3091   */
3092  static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i,atomic64_t * v)3093  raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
3094  {
3095  #if defined(arch_atomic64_fetch_sub_relaxed)
3096  	return arch_atomic64_fetch_sub_relaxed(i, v);
3097  #elif defined(arch_atomic64_fetch_sub)
3098  	return arch_atomic64_fetch_sub(i, v);
3099  #else
3100  #error "Unable to define raw_atomic64_fetch_sub_relaxed"
3101  #endif
3102  }
3103  
3104  /**
3105   * raw_atomic64_inc() - atomic increment with relaxed ordering
3106   * @v: pointer to atomic64_t
3107   *
3108   * Atomically updates @v to (@v + 1) with relaxed ordering.
3109   *
3110   * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
3111   *
3112   * Return: Nothing.
3113   */
3114  static __always_inline void
raw_atomic64_inc(atomic64_t * v)3115  raw_atomic64_inc(atomic64_t *v)
3116  {
3117  #if defined(arch_atomic64_inc)
3118  	arch_atomic64_inc(v);
3119  #else
3120  	raw_atomic64_add(1, v);
3121  #endif
3122  }
3123  
3124  /**
3125   * raw_atomic64_inc_return() - atomic increment with full ordering
3126   * @v: pointer to atomic64_t
3127   *
3128   * Atomically updates @v to (@v + 1) with full ordering.
3129   *
3130   * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
3131   *
3132   * Return: The updated value of @v.
3133   */
3134  static __always_inline s64
raw_atomic64_inc_return(atomic64_t * v)3135  raw_atomic64_inc_return(atomic64_t *v)
3136  {
3137  #if defined(arch_atomic64_inc_return)
3138  	return arch_atomic64_inc_return(v);
3139  #elif defined(arch_atomic64_inc_return_relaxed)
3140  	s64 ret;
3141  	__atomic_pre_full_fence();
3142  	ret = arch_atomic64_inc_return_relaxed(v);
3143  	__atomic_post_full_fence();
3144  	return ret;
3145  #else
3146  	return raw_atomic64_add_return(1, v);
3147  #endif
3148  }
3149  
3150  /**
3151   * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
3152   * @v: pointer to atomic64_t
3153   *
3154   * Atomically updates @v to (@v + 1) with acquire ordering.
3155   *
3156   * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
3157   *
3158   * Return: The updated value of @v.
3159   */
3160  static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t * v)3161  raw_atomic64_inc_return_acquire(atomic64_t *v)
3162  {
3163  #if defined(arch_atomic64_inc_return_acquire)
3164  	return arch_atomic64_inc_return_acquire(v);
3165  #elif defined(arch_atomic64_inc_return_relaxed)
3166  	s64 ret = arch_atomic64_inc_return_relaxed(v);
3167  	__atomic_acquire_fence();
3168  	return ret;
3169  #elif defined(arch_atomic64_inc_return)
3170  	return arch_atomic64_inc_return(v);
3171  #else
3172  	return raw_atomic64_add_return_acquire(1, v);
3173  #endif
3174  }
3175  
3176  /**
3177   * raw_atomic64_inc_return_release() - atomic increment with release ordering
3178   * @v: pointer to atomic64_t
3179   *
3180   * Atomically updates @v to (@v + 1) with release ordering.
3181   *
3182   * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
3183   *
3184   * Return: The updated value of @v.
3185   */
3186  static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t * v)3187  raw_atomic64_inc_return_release(atomic64_t *v)
3188  {
3189  #if defined(arch_atomic64_inc_return_release)
3190  	return arch_atomic64_inc_return_release(v);
3191  #elif defined(arch_atomic64_inc_return_relaxed)
3192  	__atomic_release_fence();
3193  	return arch_atomic64_inc_return_relaxed(v);
3194  #elif defined(arch_atomic64_inc_return)
3195  	return arch_atomic64_inc_return(v);
3196  #else
3197  	return raw_atomic64_add_return_release(1, v);
3198  #endif
3199  }
3200  
3201  /**
3202   * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
3203   * @v: pointer to atomic64_t
3204   *
3205   * Atomically updates @v to (@v + 1) with relaxed ordering.
3206   *
3207   * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
3208   *
3209   * Return: The updated value of @v.
3210   */
3211  static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t * v)3212  raw_atomic64_inc_return_relaxed(atomic64_t *v)
3213  {
3214  #if defined(arch_atomic64_inc_return_relaxed)
3215  	return arch_atomic64_inc_return_relaxed(v);
3216  #elif defined(arch_atomic64_inc_return)
3217  	return arch_atomic64_inc_return(v);
3218  #else
3219  	return raw_atomic64_add_return_relaxed(1, v);
3220  #endif
3221  }
3222  
3223  /**
3224   * raw_atomic64_fetch_inc() - atomic increment with full ordering
3225   * @v: pointer to atomic64_t
3226   *
3227   * Atomically updates @v to (@v + 1) with full ordering.
3228   *
3229   * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
3230   *
3231   * Return: The original value of @v.
3232   */
3233  static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t * v)3234  raw_atomic64_fetch_inc(atomic64_t *v)
3235  {
3236  #if defined(arch_atomic64_fetch_inc)
3237  	return arch_atomic64_fetch_inc(v);
3238  #elif defined(arch_atomic64_fetch_inc_relaxed)
3239  	s64 ret;
3240  	__atomic_pre_full_fence();
3241  	ret = arch_atomic64_fetch_inc_relaxed(v);
3242  	__atomic_post_full_fence();
3243  	return ret;
3244  #else
3245  	return raw_atomic64_fetch_add(1, v);
3246  #endif
3247  }
3248  
3249  /**
3250   * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
3251   * @v: pointer to atomic64_t
3252   *
3253   * Atomically updates @v to (@v + 1) with acquire ordering.
3254   *
3255   * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
3256   *
3257   * Return: The original value of @v.
3258   */
3259  static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t * v)3260  raw_atomic64_fetch_inc_acquire(atomic64_t *v)
3261  {
3262  #if defined(arch_atomic64_fetch_inc_acquire)
3263  	return arch_atomic64_fetch_inc_acquire(v);
3264  #elif defined(arch_atomic64_fetch_inc_relaxed)
3265  	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
3266  	__atomic_acquire_fence();
3267  	return ret;
3268  #elif defined(arch_atomic64_fetch_inc)
3269  	return arch_atomic64_fetch_inc(v);
3270  #else
3271  	return raw_atomic64_fetch_add_acquire(1, v);
3272  #endif
3273  }
3274  
3275  /**
3276   * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
3277   * @v: pointer to atomic64_t
3278   *
3279   * Atomically updates @v to (@v + 1) with release ordering.
3280   *
3281   * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
3282   *
3283   * Return: The original value of @v.
3284   */
3285  static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t * v)3286  raw_atomic64_fetch_inc_release(atomic64_t *v)
3287  {
3288  #if defined(arch_atomic64_fetch_inc_release)
3289  	return arch_atomic64_fetch_inc_release(v);
3290  #elif defined(arch_atomic64_fetch_inc_relaxed)
3291  	__atomic_release_fence();
3292  	return arch_atomic64_fetch_inc_relaxed(v);
3293  #elif defined(arch_atomic64_fetch_inc)
3294  	return arch_atomic64_fetch_inc(v);
3295  #else
3296  	return raw_atomic64_fetch_add_release(1, v);
3297  #endif
3298  }
3299  
3300  /**
3301   * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
3302   * @v: pointer to atomic64_t
3303   *
3304   * Atomically updates @v to (@v + 1) with relaxed ordering.
3305   *
3306   * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
3307   *
3308   * Return: The original value of @v.
3309   */
3310  static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t * v)3311  raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
3312  {
3313  #if defined(arch_atomic64_fetch_inc_relaxed)
3314  	return arch_atomic64_fetch_inc_relaxed(v);
3315  #elif defined(arch_atomic64_fetch_inc)
3316  	return arch_atomic64_fetch_inc(v);
3317  #else
3318  	return raw_atomic64_fetch_add_relaxed(1, v);
3319  #endif
3320  }
3321  
3322  /**
3323   * raw_atomic64_dec() - atomic decrement with relaxed ordering
3324   * @v: pointer to atomic64_t
3325   *
3326   * Atomically updates @v to (@v - 1) with relaxed ordering.
3327   *
3328   * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
3329   *
3330   * Return: Nothing.
3331   */
3332  static __always_inline void
raw_atomic64_dec(atomic64_t * v)3333  raw_atomic64_dec(atomic64_t *v)
3334  {
3335  #if defined(arch_atomic64_dec)
3336  	arch_atomic64_dec(v);
3337  #else
3338  	raw_atomic64_sub(1, v);
3339  #endif
3340  }
3341  
3342  /**
3343   * raw_atomic64_dec_return() - atomic decrement with full ordering
3344   * @v: pointer to atomic64_t
3345   *
3346   * Atomically updates @v to (@v - 1) with full ordering.
3347   *
3348   * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
3349   *
3350   * Return: The updated value of @v.
3351   */
3352  static __always_inline s64
raw_atomic64_dec_return(atomic64_t * v)3353  raw_atomic64_dec_return(atomic64_t *v)
3354  {
3355  #if defined(arch_atomic64_dec_return)
3356  	return arch_atomic64_dec_return(v);
3357  #elif defined(arch_atomic64_dec_return_relaxed)
3358  	s64 ret;
3359  	__atomic_pre_full_fence();
3360  	ret = arch_atomic64_dec_return_relaxed(v);
3361  	__atomic_post_full_fence();
3362  	return ret;
3363  #else
3364  	return raw_atomic64_sub_return(1, v);
3365  #endif
3366  }
3367  
3368  /**
3369   * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
3370   * @v: pointer to atomic64_t
3371   *
3372   * Atomically updates @v to (@v - 1) with acquire ordering.
3373   *
3374   * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
3375   *
3376   * Return: The updated value of @v.
3377   */
3378  static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t * v)3379  raw_atomic64_dec_return_acquire(atomic64_t *v)
3380  {
3381  #if defined(arch_atomic64_dec_return_acquire)
3382  	return arch_atomic64_dec_return_acquire(v);
3383  #elif defined(arch_atomic64_dec_return_relaxed)
3384  	s64 ret = arch_atomic64_dec_return_relaxed(v);
3385  	__atomic_acquire_fence();
3386  	return ret;
3387  #elif defined(arch_atomic64_dec_return)
3388  	return arch_atomic64_dec_return(v);
3389  #else
3390  	return raw_atomic64_sub_return_acquire(1, v);
3391  #endif
3392  }
3393  
3394  /**
3395   * raw_atomic64_dec_return_release() - atomic decrement with release ordering
3396   * @v: pointer to atomic64_t
3397   *
3398   * Atomically updates @v to (@v - 1) with release ordering.
3399   *
3400   * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
3401   *
3402   * Return: The updated value of @v.
3403   */
3404  static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t * v)3405  raw_atomic64_dec_return_release(atomic64_t *v)
3406  {
3407  #if defined(arch_atomic64_dec_return_release)
3408  	return arch_atomic64_dec_return_release(v);
3409  #elif defined(arch_atomic64_dec_return_relaxed)
3410  	__atomic_release_fence();
3411  	return arch_atomic64_dec_return_relaxed(v);
3412  #elif defined(arch_atomic64_dec_return)
3413  	return arch_atomic64_dec_return(v);
3414  #else
3415  	return raw_atomic64_sub_return_release(1, v);
3416  #endif
3417  }
3418  
3419  /**
3420   * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
3421   * @v: pointer to atomic64_t
3422   *
3423   * Atomically updates @v to (@v - 1) with relaxed ordering.
3424   *
3425   * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
3426   *
3427   * Return: The updated value of @v.
3428   */
3429  static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t * v)3430  raw_atomic64_dec_return_relaxed(atomic64_t *v)
3431  {
3432  #if defined(arch_atomic64_dec_return_relaxed)
3433  	return arch_atomic64_dec_return_relaxed(v);
3434  #elif defined(arch_atomic64_dec_return)
3435  	return arch_atomic64_dec_return(v);
3436  #else
3437  	return raw_atomic64_sub_return_relaxed(1, v);
3438  #endif
3439  }
3440  
3441  /**
3442   * raw_atomic64_fetch_dec() - atomic decrement with full ordering
3443   * @v: pointer to atomic64_t
3444   *
3445   * Atomically updates @v to (@v - 1) with full ordering.
3446   *
3447   * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
3448   *
3449   * Return: The original value of @v.
3450   */
3451  static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t * v)3452  raw_atomic64_fetch_dec(atomic64_t *v)
3453  {
3454  #if defined(arch_atomic64_fetch_dec)
3455  	return arch_atomic64_fetch_dec(v);
3456  #elif defined(arch_atomic64_fetch_dec_relaxed)
3457  	s64 ret;
3458  	__atomic_pre_full_fence();
3459  	ret = arch_atomic64_fetch_dec_relaxed(v);
3460  	__atomic_post_full_fence();
3461  	return ret;
3462  #else
3463  	return raw_atomic64_fetch_sub(1, v);
3464  #endif
3465  }
3466  
3467  /**
3468   * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
3469   * @v: pointer to atomic64_t
3470   *
3471   * Atomically updates @v to (@v - 1) with acquire ordering.
3472   *
3473   * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
3474   *
3475   * Return: The original value of @v.
3476   */
3477  static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t * v)3478  raw_atomic64_fetch_dec_acquire(atomic64_t *v)
3479  {
3480  #if defined(arch_atomic64_fetch_dec_acquire)
3481  	return arch_atomic64_fetch_dec_acquire(v);
3482  #elif defined(arch_atomic64_fetch_dec_relaxed)
3483  	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
3484  	__atomic_acquire_fence();
3485  	return ret;
3486  #elif defined(arch_atomic64_fetch_dec)
3487  	return arch_atomic64_fetch_dec(v);
3488  #else
3489  	return raw_atomic64_fetch_sub_acquire(1, v);
3490  #endif
3491  }
3492  
3493  /**
3494   * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
3495   * @v: pointer to atomic64_t
3496   *
3497   * Atomically updates @v to (@v - 1) with release ordering.
3498   *
3499   * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
3500   *
3501   * Return: The original value of @v.
3502   */
3503  static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t * v)3504  raw_atomic64_fetch_dec_release(atomic64_t *v)
3505  {
3506  #if defined(arch_atomic64_fetch_dec_release)
3507  	return arch_atomic64_fetch_dec_release(v);
3508  #elif defined(arch_atomic64_fetch_dec_relaxed)
3509  	__atomic_release_fence();
3510  	return arch_atomic64_fetch_dec_relaxed(v);
3511  #elif defined(arch_atomic64_fetch_dec)
3512  	return arch_atomic64_fetch_dec(v);
3513  #else
3514  	return raw_atomic64_fetch_sub_release(1, v);
3515  #endif
3516  }
3517  
3518  /**
3519   * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
3520   * @v: pointer to atomic64_t
3521   *
3522   * Atomically updates @v to (@v - 1) with relaxed ordering.
3523   *
3524   * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
3525   *
3526   * Return: The original value of @v.
3527   */
3528  static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t * v)3529  raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
3530  {
3531  #if defined(arch_atomic64_fetch_dec_relaxed)
3532  	return arch_atomic64_fetch_dec_relaxed(v);
3533  #elif defined(arch_atomic64_fetch_dec)
3534  	return arch_atomic64_fetch_dec(v);
3535  #else
3536  	return raw_atomic64_fetch_sub_relaxed(1, v);
3537  #endif
3538  }
3539  
3540  /**
3541   * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
3542   * @i: s64 value
3543   * @v: pointer to atomic64_t
3544   *
3545   * Atomically updates @v to (@v & @i) with relaxed ordering.
3546   *
3547   * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
3548   *
3549   * Return: Nothing.
3550   */
3551  static __always_inline void
raw_atomic64_and(s64 i,atomic64_t * v)3552  raw_atomic64_and(s64 i, atomic64_t *v)
3553  {
3554  	arch_atomic64_and(i, v);
3555  }
3556  
3557  /**
3558   * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
3559   * @i: s64 value
3560   * @v: pointer to atomic64_t
3561   *
3562   * Atomically updates @v to (@v & @i) with full ordering.
3563   *
3564   * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
3565   *
3566   * Return: The original value of @v.
3567   */
3568  static __always_inline s64
raw_atomic64_fetch_and(s64 i,atomic64_t * v)3569  raw_atomic64_fetch_and(s64 i, atomic64_t *v)
3570  {
3571  #if defined(arch_atomic64_fetch_and)
3572  	return arch_atomic64_fetch_and(i, v);
3573  #elif defined(arch_atomic64_fetch_and_relaxed)
3574  	s64 ret;
3575  	__atomic_pre_full_fence();
3576  	ret = arch_atomic64_fetch_and_relaxed(i, v);
3577  	__atomic_post_full_fence();
3578  	return ret;
3579  #else
3580  #error "Unable to define raw_atomic64_fetch_and"
3581  #endif
3582  }
3583  
3584  /**
3585   * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
3586   * @i: s64 value
3587   * @v: pointer to atomic64_t
3588   *
3589   * Atomically updates @v to (@v & @i) with acquire ordering.
3590   *
3591   * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
3592   *
3593   * Return: The original value of @v.
3594   */
3595  static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i,atomic64_t * v)3596  raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
3597  {
3598  #if defined(arch_atomic64_fetch_and_acquire)
3599  	return arch_atomic64_fetch_and_acquire(i, v);
3600  #elif defined(arch_atomic64_fetch_and_relaxed)
3601  	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
3602  	__atomic_acquire_fence();
3603  	return ret;
3604  #elif defined(arch_atomic64_fetch_and)
3605  	return arch_atomic64_fetch_and(i, v);
3606  #else
3607  #error "Unable to define raw_atomic64_fetch_and_acquire"
3608  #endif
3609  }
3610  
3611  /**
3612   * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
3613   * @i: s64 value
3614   * @v: pointer to atomic64_t
3615   *
3616   * Atomically updates @v to (@v & @i) with release ordering.
3617   *
3618   * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
3619   *
3620   * Return: The original value of @v.
3621   */
3622  static __always_inline s64
raw_atomic64_fetch_and_release(s64 i,atomic64_t * v)3623  raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
3624  {
3625  #if defined(arch_atomic64_fetch_and_release)
3626  	return arch_atomic64_fetch_and_release(i, v);
3627  #elif defined(arch_atomic64_fetch_and_relaxed)
3628  	__atomic_release_fence();
3629  	return arch_atomic64_fetch_and_relaxed(i, v);
3630  #elif defined(arch_atomic64_fetch_and)
3631  	return arch_atomic64_fetch_and(i, v);
3632  #else
3633  #error "Unable to define raw_atomic64_fetch_and_release"
3634  #endif
3635  }
3636  
3637  /**
3638   * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
3639   * @i: s64 value
3640   * @v: pointer to atomic64_t
3641   *
3642   * Atomically updates @v to (@v & @i) with relaxed ordering.
3643   *
3644   * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
3645   *
3646   * Return: The original value of @v.
3647   */
3648  static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i,atomic64_t * v)3649  raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
3650  {
3651  #if defined(arch_atomic64_fetch_and_relaxed)
3652  	return arch_atomic64_fetch_and_relaxed(i, v);
3653  #elif defined(arch_atomic64_fetch_and)
3654  	return arch_atomic64_fetch_and(i, v);
3655  #else
3656  #error "Unable to define raw_atomic64_fetch_and_relaxed"
3657  #endif
3658  }
3659  
3660  /**
3661   * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
3662   * @i: s64 value
3663   * @v: pointer to atomic64_t
3664   *
3665   * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3666   *
3667   * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
3668   *
3669   * Return: Nothing.
3670   */
3671  static __always_inline void
raw_atomic64_andnot(s64 i,atomic64_t * v)3672  raw_atomic64_andnot(s64 i, atomic64_t *v)
3673  {
3674  #if defined(arch_atomic64_andnot)
3675  	arch_atomic64_andnot(i, v);
3676  #else
3677  	raw_atomic64_and(~i, v);
3678  #endif
3679  }
3680  
3681  /**
3682   * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
3683   * @i: s64 value
3684   * @v: pointer to atomic64_t
3685   *
3686   * Atomically updates @v to (@v & ~@i) with full ordering.
3687   *
3688   * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
3689   *
3690   * Return: The original value of @v.
3691   */
3692  static __always_inline s64
raw_atomic64_fetch_andnot(s64 i,atomic64_t * v)3693  raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
3694  {
3695  #if defined(arch_atomic64_fetch_andnot)
3696  	return arch_atomic64_fetch_andnot(i, v);
3697  #elif defined(arch_atomic64_fetch_andnot_relaxed)
3698  	s64 ret;
3699  	__atomic_pre_full_fence();
3700  	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3701  	__atomic_post_full_fence();
3702  	return ret;
3703  #else
3704  	return raw_atomic64_fetch_and(~i, v);
3705  #endif
3706  }
3707  
3708  /**
3709   * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
3710   * @i: s64 value
3711   * @v: pointer to atomic64_t
3712   *
3713   * Atomically updates @v to (@v & ~@i) with acquire ordering.
3714   *
3715   * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
3716   *
3717   * Return: The original value of @v.
3718   */
3719  static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i,atomic64_t * v)3720  raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
3721  {
3722  #if defined(arch_atomic64_fetch_andnot_acquire)
3723  	return arch_atomic64_fetch_andnot_acquire(i, v);
3724  #elif defined(arch_atomic64_fetch_andnot_relaxed)
3725  	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
3726  	__atomic_acquire_fence();
3727  	return ret;
3728  #elif defined(arch_atomic64_fetch_andnot)
3729  	return arch_atomic64_fetch_andnot(i, v);
3730  #else
3731  	return raw_atomic64_fetch_and_acquire(~i, v);
3732  #endif
3733  }
3734  
3735  /**
3736   * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
3737   * @i: s64 value
3738   * @v: pointer to atomic64_t
3739   *
3740   * Atomically updates @v to (@v & ~@i) with release ordering.
3741   *
3742   * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
3743   *
3744   * Return: The original value of @v.
3745   */
3746  static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i,atomic64_t * v)3747  raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
3748  {
3749  #if defined(arch_atomic64_fetch_andnot_release)
3750  	return arch_atomic64_fetch_andnot_release(i, v);
3751  #elif defined(arch_atomic64_fetch_andnot_relaxed)
3752  	__atomic_release_fence();
3753  	return arch_atomic64_fetch_andnot_relaxed(i, v);
3754  #elif defined(arch_atomic64_fetch_andnot)
3755  	return arch_atomic64_fetch_andnot(i, v);
3756  #else
3757  	return raw_atomic64_fetch_and_release(~i, v);
3758  #endif
3759  }
3760  
3761  /**
3762   * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
3763   * @i: s64 value
3764   * @v: pointer to atomic64_t
3765   *
3766   * Atomically updates @v to (@v & ~@i) with relaxed ordering.
3767   *
3768   * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
3769   *
3770   * Return: The original value of @v.
3771   */
3772  static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i,atomic64_t * v)3773  raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
3774  {
3775  #if defined(arch_atomic64_fetch_andnot_relaxed)
3776  	return arch_atomic64_fetch_andnot_relaxed(i, v);
3777  #elif defined(arch_atomic64_fetch_andnot)
3778  	return arch_atomic64_fetch_andnot(i, v);
3779  #else
3780  	return raw_atomic64_fetch_and_relaxed(~i, v);
3781  #endif
3782  }
3783  
3784  /**
3785   * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
3786   * @i: s64 value
3787   * @v: pointer to atomic64_t
3788   *
3789   * Atomically updates @v to (@v | @i) with relaxed ordering.
3790   *
3791   * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
3792   *
3793   * Return: Nothing.
3794   */
3795  static __always_inline void
raw_atomic64_or(s64 i,atomic64_t * v)3796  raw_atomic64_or(s64 i, atomic64_t *v)
3797  {
3798  	arch_atomic64_or(i, v);
3799  }
3800  
3801  /**
3802   * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
3803   * @i: s64 value
3804   * @v: pointer to atomic64_t
3805   *
3806   * Atomically updates @v to (@v | @i) with full ordering.
3807   *
3808   * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
3809   *
3810   * Return: The original value of @v.
3811   */
3812  static __always_inline s64
raw_atomic64_fetch_or(s64 i,atomic64_t * v)3813  raw_atomic64_fetch_or(s64 i, atomic64_t *v)
3814  {
3815  #if defined(arch_atomic64_fetch_or)
3816  	return arch_atomic64_fetch_or(i, v);
3817  #elif defined(arch_atomic64_fetch_or_relaxed)
3818  	s64 ret;
3819  	__atomic_pre_full_fence();
3820  	ret = arch_atomic64_fetch_or_relaxed(i, v);
3821  	__atomic_post_full_fence();
3822  	return ret;
3823  #else
3824  #error "Unable to define raw_atomic64_fetch_or"
3825  #endif
3826  }
3827  
3828  /**
3829   * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
3830   * @i: s64 value
3831   * @v: pointer to atomic64_t
3832   *
3833   * Atomically updates @v to (@v | @i) with acquire ordering.
3834   *
3835   * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
3836   *
3837   * Return: The original value of @v.
3838   */
3839  static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i,atomic64_t * v)3840  raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
3841  {
3842  #if defined(arch_atomic64_fetch_or_acquire)
3843  	return arch_atomic64_fetch_or_acquire(i, v);
3844  #elif defined(arch_atomic64_fetch_or_relaxed)
3845  	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
3846  	__atomic_acquire_fence();
3847  	return ret;
3848  #elif defined(arch_atomic64_fetch_or)
3849  	return arch_atomic64_fetch_or(i, v);
3850  #else
3851  #error "Unable to define raw_atomic64_fetch_or_acquire"
3852  #endif
3853  }
3854  
3855  /**
3856   * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
3857   * @i: s64 value
3858   * @v: pointer to atomic64_t
3859   *
3860   * Atomically updates @v to (@v | @i) with release ordering.
3861   *
3862   * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
3863   *
3864   * Return: The original value of @v.
3865   */
3866  static __always_inline s64
raw_atomic64_fetch_or_release(s64 i,atomic64_t * v)3867  raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
3868  {
3869  #if defined(arch_atomic64_fetch_or_release)
3870  	return arch_atomic64_fetch_or_release(i, v);
3871  #elif defined(arch_atomic64_fetch_or_relaxed)
3872  	__atomic_release_fence();
3873  	return arch_atomic64_fetch_or_relaxed(i, v);
3874  #elif defined(arch_atomic64_fetch_or)
3875  	return arch_atomic64_fetch_or(i, v);
3876  #else
3877  #error "Unable to define raw_atomic64_fetch_or_release"
3878  #endif
3879  }
3880  
3881  /**
3882   * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
3883   * @i: s64 value
3884   * @v: pointer to atomic64_t
3885   *
3886   * Atomically updates @v to (@v | @i) with relaxed ordering.
3887   *
3888   * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
3889   *
3890   * Return: The original value of @v.
3891   */
3892  static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i,atomic64_t * v)3893  raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
3894  {
3895  #if defined(arch_atomic64_fetch_or_relaxed)
3896  	return arch_atomic64_fetch_or_relaxed(i, v);
3897  #elif defined(arch_atomic64_fetch_or)
3898  	return arch_atomic64_fetch_or(i, v);
3899  #else
3900  #error "Unable to define raw_atomic64_fetch_or_relaxed"
3901  #endif
3902  }
3903  
3904  /**
3905   * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
3906   * @i: s64 value
3907   * @v: pointer to atomic64_t
3908   *
3909   * Atomically updates @v to (@v ^ @i) with relaxed ordering.
3910   *
3911   * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
3912   *
3913   * Return: Nothing.
3914   */
3915  static __always_inline void
raw_atomic64_xor(s64 i,atomic64_t * v)3916  raw_atomic64_xor(s64 i, atomic64_t *v)
3917  {
3918  	arch_atomic64_xor(i, v);
3919  }
3920  
3921  /**
3922   * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
3923   * @i: s64 value
3924   * @v: pointer to atomic64_t
3925   *
3926   * Atomically updates @v to (@v ^ @i) with full ordering.
3927   *
3928   * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
3929   *
3930   * Return: The original value of @v.
3931   */
3932  static __always_inline s64
raw_atomic64_fetch_xor(s64 i,atomic64_t * v)3933  raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
3934  {
3935  #if defined(arch_atomic64_fetch_xor)
3936  	return arch_atomic64_fetch_xor(i, v);
3937  #elif defined(arch_atomic64_fetch_xor_relaxed)
3938  	s64 ret;
3939  	__atomic_pre_full_fence();
3940  	ret = arch_atomic64_fetch_xor_relaxed(i, v);
3941  	__atomic_post_full_fence();
3942  	return ret;
3943  #else
3944  #error "Unable to define raw_atomic64_fetch_xor"
3945  #endif
3946  }
3947  
3948  /**
3949   * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
3950   * @i: s64 value
3951   * @v: pointer to atomic64_t
3952   *
3953   * Atomically updates @v to (@v ^ @i) with acquire ordering.
3954   *
3955   * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
3956   *
3957   * Return: The original value of @v.
3958   */
3959  static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i,atomic64_t * v)3960  raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
3961  {
3962  #if defined(arch_atomic64_fetch_xor_acquire)
3963  	return arch_atomic64_fetch_xor_acquire(i, v);
3964  #elif defined(arch_atomic64_fetch_xor_relaxed)
3965  	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
3966  	__atomic_acquire_fence();
3967  	return ret;
3968  #elif defined(arch_atomic64_fetch_xor)
3969  	return arch_atomic64_fetch_xor(i, v);
3970  #else
3971  #error "Unable to define raw_atomic64_fetch_xor_acquire"
3972  #endif
3973  }
3974  
3975  /**
3976   * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
3977   * @i: s64 value
3978   * @v: pointer to atomic64_t
3979   *
3980   * Atomically updates @v to (@v ^ @i) with release ordering.
3981   *
3982   * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
3983   *
3984   * Return: The original value of @v.
3985   */
3986  static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i,atomic64_t * v)3987  raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
3988  {
3989  #if defined(arch_atomic64_fetch_xor_release)
3990  	return arch_atomic64_fetch_xor_release(i, v);
3991  #elif defined(arch_atomic64_fetch_xor_relaxed)
3992  	__atomic_release_fence();
3993  	return arch_atomic64_fetch_xor_relaxed(i, v);
3994  #elif defined(arch_atomic64_fetch_xor)
3995  	return arch_atomic64_fetch_xor(i, v);
3996  #else
3997  #error "Unable to define raw_atomic64_fetch_xor_release"
3998  #endif
3999  }
4000  
4001  /**
4002   * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
4003   * @i: s64 value
4004   * @v: pointer to atomic64_t
4005   *
4006   * Atomically updates @v to (@v ^ @i) with relaxed ordering.
4007   *
4008   * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
4009   *
4010   * Return: The original value of @v.
4011   */
4012  static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i,atomic64_t * v)4013  raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
4014  {
4015  #if defined(arch_atomic64_fetch_xor_relaxed)
4016  	return arch_atomic64_fetch_xor_relaxed(i, v);
4017  #elif defined(arch_atomic64_fetch_xor)
4018  	return arch_atomic64_fetch_xor(i, v);
4019  #else
4020  #error "Unable to define raw_atomic64_fetch_xor_relaxed"
4021  #endif
4022  }
4023  
4024  /**
4025   * raw_atomic64_xchg() - atomic exchange with full ordering
4026   * @v: pointer to atomic64_t
4027   * @new: s64 value to assign
4028   *
4029   * Atomically updates @v to @new with full ordering.
4030   *
4031   * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
4032   *
4033   * Return: The original value of @v.
4034   */
4035  static __always_inline s64
raw_atomic64_xchg(atomic64_t * v,s64 new)4036  raw_atomic64_xchg(atomic64_t *v, s64 new)
4037  {
4038  #if defined(arch_atomic64_xchg)
4039  	return arch_atomic64_xchg(v, new);
4040  #elif defined(arch_atomic64_xchg_relaxed)
4041  	s64 ret;
4042  	__atomic_pre_full_fence();
4043  	ret = arch_atomic64_xchg_relaxed(v, new);
4044  	__atomic_post_full_fence();
4045  	return ret;
4046  #else
4047  	return raw_xchg(&v->counter, new);
4048  #endif
4049  }
4050  
4051  /**
4052   * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
4053   * @v: pointer to atomic64_t
4054   * @new: s64 value to assign
4055   *
4056   * Atomically updates @v to @new with acquire ordering.
4057   *
4058   * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
4059   *
4060   * Return: The original value of @v.
4061   */
4062  static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t * v,s64 new)4063  raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
4064  {
4065  #if defined(arch_atomic64_xchg_acquire)
4066  	return arch_atomic64_xchg_acquire(v, new);
4067  #elif defined(arch_atomic64_xchg_relaxed)
4068  	s64 ret = arch_atomic64_xchg_relaxed(v, new);
4069  	__atomic_acquire_fence();
4070  	return ret;
4071  #elif defined(arch_atomic64_xchg)
4072  	return arch_atomic64_xchg(v, new);
4073  #else
4074  	return raw_xchg_acquire(&v->counter, new);
4075  #endif
4076  }
4077  
4078  /**
4079   * raw_atomic64_xchg_release() - atomic exchange with release ordering
4080   * @v: pointer to atomic64_t
4081   * @new: s64 value to assign
4082   *
4083   * Atomically updates @v to @new with release ordering.
4084   *
4085   * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
4086   *
4087   * Return: The original value of @v.
4088   */
4089  static __always_inline s64
raw_atomic64_xchg_release(atomic64_t * v,s64 new)4090  raw_atomic64_xchg_release(atomic64_t *v, s64 new)
4091  {
4092  #if defined(arch_atomic64_xchg_release)
4093  	return arch_atomic64_xchg_release(v, new);
4094  #elif defined(arch_atomic64_xchg_relaxed)
4095  	__atomic_release_fence();
4096  	return arch_atomic64_xchg_relaxed(v, new);
4097  #elif defined(arch_atomic64_xchg)
4098  	return arch_atomic64_xchg(v, new);
4099  #else
4100  	return raw_xchg_release(&v->counter, new);
4101  #endif
4102  }
4103  
4104  /**
4105   * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
4106   * @v: pointer to atomic64_t
4107   * @new: s64 value to assign
4108   *
4109   * Atomically updates @v to @new with relaxed ordering.
4110   *
4111   * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
4112   *
4113   * Return: The original value of @v.
4114   */
4115  static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t * v,s64 new)4116  raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
4117  {
4118  #if defined(arch_atomic64_xchg_relaxed)
4119  	return arch_atomic64_xchg_relaxed(v, new);
4120  #elif defined(arch_atomic64_xchg)
4121  	return arch_atomic64_xchg(v, new);
4122  #else
4123  	return raw_xchg_relaxed(&v->counter, new);
4124  #endif
4125  }
4126  
4127  /**
4128   * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
4129   * @v: pointer to atomic64_t
4130   * @old: s64 value to compare with
4131   * @new: s64 value to assign
4132   *
4133   * If (@v == @old), atomically updates @v to @new with full ordering.
4134   * Otherwise, @v is not modified and relaxed ordering is provided.
4135   *
4136   * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
4137   *
4138   * Return: The original value of @v.
4139   */
4140  static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t * v,s64 old,s64 new)4141  raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4142  {
4143  #if defined(arch_atomic64_cmpxchg)
4144  	return arch_atomic64_cmpxchg(v, old, new);
4145  #elif defined(arch_atomic64_cmpxchg_relaxed)
4146  	s64 ret;
4147  	__atomic_pre_full_fence();
4148  	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4149  	__atomic_post_full_fence();
4150  	return ret;
4151  #else
4152  	return raw_cmpxchg(&v->counter, old, new);
4153  #endif
4154  }
4155  
4156  /**
4157   * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4158   * @v: pointer to atomic64_t
4159   * @old: s64 value to compare with
4160   * @new: s64 value to assign
4161   *
4162   * If (@v == @old), atomically updates @v to @new with acquire ordering.
4163   * Otherwise, @v is not modified and relaxed ordering is provided.
4164   *
4165   * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
4166   *
4167   * Return: The original value of @v.
4168   */
4169  static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t * v,s64 old,s64 new)4170  raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
4171  {
4172  #if defined(arch_atomic64_cmpxchg_acquire)
4173  	return arch_atomic64_cmpxchg_acquire(v, old, new);
4174  #elif defined(arch_atomic64_cmpxchg_relaxed)
4175  	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4176  	__atomic_acquire_fence();
4177  	return ret;
4178  #elif defined(arch_atomic64_cmpxchg)
4179  	return arch_atomic64_cmpxchg(v, old, new);
4180  #else
4181  	return raw_cmpxchg_acquire(&v->counter, old, new);
4182  #endif
4183  }
4184  
4185  /**
4186   * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
4187   * @v: pointer to atomic64_t
4188   * @old: s64 value to compare with
4189   * @new: s64 value to assign
4190   *
4191   * If (@v == @old), atomically updates @v to @new with release ordering.
4192   * Otherwise, @v is not modified and relaxed ordering is provided.
4193   *
4194   * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
4195   *
4196   * Return: The original value of @v.
4197   */
4198  static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t * v,s64 old,s64 new)4199  raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
4200  {
4201  #if defined(arch_atomic64_cmpxchg_release)
4202  	return arch_atomic64_cmpxchg_release(v, old, new);
4203  #elif defined(arch_atomic64_cmpxchg_relaxed)
4204  	__atomic_release_fence();
4205  	return arch_atomic64_cmpxchg_relaxed(v, old, new);
4206  #elif defined(arch_atomic64_cmpxchg)
4207  	return arch_atomic64_cmpxchg(v, old, new);
4208  #else
4209  	return raw_cmpxchg_release(&v->counter, old, new);
4210  #endif
4211  }
4212  
4213  /**
4214   * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4215   * @v: pointer to atomic64_t
4216   * @old: s64 value to compare with
4217   * @new: s64 value to assign
4218   *
4219   * If (@v == @old), atomically updates @v to @new with relaxed ordering.
4220   * Otherwise, @v is not modified and relaxed ordering is provided.
4221   *
4222   * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
4223   *
4224   * Return: The original value of @v.
4225   */
4226  static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t * v,s64 old,s64 new)4227  raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
4228  {
4229  #if defined(arch_atomic64_cmpxchg_relaxed)
4230  	return arch_atomic64_cmpxchg_relaxed(v, old, new);
4231  #elif defined(arch_atomic64_cmpxchg)
4232  	return arch_atomic64_cmpxchg(v, old, new);
4233  #else
4234  	return raw_cmpxchg_relaxed(&v->counter, old, new);
4235  #endif
4236  }
4237  
4238  /**
4239   * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
4240   * @v: pointer to atomic64_t
4241   * @old: pointer to s64 value to compare with
4242   * @new: s64 value to assign
4243   *
4244   * If (@v == @old), atomically updates @v to @new with full ordering.
4245   * Otherwise, @v is not modified, @old is updated to the current value of @v,
4246   * and relaxed ordering is provided.
4247   *
4248   * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
4249   *
4250   * Return: @true if the exchange occured, @false otherwise.
4251   */
4252  static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t * v,s64 * old,s64 new)4253  raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
4254  {
4255  #if defined(arch_atomic64_try_cmpxchg)
4256  	return arch_atomic64_try_cmpxchg(v, old, new);
4257  #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4258  	bool ret;
4259  	__atomic_pre_full_fence();
4260  	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4261  	__atomic_post_full_fence();
4262  	return ret;
4263  #else
4264  	s64 r, o = *old;
4265  	r = raw_atomic64_cmpxchg(v, o, new);
4266  	if (unlikely(r != o))
4267  		*old = r;
4268  	return likely(r == o);
4269  #endif
4270  }
4271  
4272  /**
4273   * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
4274   * @v: pointer to atomic64_t
4275   * @old: pointer to s64 value to compare with
4276   * @new: s64 value to assign
4277   *
4278   * If (@v == @old), atomically updates @v to @new with acquire ordering.
4279   * Otherwise, @v is not modified, @old is updated to the current value of @v,
4280   * and relaxed ordering is provided.
4281   *
4282   * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
4283   *
4284   * Return: @true if the exchange occured, @false otherwise.
4285   */
4286  static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t * v,s64 * old,s64 new)4287  raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
4288  {
4289  #if defined(arch_atomic64_try_cmpxchg_acquire)
4290  	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
4291  #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4292  	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4293  	__atomic_acquire_fence();
4294  	return ret;
4295  #elif defined(arch_atomic64_try_cmpxchg)
4296  	return arch_atomic64_try_cmpxchg(v, old, new);
4297  #else
4298  	s64 r, o = *old;
4299  	r = raw_atomic64_cmpxchg_acquire(v, o, new);
4300  	if (unlikely(r != o))
4301  		*old = r;
4302  	return likely(r == o);
4303  #endif
4304  }
4305  
4306  /**
4307   * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
4308   * @v: pointer to atomic64_t
4309   * @old: pointer to s64 value to compare with
4310   * @new: s64 value to assign
4311   *
4312   * If (@v == @old), atomically updates @v to @new with release ordering.
4313   * Otherwise, @v is not modified, @old is updated to the current value of @v,
4314   * and relaxed ordering is provided.
4315   *
4316   * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
4317   *
4318   * Return: @true if the exchange occured, @false otherwise.
4319   */
4320  static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t * v,s64 * old,s64 new)4321  raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
4322  {
4323  #if defined(arch_atomic64_try_cmpxchg_release)
4324  	return arch_atomic64_try_cmpxchg_release(v, old, new);
4325  #elif defined(arch_atomic64_try_cmpxchg_relaxed)
4326  	__atomic_release_fence();
4327  	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4328  #elif defined(arch_atomic64_try_cmpxchg)
4329  	return arch_atomic64_try_cmpxchg(v, old, new);
4330  #else
4331  	s64 r, o = *old;
4332  	r = raw_atomic64_cmpxchg_release(v, o, new);
4333  	if (unlikely(r != o))
4334  		*old = r;
4335  	return likely(r == o);
4336  #endif
4337  }
4338  
4339  /**
4340   * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
4341   * @v: pointer to atomic64_t
4342   * @old: pointer to s64 value to compare with
4343   * @new: s64 value to assign
4344   *
4345   * If (@v == @old), atomically updates @v to @new with relaxed ordering.
4346   * Otherwise, @v is not modified, @old is updated to the current value of @v,
4347   * and relaxed ordering is provided.
4348   *
4349   * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
4350   *
4351   * Return: @true if the exchange occured, @false otherwise.
4352   */
4353  static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t * v,s64 * old,s64 new)4354  raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
4355  {
4356  #if defined(arch_atomic64_try_cmpxchg_relaxed)
4357  	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
4358  #elif defined(arch_atomic64_try_cmpxchg)
4359  	return arch_atomic64_try_cmpxchg(v, old, new);
4360  #else
4361  	s64 r, o = *old;
4362  	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
4363  	if (unlikely(r != o))
4364  		*old = r;
4365  	return likely(r == o);
4366  #endif
4367  }
4368  
4369  /**
4370   * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
4371   * @i: s64 value to subtract
4372   * @v: pointer to atomic64_t
4373   *
4374   * Atomically updates @v to (@v - @i) with full ordering.
4375   *
4376   * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
4377   *
4378   * Return: @true if the resulting value of @v is zero, @false otherwise.
4379   */
4380  static __always_inline bool
raw_atomic64_sub_and_test(s64 i,atomic64_t * v)4381  raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
4382  {
4383  #if defined(arch_atomic64_sub_and_test)
4384  	return arch_atomic64_sub_and_test(i, v);
4385  #else
4386  	return raw_atomic64_sub_return(i, v) == 0;
4387  #endif
4388  }
4389  
4390  /**
4391   * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
4392   * @v: pointer to atomic64_t
4393   *
4394   * Atomically updates @v to (@v - 1) with full ordering.
4395   *
4396   * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
4397   *
4398   * Return: @true if the resulting value of @v is zero, @false otherwise.
4399   */
4400  static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t * v)4401  raw_atomic64_dec_and_test(atomic64_t *v)
4402  {
4403  #if defined(arch_atomic64_dec_and_test)
4404  	return arch_atomic64_dec_and_test(v);
4405  #else
4406  	return raw_atomic64_dec_return(v) == 0;
4407  #endif
4408  }
4409  
4410  /**
4411   * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
4412   * @v: pointer to atomic64_t
4413   *
4414   * Atomically updates @v to (@v + 1) with full ordering.
4415   *
4416   * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
4417   *
4418   * Return: @true if the resulting value of @v is zero, @false otherwise.
4419   */
4420  static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t * v)4421  raw_atomic64_inc_and_test(atomic64_t *v)
4422  {
4423  #if defined(arch_atomic64_inc_and_test)
4424  	return arch_atomic64_inc_and_test(v);
4425  #else
4426  	return raw_atomic64_inc_return(v) == 0;
4427  #endif
4428  }
4429  
4430  /**
4431   * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
4432   * @i: s64 value to add
4433   * @v: pointer to atomic64_t
4434   *
4435   * Atomically updates @v to (@v + @i) with full ordering.
4436   *
4437   * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
4438   *
4439   * Return: @true if the resulting value of @v is negative, @false otherwise.
4440   */
4441  static __always_inline bool
raw_atomic64_add_negative(s64 i,atomic64_t * v)4442  raw_atomic64_add_negative(s64 i, atomic64_t *v)
4443  {
4444  #if defined(arch_atomic64_add_negative)
4445  	return arch_atomic64_add_negative(i, v);
4446  #elif defined(arch_atomic64_add_negative_relaxed)
4447  	bool ret;
4448  	__atomic_pre_full_fence();
4449  	ret = arch_atomic64_add_negative_relaxed(i, v);
4450  	__atomic_post_full_fence();
4451  	return ret;
4452  #else
4453  	return raw_atomic64_add_return(i, v) < 0;
4454  #endif
4455  }
4456  
4457  /**
4458   * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
4459   * @i: s64 value to add
4460   * @v: pointer to atomic64_t
4461   *
4462   * Atomically updates @v to (@v + @i) with acquire ordering.
4463   *
4464   * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
4465   *
4466   * Return: @true if the resulting value of @v is negative, @false otherwise.
4467   */
4468  static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i,atomic64_t * v)4469  raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
4470  {
4471  #if defined(arch_atomic64_add_negative_acquire)
4472  	return arch_atomic64_add_negative_acquire(i, v);
4473  #elif defined(arch_atomic64_add_negative_relaxed)
4474  	bool ret = arch_atomic64_add_negative_relaxed(i, v);
4475  	__atomic_acquire_fence();
4476  	return ret;
4477  #elif defined(arch_atomic64_add_negative)
4478  	return arch_atomic64_add_negative(i, v);
4479  #else
4480  	return raw_atomic64_add_return_acquire(i, v) < 0;
4481  #endif
4482  }
4483  
4484  /**
4485   * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
4486   * @i: s64 value to add
4487   * @v: pointer to atomic64_t
4488   *
4489   * Atomically updates @v to (@v + @i) with release ordering.
4490   *
4491   * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
4492   *
4493   * Return: @true if the resulting value of @v is negative, @false otherwise.
4494   */
4495  static __always_inline bool
raw_atomic64_add_negative_release(s64 i,atomic64_t * v)4496  raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
4497  {
4498  #if defined(arch_atomic64_add_negative_release)
4499  	return arch_atomic64_add_negative_release(i, v);
4500  #elif defined(arch_atomic64_add_negative_relaxed)
4501  	__atomic_release_fence();
4502  	return arch_atomic64_add_negative_relaxed(i, v);
4503  #elif defined(arch_atomic64_add_negative)
4504  	return arch_atomic64_add_negative(i, v);
4505  #else
4506  	return raw_atomic64_add_return_release(i, v) < 0;
4507  #endif
4508  }
4509  
4510  /**
4511   * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
4512   * @i: s64 value to add
4513   * @v: pointer to atomic64_t
4514   *
4515   * Atomically updates @v to (@v + @i) with relaxed ordering.
4516   *
4517   * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
4518   *
4519   * Return: @true if the resulting value of @v is negative, @false otherwise.
4520   */
4521  static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i,atomic64_t * v)4522  raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
4523  {
4524  #if defined(arch_atomic64_add_negative_relaxed)
4525  	return arch_atomic64_add_negative_relaxed(i, v);
4526  #elif defined(arch_atomic64_add_negative)
4527  	return arch_atomic64_add_negative(i, v);
4528  #else
4529  	return raw_atomic64_add_return_relaxed(i, v) < 0;
4530  #endif
4531  }
4532  
4533  /**
4534   * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
4535   * @v: pointer to atomic64_t
4536   * @a: s64 value to add
4537   * @u: s64 value to compare with
4538   *
4539   * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
4540   * Otherwise, @v is not modified and relaxed ordering is provided.
4541   *
4542   * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
4543   *
4544   * Return: The original value of @v.
4545   */
4546  static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t * v,s64 a,s64 u)4547  raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
4548  {
4549  #if defined(arch_atomic64_fetch_add_unless)
4550  	return arch_atomic64_fetch_add_unless(v, a, u);
4551  #else
4552  	s64 c = raw_atomic64_read(v);
4553  
4554  	do {
4555  		if (unlikely(c == u))
4556  			break;
4557  	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
4558  
4559  	return c;
4560  #endif
4561  }
4562  
4563  /**
4564   * raw_atomic64_add_unless() - atomic add unless value with full ordering
4565   * @v: pointer to atomic64_t
4566   * @a: s64 value to add
4567   * @u: s64 value to compare with
4568   *
4569   * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
4570   * Otherwise, @v is not modified and relaxed ordering is provided.
4571   *
4572   * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
4573   *
4574   * Return: @true if @v was updated, @false otherwise.
4575   */
4576  static __always_inline bool
raw_atomic64_add_unless(atomic64_t * v,s64 a,s64 u)4577  raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
4578  {
4579  #if defined(arch_atomic64_add_unless)
4580  	return arch_atomic64_add_unless(v, a, u);
4581  #else
4582  	return raw_atomic64_fetch_add_unless(v, a, u) != u;
4583  #endif
4584  }
4585  
4586  /**
4587   * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
4588   * @v: pointer to atomic64_t
4589   *
4590   * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
4591   * Otherwise, @v is not modified and relaxed ordering is provided.
4592   *
4593   * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
4594   *
4595   * Return: @true if @v was updated, @false otherwise.
4596   */
4597  static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t * v)4598  raw_atomic64_inc_not_zero(atomic64_t *v)
4599  {
4600  #if defined(arch_atomic64_inc_not_zero)
4601  	return arch_atomic64_inc_not_zero(v);
4602  #else
4603  	return raw_atomic64_add_unless(v, 1, 0);
4604  #endif
4605  }
4606  
4607  /**
4608   * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
4609   * @v: pointer to atomic64_t
4610   *
4611   * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
4612   * Otherwise, @v is not modified and relaxed ordering is provided.
4613   *
4614   * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
4615   *
4616   * Return: @true if @v was updated, @false otherwise.
4617   */
4618  static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t * v)4619  raw_atomic64_inc_unless_negative(atomic64_t *v)
4620  {
4621  #if defined(arch_atomic64_inc_unless_negative)
4622  	return arch_atomic64_inc_unless_negative(v);
4623  #else
4624  	s64 c = raw_atomic64_read(v);
4625  
4626  	do {
4627  		if (unlikely(c < 0))
4628  			return false;
4629  	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
4630  
4631  	return true;
4632  #endif
4633  }
4634  
4635  /**
4636   * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
4637   * @v: pointer to atomic64_t
4638   *
4639   * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
4640   * Otherwise, @v is not modified and relaxed ordering is provided.
4641   *
4642   * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
4643   *
4644   * Return: @true if @v was updated, @false otherwise.
4645   */
4646  static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t * v)4647  raw_atomic64_dec_unless_positive(atomic64_t *v)
4648  {
4649  #if defined(arch_atomic64_dec_unless_positive)
4650  	return arch_atomic64_dec_unless_positive(v);
4651  #else
4652  	s64 c = raw_atomic64_read(v);
4653  
4654  	do {
4655  		if (unlikely(c > 0))
4656  			return false;
4657  	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
4658  
4659  	return true;
4660  #endif
4661  }
4662  
4663  /**
4664   * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
4665   * @v: pointer to atomic64_t
4666   *
4667   * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
4668   * Otherwise, @v is not modified and relaxed ordering is provided.
4669   *
4670   * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
4671   *
4672   * Return: The old value of (@v - 1), regardless of whether @v was updated.
4673   */
4674  static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t * v)4675  raw_atomic64_dec_if_positive(atomic64_t *v)
4676  {
4677  #if defined(arch_atomic64_dec_if_positive)
4678  	return arch_atomic64_dec_if_positive(v);
4679  #else
4680  	s64 dec, c = raw_atomic64_read(v);
4681  
4682  	do {
4683  		dec = c - 1;
4684  		if (unlikely(dec < 0))
4685  			break;
4686  	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));
4687  
4688  	return dec;
4689  #endif
4690  }
4691  
4692  #endif /* _LINUX_ATOMIC_FALLBACK_H */
4693  // b565db590afeeff0d7c9485ccbca5bb6e155749f
4694