/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct list_lru;

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 * 257: Zero entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2).  They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
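
/*
 * Example (an illustrative sketch): storing a small integer as a value
 * entry and reading it back.  'array' is an assumed, previously
 * initialised XArray; the loaded value is 42.
 *
 *	void *entry;
 *
 *	xa_store(&array, 0, xa_mk_value(42), GFP_KERNEL);
 *	entry = xa_load(&array, 0);
 *	if (xa_is_value(entry))
 *		value = xa_to_value(entry);
 */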

/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries.  Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}
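
/*
 * Example (an illustrative sketch): round-tripping a tagged pointer.
 * 'p' is assumed to be at least 4-byte aligned, leaving the bottom two
 * bits clear for the tag; after these calls tag == 1.
 *
 *	void *entry = xa_tag_pointer(p, 1);
 *
 *	p = xa_untag_pointer(entry);
 *	tag = xa_pointer_tag(entry);
 */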

/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes.  Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code).  256
 * is used for the retry entry.  257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos.  Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry.  You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error.  This function tells you whether an
 * error occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno.  This function extracts the
 * errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
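
/*
 * Example (an illustrative sketch): converting a store result into an
 * errno.  'array', 'index' and 'item' are assumed to exist in the
 * caller.
 *
 *	void *old = xa_store(&array, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */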

/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Three common ranges are predefined for you:
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 * * xa_limit_16b	- [0 - USHRT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
#define xa_limit_16b	XA_LIMIT(0, USHRT_MAX)
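
/*
 * Example (an illustrative sketch): allocating an ID restricted to a
 * custom range rather than one of the predefined limits.
 *
 *	int err = xa_alloc(&array, &id, item, XA_LIMIT(1, 255), GFP_KERNEL);
 */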

typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/* ALLOC is for a normal 0-based alloc.  ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry.  If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name and flags.  It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compile time instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)				\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays.  It declares
 * and initialises an empty XArray with the chosen name.  It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compile time instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)

void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
			void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
		unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);
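
/*
 * Example (an illustrative sketch) of the normal API: define an XArray
 * at file scope, then store, load and erase a pointer.  Error checking
 * of the xa_store() return value is elided for brevity.
 *
 *	DEFINE_XARRAY(array);
 *
 *	xa_store(&array, 5, item, GFP_KERNEL);
 *	item = xa_load(&array, 5);
 *	xa_erase(&array, 5);
 */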

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (e.g. you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_for_each_range() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 * @last: Last index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you
 * want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to @last.
 *
 * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_range() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_range().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_range(xa, index, entry, start, last)		\
	for (index = start,						\
	     entry = xa_find(xa, &index, last, XA_PRESENT);		\
	     entry;							\
	     entry = xa_find_after(xa, &index, last, XA_PRESENT))

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you
 * want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start) \
	xa_for_each_range(xa, index, entry, start, ULONG_MAX)

/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  You may modify @index during the iteration if you want
 * to skip or reprocess indices.  It is safe to modify the array during the
 * iteration.  At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).  xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead.  The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
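
/*
 * Example (an illustrative sketch): visiting every present entry.
 * handle_entry() is a hypothetical callback supplied by the caller.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each(&array, index, entry)
 *		handle_entry(index, entry);
 */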

/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index.  The iteration will skip all entries in the array
 * which do not match @filter.  You may modify @index during the iteration
 * if you want to skip or reprocess indices.  It is safe to modify the array
 * during the iteration.  At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each(), and if you have
 * to unlock after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead.  The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
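
/*
 * Example (an illustrative sketch): visiting only entries which have
 * XA_MARK_0 set, e.g. after a prior xa_set_mark(&array, index,
 * XA_MARK_0).  handle_marked() is a hypothetical callback.
 *
 *	xa_for_each_marked(&array, index, entry, XA_MARK_0)
 *		handle_marked(index, entry);
 */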

#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
				spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass) \
		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock.  If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards.  These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}

/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}
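
/*
 * Example (an illustrative sketch): inserting only if @index is
 * currently unused, treating -EBUSY as "already present".
 *
 *	int err = xa_insert(&array, index, item, GFP_KERNEL);
 *
 *	if (err == -EBUSY)
 *		goto already_exists;
 *	if (err)
 *		goto out_of_memory;
 */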

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
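
/*
 * Example (an illustrative sketch): allocating a free ID for a new
 * object.  'array' is assumed to have been defined with
 * DEFINE_XARRAY_ALLOC() or initialised with XA_FLAGS_ALLOC.
 *
 *	u32 id;
 *	int err = xa_alloc(&array, &id, item, xa_limit_32b, GFP_KERNEL);
 *
 *	if (!err)
 *		item->id = id;
 */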

/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Takes and releases the xa_lock.  May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err;
}
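
/*
 * Example (an illustrative sketch): cyclic allocation keeps a 'next'
 * cursor between calls so recently-freed IDs are not immediately
 * reused.  A return value of 1 still indicates success.
 *
 *	static u32 next;
 *	int err = xa_alloc_cyclic(&array, &id, item, xa_limit_31b, &next,
 *				  GFP_KERNEL);
 *
 *	if (err < 0)
 *		return err;
 */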

/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing.  If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context.  Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context.  Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation.  If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
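
/*
 * Example (an illustrative sketch): reserving an index, then either
 * committing an entry to it or releasing the reservation.
 * setup_failed is a hypothetical condition in the caller.
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (setup_failed)
 *		xa_release(&array, index);
 *	else
 *		xa_store(&array, index, item, GFP_KERNEL);
 */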

/* Everything below here is the Advanced API.  Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
 * the best chunk size requires some tradeoffs.  A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be.  But that needs to be
 * balanced against the memory consumption of each node.  On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		BITS_TO_LONGS(XA_CHUNK_SIZE)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node.  It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 *	    Implementations should not drop the xa_lock, nor re-enable
 *	    interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

void xa_delete_node(struct xa_node *, xa_update_node_t);

/*
 * The xa_state is opaque to its users.  It contains various different pieces
 * of state involved in the current operation on the XArray.  It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions.  The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array).  If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node.  @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation.  If an error occurs
 * during an operation, it is set to an %XAS_ERROR value.  If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
	struct list_lru *xa_lru;
};

/*
 * We encode errnos in the xas->xa_node.  If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL,				\
	.xa_lru = NULL,					\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)
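
/*
 * Example (an illustrative sketch) of the advanced API: a lookup with
 * an on-stack xa_state.  Unlike xa_load(), the caller provides the
 * locking, here the RCU read lock.
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();
 */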
1392  
1393  /**
1394   * XA_STATE_ORDER() - Declare an XArray operation state.
1395   * @name: Name of this operation state (usually xas).
1396   * @array: Array to operate on.
1397   * @index: Initial index of interest.
1398   * @order: Order of entry.
1399   *
1400   * Declare and initialise an xa_state on the stack.  This variant of
1401   * XA_STATE() allows you to specify the 'order' of the element you
1402   * want to operate on.`
1403   */
1404  #define XA_STATE_ORDER(name, array, index, order)		\
1405  	struct xa_state name = __XA_STATE(array,		\
1406  			(index >> order) << order,		\
1407  			order - (order % XA_CHUNK_SHIFT),	\
1408  			(1U << (order % XA_CHUNK_SHIFT)) - 1)
1409  
1410  #define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
1411  #define xas_trylock(xas)	xa_trylock((xas)->xa)
1412  #define xas_lock(xas)		xa_lock((xas)->xa)
1413  #define xas_unlock(xas)		xa_unlock((xas)->xa)
1414  #define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
1415  #define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
1416  #define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
1417  #define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
1418  #define xas_lock_irqsave(xas, flags) \
1419  				xa_lock_irqsave((xas)->xa, flags)
1420  #define xas_unlock_irqrestore(xas, flags) \
1421  				xa_unlock_irqrestore((xas)->xa, flags)
1422  
1423  /**
1424   * xas_error() - Return an errno stored in the xa_state.
1425   * @xas: XArray operation state.
1426   *
1427   * Return: 0 if no error has been noted.  A negative errno if one has.
1428   */
xas_error(const struct xa_state * xas)1429  static inline int xas_error(const struct xa_state *xas)
1430  {
1431  	return xa_err(xas->xa_node);
1432  }
1433  
1434  /**
1435   * xas_set_err() - Note an error in the xa_state.
1436   * @xas: XArray operation state.
1437   * @err: Negative error number.
1438   *
1439   * Only call this function with a negative @err; zero or positive errors
1440   * will probably not behave the way you think they should.  If you want
1441   * to clear the error from an xa_state, use xas_reset().
1442   */
xas_set_err(struct xa_state * xas,long err)1443  static inline void xas_set_err(struct xa_state *xas, long err)
1444  {
1445  	xas->xa_node = XA_ERROR(err);
1446  }

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root.  Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry.  This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}
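
/*
 * Example (sketch): iterators typically test each entry they see;
 * a zero entry also reads as "retry" (i.e. skip) here:
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		... use entry ...
 *	}
 *	rcu_read_unlock();
 */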

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);

void xas_create_range(struct xa_state *);
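
/*
 * Example (sketch; 'my_xa', 'my_index' and 'my_entry' are
 * hypothetical): the canonical allocating-store loop.  If a store
 * fails for lack of memory, xas_nomem() allocates outside the lock
 * and returns true, asking the caller to retry:
 *
 *	XA_STATE(xas, &my_xa, my_index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, my_entry);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 */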

#ifdef CONFIG_XARRAY_MULTI
int xa_get_order(struct xarray *, unsigned long index);
int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
	return 0;
}

static inline int xas_get_order(struct xa_state *xas)
{
	return 0;
}

static inline void xas_split(struct xa_state *xas, void *entry,
		unsigned int order)
{
	xas_store(xas, entry);
}

static inline void xas_split_alloc(struct xa_state *xas, void *entry,
		unsigned int order, gfp_t gfp)
{
}
#endif

/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value.  This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid.  If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	char offset;

	if (!node)
		return xa_head(xas->xa);
	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
		entry = xa_entry(xas->xa, node, offset);
		if (!xa_is_sibling(entry))
			return entry;
		offset = xa_to_sibling(entry);
	} else {
		offset = xas->xa_offset;
	}
	return xa_entry(xas->xa, node, offset);
}
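
/*
 * Example (sketch, modelled on the pagecache pattern described above;
 * 'my_get_ref' and 'my_put_ref' are hypothetical reference helpers):
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	if (entry && my_get_ref(entry)) {
 *		if (xas_reload(&xas) != entry) {
 *			my_put_ref(entry);	// raced with removal
 *			entry = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 */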

/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index.  This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}
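
/*
 * Example (sketch): reusing one xa_state for lookups at unrelated
 * indices:
 *
 *	XA_STATE(xas, &my_xa, 0);
 *
 *	first = xas_load(&xas);
 *	xas_set(&xas, 1000);	// next walk restarts at index 1000
 *	second = xas_load(&xas);
 */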

/**
 * xas_advance() - Skip over sibling entries.
 * @xas: XArray operation state.
 * @index: Index of last sibling entry.
 *
 * Move the operation state to refer to the last sibling entry.
 * This is useful for loops that normally want to see sibling
 * entries but sometimes want to skip them.  Use xas_set() if you
 * want to move to an index which is not part of this entry.
 */
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page
 * cache and swap cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
	xas->xa_lru = lru;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed.  It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed.  It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
								xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	entry = xa_entry(xas->xa, node, offset);
	if (!entry)
		return xas_find_marked(xas, max, mark);
	return entry;
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};
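
/*
 * Example (sketch): a long locked iteration which pauses, drops the
 * lock and reschedules every XA_CHECK_SCHED entries:
 *
 *	unsigned int i = 0;
 *
 *	xas_lock_irq(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		... process entry ...
 *		if (++i % XA_CHECK_SCHED)
 *			continue;
 *		xas_pause(&xas);
 *		xas_unlock_irq(&xas);
 *		cond_resched();
 *		xas_lock_irq(&xas);
 *	}
 *	xas_unlock_irq(&xas);
 */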

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max.  @entry will be set to
 * the entry retrieved from the xarray.  It is safe to delete entries
 * from the array in the loop body.  You should hold either the RCU lock
 * or the xa_lock while iterating.  If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))
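
/*
 * Example (sketch): counting the present entries between indices 5
 * and 20 inclusive, under the RCU read lock:
 *
 *	XA_STATE(xas, &my_xa, 5);
 *	unsigned int count = 0;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, 20) {
 *		if (!xas_retry(&xas, entry))
 *			count++;
 *	}
 *	rcu_read_unlock();
 */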

/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max.  @entry will be set to
 * the entry retrieved from the xarray.  It is safe to delete entries
 * from the array in the loop body.  You should hold either the RCU lock
 * or the xa_lock while iterating.  If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))
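
/*
 * Example (sketch): visiting only the entries carrying a particular
 * mark, here XA_MARK_0:
 *
 *	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		... process entry ...
 *	}
 */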

/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that
 * lies within the range specified by @xas.  If the loop terminates
 * normally, @entry will be %NULL.  The user may break out of the loop,
 * which will leave @entry set to the conflicting entry.  The caller
 * may also call xas_set_err() to exit the loop while setting an error
 * to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))
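
/*
 * Example (sketch; 'my_placeholder' and 'my_entry' are hypothetical):
 * refusing a store if anything other than the expected placeholder
 * already exists in the range covered by @xas:
 *
 *	xas_for_each_conflict(&xas, entry) {
 *		if (entry != my_placeholder)
 *			xas_set_err(&xas, -EEXIST);
 *	}
 *	if (!xas_error(&xas))
 *		xas_store(&xas, my_entry);
 */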

void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL.  If the @xas has never been walked,
 * it will have the effect of calling xas_load().  Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index.  This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL.  If the @xas has never been walked,
 * it will have the effect of calling xas_load().  Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index.  This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}
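
/*
 * Example (sketch; 'base' is hypothetical): walking a few adjacent
 * indices with xas_next() rather than restarting the walk each time.
 * Remember the return value may be %NULL or an internal entry:
 *
 *	XA_STATE(xas, &my_xa, base);
 *	unsigned int i;
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	for (i = 0; i < 4; i++) {
 *		... use entry for index base + i ...
 *		entry = xas_next(&xas);
 *	}
 *	rcu_read_unlock();
 */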

#endif /* _LINUX_XARRAY_H */