// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "radix-tree.h"

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_next_offset(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static __always_inline void *xas_descend(struct xa_state *xas,
					struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	while (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
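
/*
 * Illustrative usage sketch: the advanced API leaves locking and retrying
 * to the caller, so a minimal lookup with xas_load() looks like the loop
 * below.  "my_xa" and "index" are hypothetical; the pattern mirrors
 * xa_load() later in this file.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */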

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function; it is called for you
 * by xas_nomem().
 */
void xas_destroy(struct xa_state *xas)
{
	struct xa_node *next, *node = xas->xa_alloc;

	while (node) {
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
		next = rcu_dereference_raw(node->parent);
		radix_tree_node_rcu_free(&node->rcu_head);
		xas->xa_alloc = node = next;
	}
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
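
/*
 * Illustrative usage sketch: the canonical retry loop described above.
 * The store is attempted under the lock; on -ENOMEM the caller drops the
 * lock, xas_nomem() allocates a node, and the loop retries.  "my_xa",
 * "index" and "item" are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *	return xas_error(&xas);
 */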

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if
 * @xas->xa_node has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			if (node->shift >= shift)
				break;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
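
/*
 * Illustrative usage sketch: replacing one entry with another under the
 * xa_lock via the advanced API, so the store and a mark update share a
 * single walk.  xas_store() returns the displaced entry.  "my_xa", "index"
 * and "new_item" are hypothetical, and the xas_nomem() retry needed for a
 * robust store is elided for brevity.
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *old;
 *
 *	xas_lock(&xas);
 *	old = xas_store(&xas, new_item);
 *	xas_set_mark(&xas, XA_MARK_0);
 *	xas_unlock(&xas);
 */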

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
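
/*
 * Illustrative usage sketch: most callers reach xas_set_mark() and
 * xas_clear_mark() through the normal-API wrappers, which walk to the
 * entry and take the lock for you.  "my_xa", "index" and "item" are
 * hypothetical.
 *
 *	xa_store(&my_xa, index, item, GFP_KERNEL);
 *	xa_set_mark(&my_xa, index, XA_MARK_0);
 *	if (xa_get_mark(&my_xa, index, XA_MARK_0))
 *		...
 *	xa_clear_mark(&my_xa, index, XA_MARK_0);
 */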

#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
	unsigned int marks = 0;
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (node_get_mark(node, offset, mark))
			marks |= 1 << (__force unsigned int)mark;
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}

	return marks;
}

static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
		xa_mark_t mark)
{
	int i;

	if (sibs == 0)
		node_mark_all(node, mark);
	else {
		for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
			node_set_mark(node, i, mark);
	}
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
			struct xa_node *child, unsigned int sibs,
			unsigned int marks)
{
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (marks & (1 << (__force unsigned int)mark)) {
			node_set_mark(node, offset, mark);
			if (child)
				node_mark_slots(child, sibs, mark);
		}
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}

/**
 * xas_split_alloc() - Allocate memory for splitting an entry.
 * @xas: XArray operation state.
 * @entry: New entry which will be stored in the array.
 * @order: Current entry order.
 * @gfp: Memory allocation flags.
 *
 * This function should be called before calling xas_split().
 * If necessary, it will allocate new nodes (and fill them with @entry)
 * to prepare for the upcoming split of an entry of @order size into
 * entries of the order stored in the @xas.
 *
 * Context: May sleep if @gfp flags permit.
 */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
		gfp_t gfp)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int mask = xas->xa_sibs;

	/* XXX: no support for splitting really large entries yet */
	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
		goto nomem;
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;

	do {
		unsigned int i;
		void *sibling = NULL;
		struct xa_node *node;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node)
			goto nomem;
		node->array = xas->xa;
		for (i = 0; i < XA_CHUNK_SIZE; i++) {
			if ((i & mask) == 0) {
				RCU_INIT_POINTER(node->slots[i], entry);
				sibling = xa_mk_sibling(i);
			} else {
				RCU_INIT_POINTER(node->slots[i], sibling);
			}
		}
		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
		xas->xa_alloc = node;
	} while (sibs-- > 0);

	return;
nomem:
	xas_destroy(xas);
	xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);

/**
 * xas_split() - Split a multi-index entry into smaller entries.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: Current entry order.
 *
 * The size of the new entries is set in @xas.  The value in @entry is
 * copied to all the replacement entries.
 *
 * Context: Any context.  The caller should hold the xa_lock.
 */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	marks = node_get_marks(node, xas->xa_offset);

	offset = xas->xa_offset + sibs;
	do {
		if (xas->xa_shift < node->shift) {
			struct xa_node *child = xas->xa_alloc;

			xas->xa_alloc = rcu_dereference_raw(child->parent);
			child->shift = node->shift - XA_CHUNK_SHIFT;
			child->offset = offset;
			child->count = XA_CHUNK_SIZE;
			child->nr_values = xa_is_value(entry) ?
					XA_CHUNK_SIZE : 0;
			RCU_INIT_POINTER(child->parent, node);
			node_set_marks(node, offset, child, xas->xa_sibs,
					marks);
			rcu_assign_pointer(node->slots[offset],
					xa_mk_node(child));
			if (xa_is_value(curr))
				values--;
			xas_update(xas, child);
		} else {
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, 0, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		}
	} while (offset-- > xas->xa_offset);

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	xas->xa_node = XAS_RESTART;
	if (node) {
		unsigned long offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
		if (xas->xa_index == 0)
			xas->xa_node = XAS_BOUNDS;
	} else {
		xas->xa_index++;
	}
}
EXPORT_SYMBOL_GPL(xas_pause);
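
/*
 * Illustrative usage sketch: dropping the RCU lock in the middle of an
 * iteration, as described above.  "my_xa" and process() are hypothetical.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *		process(entry);
 *	}
 *	rcu_read_unlock();
 */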

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
		return NULL;
	if (xas->xa_index > max)
		return set_bounds(xas);

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas->xa_node == XAS_RESTART) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_next_offset(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_next_offset(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
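
/*
 * Illustrative usage sketch: xas_find() is the engine behind the
 * xas_for_each() iterator; open-coded, a scan over present entries looks
 * roughly like the loop below.  "my_xa", "start", "max" and process() are
 * hypothetical.
 *
 *	XA_STATE(xas, &my_xa, start);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = xas_find(&xas, max); entry;
 *	     entry = xas_next_entry(&xas, max)) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		process(entry);
 *	}
 *	rcu_read_unlock();
 */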

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);
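
/*
 * Illustrative usage sketch: the normal API needs no explicit locking,
 * and %NULL means nothing is stored at the index.  "my_xa" and "index"
 * are hypothetical.
 *
 *	void *item = xa_load(&my_xa, index);
 *
 *	if (!item)
 *		return -ENOENT;
 */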

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_erase(xa, index);
	xa_unlock(xa);

	return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock(xa);

	return curr;
}
EXPORT_SYMBOL(xa_store);
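
/*
 * Illustrative usage sketch: xa_store() encodes failures in its return
 * value, so test it with xa_is_err()/xa_err() rather than comparing
 * against %NULL.  "my_xa", "index" and "item" are hypothetical.
 *
 *	void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */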

/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		} else {
			xas_set_err(&xas, -EBUSY);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
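
/*
 * Illustrative usage sketch: the unlocked wrapper xa_insert() is the
 * usual way to reach this; -EBUSY distinguishes "already occupied" from
 * success.  "my_xa", "index" and "item" are hypothetical.
 *
 *	int err = xa_insert(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (err == -EBUSY)
 *		...
 *	else if (err)
 *		return err;
 */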
1681  
1682  #ifdef CONFIG_XARRAY_MULTI
xas_set_range(struct xa_state * xas,unsigned long first,unsigned long last)1683  static void xas_set_range(struct xa_state *xas, unsigned long first,
1684  		unsigned long last)
1685  {
1686  	unsigned int shift = 0;
1687  	unsigned long sibs = last - first;
1688  	unsigned int offset = XA_CHUNK_MASK;
1689  
1690  	xas_set(xas, first);
1691  
1692  	while ((first & XA_CHUNK_MASK) == 0) {
1693  		if (sibs < XA_CHUNK_MASK)
1694  			break;
1695  		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1696  			break;
1697  		shift += XA_CHUNK_SHIFT;
1698  		if (offset == XA_CHUNK_MASK)
1699  			offset = sibs & XA_CHUNK_MASK;
1700  		sibs >>= XA_CHUNK_SHIFT;
1701  		first >>= XA_CHUNK_SHIFT;
1702  	}
1703  
1704  	offset = first & XA_CHUNK_MASK;
1705  	if (offset + sibs > XA_CHUNK_MASK)
1706  		sibs = XA_CHUNK_MASK - offset;
1707  	if ((((first + sibs + 1) << shift) - 1) > last)
1708  		sibs -= 1;
1709  
1710  	xas->xa_shift = shift;
1711  	xas->xa_sibs = sibs;
1712  }

/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with the stored range are unaffected unless @entry
 * is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			unsigned int order = BITS_PER_LONG;
			if (last + 1)
				order = __ffs(last + 1);
			xas_set_order(&xas, last, order);
			xas_create(&xas, true);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
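
/*
 * Usage sketch (illustrative only): cover indices 0-15 with one pointer,
 * e.g. to mirror a sixteen-page buffer.  The buf pointer and
 * example_store_range() helper are hypothetical; note the call may sleep
 * with GFP_KERNEL.
 */
static __maybe_unused int example_store_range(struct xarray *xa, void *buf)
{
	void *ret = xa_store_range(xa, 0, 15, buf, GFP_KERNEL);

	if (xa_is_err(ret))
		return xa_err(ret);
	/* xa_load(xa, 7) - or any index in 0-15 - now returns buf */
	return 0;
}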

/**
 * xas_get_order() - Get the order of an entry.
 * @xas: XArray operation state.
 *
 * Call this after xas_load(); @xas must not be in an error state.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xas_get_order(struct xa_state *xas)
{
	int order = 0;

	if (!xas->xa_node)
		return 0;

	for (;;) {
		unsigned int slot = xas->xa_offset + (1 << order);

		if (slot >= XA_CHUNK_SIZE)
			break;
		if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
			break;
		order++;
	}

	order += xas->xa_node->shift;
	return order;
}
EXPORT_SYMBOL_GPL(xas_get_order);

/**
 * xa_get_order() - Get the order of an entry.
 * @xa: XArray.
 * @index: Index of the entry.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xa_get_order(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	int order = 0;
	void *entry;

	rcu_read_lock();
	entry = xas_load(&xas);
	if (entry)
		order = xas_get_order(&xas);
	rcu_read_unlock();

	return order;
}
EXPORT_SYMBOL(xa_get_order);
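
/*
 * Usage sketch (illustrative only): assuming a single entry was stored
 * over indices 0-15 (as in the xa_store_range() sketch above), every
 * index in that range reports the same order.
 */
static __maybe_unused int example_order(struct xarray *xa)
{
	/* expected to be 4 for a 16-index entry, since 2^4 == 16 */
	return xa_get_order(xa, 7);
}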
#endif /* CONFIG_XARRAY_MULTI */

/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be called on an XArray initialized with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		else
			*id = xas.xa_index;
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
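
/*
 * Usage sketch (illustrative only): allocate a free ID for @item while
 * already holding the xa_lock.  Requires an XArray initialised with
 * XA_FLAGS_ALLOC (e.g. DEFINE_XARRAY_ALLOC()); example_alloc() is a
 * hypothetical helper.
 */
static __maybe_unused int example_alloc(struct xarray *xa, void *item, u32 *id)
{
	int err;

	xa_lock(xa);
	err = __xa_alloc(xa, id, item, xa_limit_32b, GFP_NOWAIT);
	xa_unlock(xa);

	return err;	/* 0, -EBUSY (no free index) or -ENOMEM */
}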

/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range for allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be called on an XArray initialized with XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	u32 min = limit.min;
	int ret;

	limit.min = max(min, *next);
	ret = __xa_alloc(xa, id, entry, limit, gfp);
	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}

	if (ret < 0 && limit.min > min) {
		limit.min = min;
		ret = __xa_alloc(xa, id, entry, limit, gfp);
		if (ret == 0)
			ret = 1;
	}

	if (ret >= 0) {
		*next = *id + 1;
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
	return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
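
/*
 * Usage sketch (illustrative only): cyclic allocation hands out the lowest
 * free ID at or above *@next, wrapping back to limit.min once the space
 * above *@next is exhausted.  The function-local next counter and
 * example_alloc_cyclic() helper are hypothetical.
 */
static __maybe_unused int example_alloc_cyclic(struct xarray *xa, void *item,
		u32 *id)
{
	static u32 next;
	int ret;

	xa_lock(xa);
	ret = __xa_alloc_cyclic(xa, id, item, xa_limit_32b, &next, GFP_NOWAIT);
	xa_unlock(xa);

	return (ret < 0) ? ret : 0;	/* ret == 1 only means we wrapped */
}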

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
 found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
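
/*
 * Usage sketch (illustrative only): marks are per-entry bits, useful for
 * tagging entries as dirty, pending and so on.  XA_MARK_0 is an arbitrary
 * choice here; setting a mark on an index with no entry has no effect.
 */
static __maybe_unused void example_marks(struct xarray *xa, unsigned long index)
{
	xa_set_mark(xa, index, XA_MARK_0);
	if (xa_get_mark(xa, index, XA_MARK_0)) {
		/* an entry exists at @index and carries XA_MARK_0 */
		xa_clear_mark(xa, index, XA_MARK_0);
	}
}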

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
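
/*
 * Usage sketch (illustrative only): find the first present entry at or
 * above *@index.  On success, *@index is updated to the index where the
 * entry actually lives; example_find_first() is a hypothetical helper.
 */
static __maybe_unused void *example_find_first(struct xarray *xa,
		unsigned long *index)
{
	return xa_find(xa, index, ULONG_MAX, XA_PRESENT);
}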
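/*
 * Return true if @xas is positioned partway through a multi-index entry,
 * i.e. xa_index is above the entry's canonical (lowest) index.  Used by
 * xa_find_after() to avoid re-reporting an entry it has already returned.
 */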
static bool xas_sibling(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	unsigned long mask;

	if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
		return false;
	mask = (XA_CHUNK_SIZE << node->shift) - 1;
	return (xas->xa_index & mask) >
		((unsigned long)xas->xa_offset << node->shift);
}

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	if (xas.xa_index == 0)
		return NULL;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);

		if (xas_invalid(&xas))
			break;
		if (xas_sibling(&xas))
			continue;
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
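
/*
 * Usage sketch (illustrative only): iterate every present entry.  This
 * open-codes the same xa_find()/xa_find_after() pattern that the
 * xa_for_each() helper in <linux/xarray.h> expands to; process_entry()
 * is a hypothetical callback.
 */
static __maybe_unused void example_iterate(struct xarray *xa,
		void (*process_entry)(unsigned long index, void *entry))
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
		process_entry(index, entry);
}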

static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
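
/*
 * Usage sketch (illustrative only): copy up to 16 present entries from the
 * first 1024 indices into a stack buffer.  Because only the RCU lock is
 * taken, the copy is not an atomic snapshot (see above).
 */
static __maybe_unused unsigned int example_extract(struct xarray *xa)
{
	void *batch[16];
	unsigned int count;

	count = xa_extract(xa, batch, 0, 1023, ARRAY_SIZE(batch), XA_PRESENT);
	/* batch[0..count-1] now hold the copied entries */
	return count;
}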

/**
 * xa_delete_node() - Private interface for workingset code.
 * @node: Node to be removed from the tree.
 * @update: Function to call to update ancestor nodes.
 *
 * Context: xa_lock must be held on entry and will not be released.
 */
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
{
	struct xa_state xas = {
		.xa = node->array,
		.xa_index = (unsigned long)node->offset <<
				(node->shift + XA_CHUNK_SHIFT),
		.xa_shift = node->shift + XA_CHUNK_SHIFT,
		.xa_offset = node->offset,
		.xa_node = xa_parent_locked(node->array, node),
		.xa_update = update,
	};

	xas_store(&xas, NULL);
}
EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */

/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	if (xa_zero_busy(xa))
		xa_mark_clear(xa, XA_FREE_MARK);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
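
/*
 * Usage sketch (illustrative only): xa_destroy() frees the tree nodes but
 * not the objects stored in them, so free those first.  kfree() assumes
 * the entries were kmalloc'd pointers; example_teardown() is hypothetical.
 */
static __maybe_unused void example_teardown(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		kfree(entry);
	xa_destroy(xa);
}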

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif