Lines matching +full:0 +full:xa (all matches below are from include/linux/xarray.h)
40 * 0-62: Sibling entries
60 WARN_ON((long)v < 0); in xa_mk_value()
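xa_mk_value() packs an unsigned long into a value entry; the WARN_ON above is why values must fit in a signed long (i.e. not exceed LONG_MAX). A minimal sketch of round-tripping a value, assuming a hypothetical DEFINE_XARRAY'd `array`:

    void *entry;

    xa_store(&array, 5, xa_mk_value(42), GFP_KERNEL);

    entry = xa_load(&array, 5);
    if (xa_is_value(entry))                     /* value entry, not a pointer */
        pr_info("stored %lu\n", xa_to_value(entry));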
91 * @tag: Tag value (0, 1 or 3).
94 * of storing value entries. Three tags are available (0, 1 and 3).
140 * Internal entries are used for a number of purposes. Entries 0-255 are
141 * used for sibling entries (only 0-62 are used by the current code). 256
217 * the errno from the pointer value, or returns 0 if the pointer does not
221 * Return: A negative errno or 0.
228 return 0; in xa_err()
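Because xa_store() and friends return the previous entry, which may itself encode an errno, xa_err() is the usual failure check. Sketch (`array` and `item` are illustrative names):

    void *curr = xa_store(&array, index, item, GFP_KERNEL);
    int err = xa_err(curr);                     /* 0, or e.g. -ENOMEM */

    if (err)
        return err;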
239 * * xa_limit_32b - [0 - UINT_MAX]
240 * * xa_limit_31b - [0 - INT_MAX]
241 * * xa_limit_16b - [0 - USHRT_MAX]
250 #define xa_limit_32b XA_LIMIT(0, UINT_MAX)
251 #define xa_limit_31b XA_LIMIT(0, INT_MAX)
252 #define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
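Beyond the three predefined limits, XA_LIMIT() builds any inclusive range for the allocating API. Sketch (the `ids` array, `item` and the 1-63 range are made up):

    u32 id;
    int err = xa_alloc(&ids, &id, item, XA_LIMIT(1, 63), GFP_KERNEL);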
255 #define XA_MARK_0 ((__force xa_mark_t)0U)
280 /* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
296 * If the only non-NULL entry in the array is at index 0, @xa_head is that
335 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
338 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
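Both macros define and statically initialise an XArray; the _ALLOC variant sets XA_FLAGS_ALLOC so xa_alloc() may be used on it. Sketch with hypothetical names:

    DEFINE_XARRAY(foo_pages);               /* plain array, flags == 0 */
    DEFINE_XARRAY_ALLOC(foo_ids);           /* ID allocation starting at 0 */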
363 void *xa_find(struct xarray *xa, unsigned long *index,
365 void *xa_find_after(struct xarray *xa, unsigned long *index,
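These two functions are the building blocks of the iteration macros below. Open-coded, a full scan looks roughly like this (sketch; process() is a hypothetical stand-in):

    unsigned long index = 0;
    void *entry;

    entry = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
    while (entry) {
        process(index, entry);
        entry = xa_find_after(&array, &index, ULONG_MAX, XA_PRESENT);
    }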
373 * @xa: XArray.
382 static inline void xa_init_flags(struct xarray *xa, gfp_t flags) in xa_init_flags() argument
384 spin_lock_init(&xa->xa_lock); in xa_init_flags()
385 xa->xa_flags = flags; in xa_init_flags()
386 xa->xa_head = NULL; in xa_init_flags()
391 * @xa: XArray.
397 static inline void xa_init(struct xarray *xa) in xa_init() argument
399 xa_init_flags(xa, 0); in xa_init()
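For an XArray embedded in another structure, initialise it at runtime; xa_init() is simply xa_init_flags() with no flags, as the body above shows. Sketch with a hypothetical container:

    struct my_dev {
        struct xarray buffers;
    };

    static void my_dev_setup(struct my_dev *dev)
    {
        xa_init(&dev->buffers); /* or xa_init_flags() for e.g. XA_FLAGS_ALLOC */
    }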
404 * @xa: XArray.
409 static inline bool xa_empty(const struct xarray *xa) in xa_empty() argument
411 return xa->xa_head == NULL; in xa_empty()
416 * @xa: XArray.
422 static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) in xa_marked() argument
424 return xa->xa_flags & XA_FLAGS_MARK(mark); in xa_marked()
429 * @xa: XArray.
436 * in @xa at @index. You may modify @index during the iteration if you
451 #define xa_for_each_range(xa, index, entry, start, last) \ argument
453 entry = xa_find(xa, &index, last, XA_PRESENT); \
455 entry = xa_find_after(xa, &index, last, XA_PRESENT))
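Typical use of the range iterator, visiting only a bounded index window (sketch; names hypothetical):

    unsigned long index;
    void *entry;

    xa_for_each_range(&array, index, entry, 16, 31)
        pr_info("index %lu is present\n", index);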
459 * @xa: XArray.
465 * in @xa at @index. You may modify @index during the iteration if you
480 #define xa_for_each_start(xa, index, entry, start) \ argument
481 xa_for_each_range(xa, index, entry, start, ULONG_MAX)
485 * @xa: XArray.
490 * in @xa at @index. You may modify @index during the iteration if you want
504 #define xa_for_each(xa, index, entry) \ argument
505 xa_for_each_start(xa, index, entry, 0)
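xa_for_each() is the common whole-array iterator. Sketch:

    unsigned long index;
    void *entry;

    xa_for_each(&array, index, entry)
        pr_info("index %lu: entry %p\n", index, entry);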
509 * @xa: XArray.
515 * in @xa at @index. The iteration will skip all entries in the array
531 #define xa_for_each_marked(xa, index, entry, filter) \ argument
532 for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
533 entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
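Marks are per-entry tag bits; xa_for_each_marked() visits only entries with the given mark set. Sketch using XA_MARK_0 (index 9 and `array` are made up; a mark can only be set on a present entry):

    unsigned long index;
    void *entry;

    xa_set_mark(&array, 9, XA_MARK_0);      /* tag the entry at index 9 */

    xa_for_each_marked(&array, index, entry, XA_MARK_0)
        pr_info("marked entry at %lu\n", index);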
535 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) argument
536 #define xa_lock(xa) spin_lock(&(xa)->xa_lock) argument
537 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock) argument
538 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock) argument
539 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock) argument
540 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock) argument
541 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock) argument
542 #define xa_lock_irqsave(xa, flags) \ argument
543 spin_lock_irqsave(&(xa)->xa_lock, flags)
544 #define xa_unlock_irqrestore(xa, flags) \ argument
545 spin_unlock_irqrestore(&(xa)->xa_lock, flags)
546 #define xa_lock_nested(xa, subclass) \ argument
547 spin_lock_nested(&(xa)->xa_lock, subclass)
548 #define xa_lock_bh_nested(xa, subclass) \ argument
549 spin_lock_bh_nested(&(xa)->xa_lock, subclass)
550 #define xa_lock_irq_nested(xa, subclass) \ argument
551 spin_lock_irq_nested(&(xa)->xa_lock, subclass)
552 #define xa_lock_irqsave_nested(xa, flags, subclass) \ argument
553 spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
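These wrap the spinlock embedded in struct xarray. Take the lock yourself when several operations must be atomic with respect to other writers, and use the __xa_* variants inside. Sketch (GFP_ATOMIC so the lock is never dropped for allocation; real code would also check __xa_store()'s return with xa_err()):

    xa_lock(&array);
    __xa_erase(&array, old_index);
    __xa_store(&array, new_index, item, GFP_ATOMIC);
    xa_unlock(&array);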
577 * @xa: XArray.
589 static inline void *xa_store_bh(struct xarray *xa, unsigned long index, in xa_store_bh() argument
595 xa_lock_bh(xa); in xa_store_bh()
596 curr = __xa_store(xa, index, entry, gfp); in xa_store_bh()
597 xa_unlock_bh(xa); in xa_store_bh()
604 * @xa: XArray.
616 static inline void *xa_store_irq(struct xarray *xa, unsigned long index, in xa_store_irq() argument
622 xa_lock_irq(xa); in xa_store_irq()
623 curr = __xa_store(xa, index, entry, gfp); in xa_store_irq()
624 xa_unlock_irq(xa); in xa_store_irq()
631 * @xa: XArray.
642 static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) in xa_erase_bh() argument
646 xa_lock_bh(xa); in xa_erase_bh()
647 entry = __xa_erase(xa, index); in xa_erase_bh()
648 xa_unlock_bh(xa); in xa_erase_bh()
655 * @xa: XArray.
666 static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) in xa_erase_irq() argument
670 xa_lock_irq(xa); in xa_erase_irq()
671 entry = __xa_erase(xa, index); in xa_erase_irq()
672 xa_unlock_irq(xa); in xa_erase_irq()
679 * @xa: XArray.
692 static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, in xa_cmpxchg() argument
698 xa_lock(xa); in xa_cmpxchg()
699 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg()
700 xa_unlock(xa); in xa_cmpxchg()
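xa_cmpxchg() behaves like cmpxchg() on a slot: the store happens only if the slot still holds @old. Sketch of the usual race check (names illustrative):

    void *curr = xa_cmpxchg(&array, index, old_item, new_item, GFP_KERNEL);

    if (xa_err(curr))
        return xa_err(curr);        /* allocation failure */
    if (curr != old_item)
        return -EBUSY;              /* another thread got there first */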
707 * @xa: XArray.
720 static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, in xa_cmpxchg_bh() argument
726 xa_lock_bh(xa); in xa_cmpxchg_bh()
727 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg_bh()
728 xa_unlock_bh(xa); in xa_cmpxchg_bh()
735 * @xa: XArray.
748 static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, in xa_cmpxchg_irq() argument
754 xa_lock_irq(xa); in xa_cmpxchg_irq()
755 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg_irq()
756 xa_unlock_irq(xa); in xa_cmpxchg_irq()
764 * @xa: XArray.
775 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
778 static inline int __must_check xa_insert(struct xarray *xa, in xa_insert() argument
784 xa_lock(xa); in xa_insert()
785 err = __xa_insert(xa, index, entry, gfp); in xa_insert()
786 xa_unlock(xa); in xa_insert()
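xa_insert() is a store that fails rather than overwrites an existing entry. Sketch of the common pattern:

    int err = xa_insert(&array, index, item, GFP_KERNEL);

    if (err == -EBUSY)
        pr_warn("index %lu already in use\n", index);
    else if (err)
        return err;                 /* -ENOMEM */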
794 * @xa: XArray.
805 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
808 static inline int __must_check xa_insert_bh(struct xarray *xa, in xa_insert_bh() argument
814 xa_lock_bh(xa); in xa_insert_bh()
815 err = __xa_insert(xa, index, entry, gfp); in xa_insert_bh()
816 xa_unlock_bh(xa); in xa_insert_bh()
824 * @xa: XArray.
835 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
838 static inline int __must_check xa_insert_irq(struct xarray *xa, in xa_insert_irq() argument
844 xa_lock_irq(xa); in xa_insert_irq()
845 err = __xa_insert(xa, index, entry, gfp); in xa_insert_irq()
846 xa_unlock_irq(xa); in xa_insert_irq()
853 * @xa: XArray.
859 * Finds an empty entry in @xa between @limit.min and @limit.max,
868 * Return: 0 on success, -ENOMEM if memory could not be allocated or
871 static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, in xa_alloc() argument
877 xa_lock(xa); in xa_alloc()
878 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc()
879 xa_unlock(xa); in xa_alloc()
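The array must have been initialised with XA_FLAGS_ALLOC (e.g. via DEFINE_XARRAY_ALLOC()). Sketch assigning an ID to a hypothetical `dev`:

    u32 id;
    int err = xa_alloc(&dev_ids, &id, dev, xa_limit_31b, GFP_KERNEL);

    if (err)
        return err;         /* -ENOMEM, or -EBUSY if the range is full */
    dev->id = id;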
886 * @xa: XArray.
892 * Finds an empty entry in @xa between @limit.min and @limit.max,
901 * Return: 0 on success, -ENOMEM if memory could not be allocated or
904 static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, in xa_alloc_bh() argument
910 xa_lock_bh(xa); in xa_alloc_bh()
911 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc_bh()
912 xa_unlock_bh(xa); in xa_alloc_bh()
919 * @xa: XArray.
925 * Finds an empty entry in @xa between @limit.min and @limit.max,
934 * Return: 0 on success, -ENOMEM if memory could not be allocated or
937 static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, in xa_alloc_irq() argument
943 xa_lock_irq(xa); in xa_alloc_irq()
944 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc_irq()
945 xa_unlock_irq(xa); in xa_alloc_irq()
952 * @xa: XArray.
959 * Finds an empty entry in @xa between @limit.min and @limit.max,
970 * Return: 0 if the allocation succeeded without wrapping. 1 if the
974 static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic() argument
980 xa_lock(xa); in xa_alloc_cyclic()
981 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic()
982 xa_unlock(xa); in xa_alloc_cyclic()
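The cyclic variant keeps its search cursor in @next, so recently freed IDs are not reused immediately. Sketch (a driver-scope `next` cursor and `dev_ids` array are assumed):

    static u32 next;
    u32 id;
    int err = xa_alloc_cyclic(&dev_ids, &id, dev, xa_limit_16b, &next,
                              GFP_KERNEL);

    if (err < 0)
        return err;         /* err == 1 only means the search wrapped */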
989 * @xa: XArray.
996 * Finds an empty entry in @xa between @limit.min and @limit.max,
1007 * Return: 0 if the allocation succeeded without wrapping. 1 if the
1011 static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic_bh() argument
1017 xa_lock_bh(xa); in xa_alloc_cyclic_bh()
1018 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic_bh()
1019 xa_unlock_bh(xa); in xa_alloc_cyclic_bh()
1026 * @xa: XArray.
1033 * Finds an empty entry in @xa between @limit.min and @limit.max,
1044 * Return: 0 if the allocation succeeded without wrapping. 1 if the
1048 static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic_irq() argument
1054 xa_lock_irq(xa); in xa_alloc_cyclic_irq()
1055 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic_irq()
1056 xa_unlock_irq(xa); in xa_alloc_cyclic_irq()
1063 * @xa: XArray.
1077 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1080 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve() argument
1082 return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve()
1087 * @xa: XArray.
1095 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1098 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve_bh() argument
1100 return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve_bh()
1105 * @xa: XArray.
1113 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1116 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve_irq() argument
1118 return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve_irq()
1123 * @xa: XArray.
1130 static inline void xa_release(struct xarray *xa, unsigned long index) in xa_release() argument
1132 xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); in xa_release()
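xa_reserve() pre-allocates the slot so that a later store cannot fail, and xa_release() hands the reservation back if the store never happens; both are implemented as a cmpxchg against XA_ZERO_ENTRY, as the bodies above show. Sketch of the pair (`setup_ok` is a hypothetical condition):

    int err = xa_reserve(&array, index, GFP_KERNEL);
    if (err)
        return err;

    if (setup_ok)
        xa_store(&array, index, item, GFP_KERNEL);  /* fills the reserved slot */
    else
        xa_release(&array, index);                  /* drops the reservation */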
1184 #define XA_BUG_ON(xa, x) do { \ argument
1186 xa_dump(xa); \
1189 } while (0)
1195 } while (0)
1197 #define XA_BUG_ON(xa, x) do { } while (0) argument
1198 #define XA_NODE_BUG_ON(node, x) do { } while (0)
1202 static inline void *xa_head(const struct xarray *xa) in xa_head() argument
1204 return rcu_dereference_check(xa->xa_head, in xa_head()
1205 lockdep_is_held(&xa->xa_lock)); in xa_head()
1209 static inline void *xa_head_locked(const struct xarray *xa) in xa_head_locked() argument
1211 return rcu_dereference_protected(xa->xa_head, in xa_head_locked()
1212 lockdep_is_held(&xa->xa_lock)); in xa_head_locked()
1216 static inline void *xa_entry(const struct xarray *xa, in xa_entry() argument
1221 lockdep_is_held(&xa->xa_lock)); in xa_entry()
1225 static inline void *xa_entry_locked(const struct xarray *xa, in xa_entry_locked() argument
1230 lockdep_is_held(&xa->xa_lock)); in xa_entry_locked()
1234 static inline struct xa_node *xa_parent(const struct xarray *xa, in xa_parent() argument
1238 lockdep_is_held(&xa->xa_lock)); in xa_parent()
1242 static inline struct xa_node *xa_parent_locked(const struct xarray *xa, in xa_parent_locked() argument
1246 lockdep_is_held(&xa->xa_lock)); in xa_parent_locked()
1341 * single entry in the array at index 0, there are no allocated xa_nodes to
1349 struct xarray *xa; member
1370 .xa = array, \
1374 .xa_offset = 0, \
1375 .xa_pad = 0, \
1391 struct xa_state name = __XA_STATE(array, index, 0, 0)
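XA_STATE() declares the cursor used by the advanced xas_* API. The canonical store loop retries the allocation outside the lock via xas_nomem(), per Documentation/core-api/xarray.rst:

    XA_STATE(xas, &array, index);

    do {
        xas_lock(&xas);
        xas_store(&xas, item);
        xas_unlock(&xas);
    } while (xas_nomem(&xas, GFP_KERNEL));

    return xas_error(&xas);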
1410 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
1411 #define xas_trylock(xas) xa_trylock((xas)->xa)
1412 #define xas_lock(xas) xa_lock((xas)->xa)
1413 #define xas_unlock(xas) xa_unlock((xas)->xa)
1414 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
1415 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
1416 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
1417 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
1419 xa_lock_irqsave((xas)->xa, flags)
1421 xa_unlock_irqrestore((xas)->xa, flags)
1427 * Return: 0 if no error has been noted. A negative errno if one has.
1559 static inline int xa_get_order(struct xarray *xa, unsigned long index) in xa_get_order() argument
1561 return 0; in xa_get_order()
1566 return 0; in xas_get_order()
1602 return xa_head(xas->xa); in xas_reload()
1605 entry = xa_entry(xas->xa, node, offset); in xas_reload()
1612 return xa_entry(xas->xa, node, offset); in xas_reload()
1642 unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0; in xas_advance()
1658 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order()
1663 BUG_ON(order > 0); in xas_set_order()
1712 entry = xa_entry(xas->xa, node, xas->xa_offset + 1); in xas_next_entry()
1733 unsigned long data = *addr & (~0UL << offset); in xas_find_chunk()
1771 entry = xa_entry(xas->xa, node, offset); in xas_next_marked()
1848 * If the iterator was referencing index 0, this function wraps
1859 xas->xa_offset == 0)) in xas_prev()
1864 return xa_entry(xas->xa, node, xas->xa_offset); in xas_prev()
1878 * around to 0.
1893 return xa_entry(xas->xa, node, xas->xa_offset); in xas_next()
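xas_prev() and xas_next() are cheap single-step moves meant for densely populated ranges. A sketch walking consecutive entries from index 0 under RCU (production code would also handle xas_retry() entries):

    XA_STATE(xas, &array, 0);
    void *entry;

    rcu_read_lock();
    for (entry = xas_load(&xas); entry; entry = xas_next(&xas))
        pr_info("index %lu: %p\n", xas.xa_index, entry);
    rcu_read_unlock();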