/* SPDX-License-Identifier: GPL-2.0+ */
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 * 0-62: Sibling entries
 * space (-4094 to -2).  They're never stored in the slots array; only
#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
 * xa_mk_value() - Create an XArray entry from an integer.
 * xa_to_value() - Get value stored in an XArray entry.
 * xa_is_value() - Determine if an entry is a value.
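/*
 * Example (illustrative, not part of this header): storing a small
 * integer as a value entry and reading it back.  A minimal sketch; the
 * index and value are arbitrary.
 */
static int example_value_entry(struct xarray *xa)
{
	void *entry;
	int err = xa_err(xa_store(xa, 0, xa_mk_value(42), GFP_KERNEL));

	if (err)
		return err;
	entry = xa_load(xa, 0);
	if (xa_is_value(entry))
		pr_info("stored %lu\n", xa_to_value(entry));
	return 0;
}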
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
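/*
 * Example (illustrative): round-tripping a pointer with a 2-bit tag.
 * A minimal sketch; the tag value 1 is arbitrary and must be 0-3.
 */
static void example_tagged_pointer(struct xarray *xa, void *p)
{
	void *entry = xa_tag_pointer(p, 1);

	xa_store(xa, 0, entry, GFP_KERNEL);
	entry = xa_load(xa, 0);
	pr_info("pointer %p had tag %u\n", xa_untag_pointer(entry),
		xa_pointer_tag(entry));
}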
 * xa_mk_internal() - Create an internal entry.
 * Internal entries are used for a number of purposes.  Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code).  256
 * xa_to_internal() - Extract the value from an internal entry.
 * xa_is_internal() - Is the entry an internal entry?
 * xa_is_zero() - Is the entry a zero entry?
 * xa_is_err() - Report whether an XArray operation returned an error
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
 * xa_err() - Turn an XArray result into an errno.
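/*
 * Example (illustrative): distinguishing errors from stored entries.
 * xa_store() returns the old entry on success or an encoded errno;
 * xa_err() converts either into 0 or a negative errno.  A minimal
 * sketch.
 */
static int example_store_checked(struct xarray *xa, unsigned long index,
				 void *item)
{
	void *curr = xa_store(xa, index, item, GFP_KERNEL);

	return xa_err(curr);	/* 0 on success, e.g. -ENOMEM on failure */
}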
 * struct xa_limit - Represents a range of IDs.
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 * * xa_limit_16b	- [0 - USHRT_MAX]
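/*
 * Example (illustrative): passing a predefined or a custom limit to
 * xa_alloc(), which is documented further below.  A minimal sketch;
 * the custom bounds are arbitrary.
 */
static int example_limits(struct xarray *xa, void *entry, u32 *id)
{
	int err = xa_alloc(xa, id, entry, xa_limit_32b, GFP_KERNEL);

	if (err)
		return err;
	return xa_alloc(xa, id, entry, XA_LIMIT(16, 1023), GFP_KERNEL);
}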
/* ALLOC is for a normal 0-based alloc.  ALLOC1 is for a 1-based alloc */
 * struct xarray - The anchor of the XArray.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry.  If any other entry in the array is non-NULL, @xa_head points
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * DEFINE_XARRAY() - Define an XArray.
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
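/*
 * Example (illustrative): static definitions using the macros above.
 */
static DEFINE_XARRAY(example_array);		/* ordinary XArray */
static DEFINE_XARRAY_ALLOC(example_ids);	/* IDs allocated from 0 */
static DEFINE_XARRAY_ALLOC1(example_ids1);	/* IDs allocated from 1 */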
 * xa_init_flags() - Initialise an empty XArray with flags.
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
 * xa_init() - Initialise an empty XArray.
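/*
 * Example (illustrative): initialising an XArray embedded in another
 * structure at runtime.  'struct example_ctx' is a stand-in for a real
 * containing object.
 */
struct example_ctx {
	struct xarray ids;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	xa_init_flags(&ctx->ids, XA_FLAGS_ALLOC);
}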
 * xa_empty() - Determine if an array has any present entries.
	return xa->xa_head == NULL;
 * xa_marked() - Inquire whether any entry in this array has a mark set
	return xa->xa_flags & XA_FLAGS_MARK(mark);
 * xa_for_each_range() - Iterate over a portion of an XArray.
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * xa_for_each() - Iterate over present entries in an XArray.
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
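/*
 * Example (illustrative): walking all present entries, then only those
 * with XA_MARK_0 set.  A minimal sketch.
 */
static void example_iterate(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		pr_info("index %lu is present\n", index);

	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		pr_info("index %lu is marked\n", index);
}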
#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
				spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass) \
		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
 * may also re-enable interrupts if the XArray flags indicate the
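/*
 * Example (illustrative): holding the xa_lock across two stores using
 * the __xa_* variants declared elsewhere in this header.  GFP_ATOMIC
 * avoids dropping the lock to allocate memory.  A minimal sketch.
 */
static void example_locked_stores(struct xarray *xa, void *a, void *b)
{
	xa_lock(xa);
	__xa_store(xa, 0, a, GFP_ATOMIC);
	__xa_store(xa, 1, b, GFP_ATOMIC);
	xa_unlock(xa);
}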
 * xa_store_bh() - Store this entry in the XArray.
 * xa_store_irq() - Store this entry in the XArray.
 * xa_erase_bh() - Erase this entry from the XArray.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 * xa_erase_irq() - Erase this entry from the XArray.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
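/*
 * Example (illustrative): process-context code sharing an array with a
 * softirq uses the _bh variants so the softirq cannot deadlock on the
 * xa_lock.  A minimal sketch.
 */
static void example_softirq_safe(struct xarray *xa, unsigned long index,
				 void *entry)
{
	xa_store_bh(xa, index, entry, GFP_KERNEL);
	/* ... */
	xa_erase_bh(xa, index);
}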
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * if the @gfp flags permit.
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * disabling interrupts.  May sleep if the @gfp flags permit.
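/*
 * Example (illustrative): the lookup-or-insert pattern.  The store only
 * happens if the slot still holds NULL; otherwise the existing entry
 * wins.  A minimal sketch.
 */
static void *example_get_or_insert(struct xarray *xa, unsigned long index,
				   void *new)
{
	void *curr = xa_cmpxchg(xa, index, NULL, new, GFP_KERNEL);

	if (xa_is_err(curr))
		return ERR_PTR(xa_err(curr));
	return curr ?: new;	/* non-NULL: another thread got there first */
}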
 * xa_insert() - Store this entry in the XArray unless another entry is
 * the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
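/*
 * Example (illustrative): claiming an index exclusively; -EBUSY means
 * another entry was already present.  A minimal sketch.
 */
static int example_claim_index(struct xarray *xa, unsigned long index,
			       void *entry)
{
	int err = xa_insert(xa, index, entry, GFP_KERNEL);

	if (err == -EBUSY)
		pr_debug("index %lu already in use\n", index);
	return err;
}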
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
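/*
 * Example (illustrative): allocating a free ID for a new object.  The
 * array must have been initialised with XA_FLAGS_ALLOC (e.g. via
 * DEFINE_XARRAY_ALLOC()).  A minimal sketch.
 */
static int example_new_id(struct xarray *xa, void *object, u32 *id)
{
	return xa_alloc(xa, id, object, xa_limit_31b, GFP_KERNEL);
}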
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * the @gfp flags permit.
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
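/*
 * Example (illustrative): cyclic allocation avoids promptly reusing a
 * just-freed ID.  @next must persist between calls; it is static here
 * only for brevity.  A minimal sketch.
 */
static int example_new_cyclic_id(struct xarray *xa, void *object, u32 *id)
{
	static u32 next;

	return xa_alloc_cyclic(xa, id, object, xa_limit_31b, &next,
			       GFP_KERNEL);
}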
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * disabling softirqs.  May sleep if the @gfp flags permit.
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * disabling interrupts.  May sleep if the @gfp flags permit.
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 * xa_reserve() - Reserve this index in the XArray.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 * xa_reserve_bh() - Reserve this index in the XArray.
 * A softirq-disabling version of xa_reserve().
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 * xa_reserve_irq() - Reserve this index in the XArray.
 * An interrupt-disabling version of xa_reserve().
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 * xa_release() - Release a reserved entry.
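/*
 * Example (illustrative): reserve an index while sleeping is allowed,
 * fill it later from atomic context, or release it if the entry is no
 * longer wanted.  A minimal sketch.
 */
static int example_reserve_fill(struct xarray *xa, unsigned long index,
				void *entry, bool still_wanted)
{
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;
	if (still_wanted)
		return xa_err(xa_store(xa, index, entry, GFP_ATOMIC));
	xa_release(xa, index);
	return 0;
}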
 * balanced against the memory consumption of each node.  On a 64-bit system,
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
 * @count is the count of every non-NULL element in the ->slots array
 * @nr_values is the count of every element in ->slots which is
	return rcu_dereference_check(xa->xa_head,
					lockdep_is_held(&xa->xa_lock));
	return rcu_dereference_protected(xa->xa_head,
					lockdep_is_held(&xa->xa_lock));
	return rcu_dereference_check(node->slots[offset],
					lockdep_is_held(&xa->xa_lock));
	return rcu_dereference_protected(node->slots[offset],
					lockdep_is_held(&xa->xa_lock));
	return rcu_dereference_check(node->parent,
					lockdep_is_held(&xa->xa_lock));
	return rcu_dereference_protected(node->parent,
					lockdep_is_held(&xa->xa_lock));
	return (struct xa_node *)((unsigned long)entry - 2);
 * xa_is_sibling() - Is the entry a sibling entry?
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 * xa_is_retry() - Is the entry a retry entry?
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * typedef xa_update_node_t - A callback function from the XArray.
 * Implementations should not drop the xa_lock, nor re-enable
 * We encode errnos in the xas->xa_node.  If an error has happened, we need to
 * XA_STATE() - Declare an XArray operation state.
 * XA_STATE_ORDER() - Declare an XArray operation state.
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)
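/*
 * Example: the store loop documented in
 * Documentation/core-api/xarray.rst.  xas_nomem() allocates memory
 * after the lock has been dropped and asks us to retry; any other
 * error is reported by xas_error().
 */
static int example_xas_store(struct xarray *xa, unsigned long index,
			     void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));
	return xas_error(&xas);
}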
#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)
 * xas_error() - Return an errno stored in the xa_state.
	return xa_err(xas->xa_node);
 * xas_set_err() - Note an error in the xa_state.
	xas->xa_node = XA_ERROR(err);
 * xas_invalid() - Is the xas in a retry or error state?
	return (unsigned long)xas->xa_node & 3;
 * xas_valid() - Is the xas a valid cursor into the array?
 * xas_is_node() - Does the xas point to a node?
	return xas_valid(xas) && xas->xa_node;
/* True if the node represents head-of-tree, RESTART or BOUNDS */
 * xas_reset() - Reset an XArray operation state.
	xas->xa_node = XAS_RESTART;
 * xas_retry() - Retry the operation if appropriate.
 * xas_reload() - Refetch an entry from the xarray.
	struct xa_node *node = xas->xa_node;
		return xa_head(xas->xa);
	offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
	entry = xa_entry(xas->xa, node, offset);
	offset = xas->xa_offset;
	return xa_entry(xas->xa, node, offset);
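/*
 * Example (illustrative): an RCU lookup which restarts on retry entries
 * and revalidates with xas_reload() after taking a reference, in the
 * style of the page cache.  The get/put steps are stand-ins.
 */
static void *example_lookup(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
repeat:
	entry = xas_load(&xas);
	if (xas_retry(&xas, entry))
		goto repeat;
	/* ... take a reference on 'entry' here ... */
	if (entry && entry != xas_reload(&xas)) {
		/* ... drop the reference; the entry moved under us ... */
		goto repeat;
	}
	rcu_read_unlock();
	return entry;
}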
 * xas_set() - Set up XArray operation state for a different index.
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
 * xas_advance() - Skip over sibling entries.
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
 * xas_set_order() - Set up XArray operation state for a multislot entry.
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
 * xas_set_update() - Set up XArray operation state for a callback.
	xas->xa_update = update;
	xas->xa_lru = lru;
 * xas_next_entry() - Advance iterator to next present entry.
	struct xa_node *node = xas->xa_node;
	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
	if (unlikely(xas->xa_index >= max))
	if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
	entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
	xas->xa_offset++;
	xas->xa_index++;
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;
 * xas_next_marked() - Advance iterator to next marked entry.
	struct xa_node *node = xas->xa_node;
	if (unlikely(xas_not_node(node) || node->shift))
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
	entry = xa_entry(xas->xa, node, offset);
 * xas_for_each() - Iterate over a range of an XArray.
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * xas_for_each_conflict() - Iterate over a range of an XArray.
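/*
 * Example (illustrative): iterating with the advanced API under RCU,
 * skipping retry entries by hand.  A minimal sketch.
 */
static void example_xas_iterate(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		pr_info("index %lu is present\n", xas.xa_index);
	}
	rcu_read_unlock();
}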
 * xas_prev() - Move iterator to previous index.
	struct xa_node *node = xas->xa_node;
	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset == 0))
	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
 * xas_next() - Move state to next index.
	struct xa_node *node = xas->xa_node;
	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset == XA_CHUNK_MASK))
	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
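/*
 * Example (illustrative): stepping between adjacent indices once the
 * state points into the tree.  A minimal sketch.
 */
static void example_neighbours(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *here, *after;

	rcu_read_lock();
	here = xas_load(&xas);		/* entry at 'index' */
	after = xas_next(&xas);		/* entry at 'index + 1' */
	rcu_read_unlock();
	pr_info("%p then %p\n", here, after);
}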