/linux-6.12.1/lib/ |
D | xarray.c |
    151  xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);  in xas_set_offset()
    158  xas->xa_index &= ~XA_CHUNK_MASK << shift;  in xas_move_index()
    159  xas->xa_index += offset << shift;  in xas_move_index()
    192  if (xas->xa_index)  in xas_start()
    195  if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)  in xas_start()
    206  unsigned int offset = get_offset(xas->xa_index, node);  in xas_descend()
    416  unsigned long max = xas->xa_index;  in xas_max()
    707  unsigned long index = xas->xa_index;  in xas_create_range()
    711  xas->xa_index |= ((sibs + 1UL) << shift) - 1;  in xas_create_range()
    721  if (xas->xa_index <= (index | XA_CHUNK_MASK))  in xas_create_range()
    [all …]
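The xas_set_offset()/xas_descend() hits above all reduce to the same arithmetic: each tree level consumes XA_CHUNK_SHIFT bits of xa_index, and get_offset() is just (index >> node->shift) & XA_CHUNK_MASK. A standalone sketch of that decomposition (not kernel code; it assumes the usual XA_CHUNK_SHIFT of 6, i.e. 64 slots per node):

    #include <stdio.h>

    /* Split an xa_index into per-level slot offsets, mirroring
     * get_offset()'s (index >> node->shift) & XA_CHUNK_MASK. */
    #define XA_CHUNK_SHIFT 6
    #define XA_CHUNK_MASK  ((1UL << XA_CHUNK_SHIFT) - 1)

    int main(void)
    {
            unsigned long index = 0x12345;
            unsigned int shift;

            for (shift = 3 * XA_CHUNK_SHIFT; ; shift -= XA_CHUNK_SHIFT) {
                    printf("shift %2u: offset %lu\n",
                           shift, (index >> shift) & XA_CHUNK_MASK);
                    if (shift == 0)
                            break;
            }
            return 0;
    }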
|
D | idr.c |
    398  if (xas.xa_index > min / IDA_BITMAP_BITS)  in ida_alloc_range()
    400  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    408  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    431  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)  in ida_alloc_range()
    455  xas.xa_index = min / IDA_BITMAP_BITS;  in ida_alloc_range()
    463  return xas.xa_index * IDA_BITMAP_BITS + bit;  in ida_alloc_range()
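The ida_alloc_range() hits show how the returned ID is reconstructed from the cursor: xas.xa_index selects an IDA bitmap and bit selects a position inside it, giving xas.xa_index * IDA_BITMAP_BITS + bit. A minimal kernel-context sketch of calling this allocator (example_ida, example_get_id and example_put_id are hypothetical names):

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    /* Allocate an ID in [100, 199]; internally ida_alloc_range() scans
     * IDA bitmaps and returns xas.xa_index * IDA_BITMAP_BITS + bit. */
    static int example_get_id(void)
    {
            return ida_alloc_range(&example_ida, 100, 199, GFP_KERNEL);
    }

    static void example_put_id(int id)
    {
            ida_free(&example_ida, id);
    }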
|
D | test_xarray.c |
    139  xas_store(&xas, xa_mk_index(xas.xa_index));  in check_xas_retry()
    284  XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));  in check_xa_mark_2()
    617  XA_BUG_ON(xa, xas.xa_index != index);  in check_multi_store_2()
    1382  return entry ? xas.xa_index : -1;  in xa_find_entry()
    1509  XA_BUG_ON(xa, xas.xa_index != i);  in check_move_small()
    1516  XA_BUG_ON(xa, xas.xa_index != i);  in check_move_small()
    1523  XA_BUG_ON(xa, xas.xa_index != i);  in check_move_small()
    1532  XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);  in check_move_small()
    1534  XA_BUG_ON(xa, xas.xa_index != 0);  in check_move_small()
    1536  XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);  in check_move_small()
    [all …]
|
D | iov_iter.c |
    1076  pages[ret] = find_subpage(page, xas.xa_index);  in iter_xarray_populate_pages()
    1673  p[nr++] = find_subpage(page, xas.xa_index);  in iov_iter_extract_xarray_pages()
|
/linux-6.12.1/include/linux/ |
D | xarray.h |
    1350  unsigned long xa_index;  member
    1371  .xa_index = index, \
    1604  offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;  in xas_reload()
    1626  xas->xa_index = index;  in xas_set()
    1644  xas->xa_index = index;  in xas_advance()
    1658  xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;  in xas_set_order()
    1704  xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))  in xas_next_entry()
    1708  if (unlikely(xas->xa_index >= max))  in xas_next_entry()
    1716  xas->xa_index++;  in xas_next_entry()
    1766  xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;  in xas_next_marked()
    [all …]
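These are the core cursor helpers: xas_set() and xas_advance() load a new index into xa_index, xas_set_order() rounds it down to the order boundary, and xas_next_entry() increments it as the iteration walks slots. A minimal kernel-context sketch of an iteration that reads the cursor (example_xa and example_walk are hypothetical names):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);

    /* Walk any entries stored at indices 0..63.  xas.xa_index holds the
     * index of the entry just returned; xas_next_entry() (via
     * xas_for_each()) advances it between iterations. */
    static void example_walk(void)
    {
            XA_STATE(xas, &example_xa, 0);
            void *entry;

            rcu_read_lock();
            xas_for_each(&xas, entry, 63) {
                    if (xas_retry(&xas, entry))
                            continue;
                    pr_info("index %lu -> %p\n", xas.xa_index, entry);
            }
            rcu_read_unlock();
    }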
|
/linux-6.12.1/tools/testing/radix-tree/ |
D | multiorder.c |
    62  assert((xas.xa_index | mask) == (index[i] | mask));  in multiorder_iteration()
    114  assert((xas.xa_index | mask) == (tag_index[i] | mask));  in multiorder_tagged_iteration()
    141  assert((xas.xa_index | mask) == (tag_index[i] | mask));  in multiorder_tagged_iteration()
    154  assert(xas.xa_index == tag_index[i]);  in multiorder_tagged_iteration()
    191  item_sanity(item, xas.xa_index);  in iterator_func()
|
D | iteration_check_2.c | 26 assert(xas.xa_index >= 100); in iterator()
|
D | test.c | 262 item_free(entry, xas.xa_index); in item_kill_tree()
|
/linux-6.12.1/fs/cachefiles/ |
D | ondemand.c |
    174  xas.xa_index = id;  in cachefiles_ondemand_copen()
    388  if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)  in cachefiles_ondemand_finish_req()
    431  cache->req_id_next = xas.xa_index + 1;  in cachefiles_ondemand_daemon_read()
    442  msg->msg_id = xas.xa_index;  in cachefiles_ondemand_daemon_read()
    542  xas.xa_index = cache->msg_id_next;  in cachefiles_ondemand_send_req()
    545  xas.xa_index = 0;  in cachefiles_ondemand_send_req()
    553  cache->msg_id_next = xas.xa_index + 1;  in cachefiles_ondemand_send_req()
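The daemon_read/send_req hits show a common pattern: record xa_index + 1 after servicing an entry so the next lookup resumes just past it. A minimal kernel-context sketch of that resume-from-saved-index idiom (example_xa, next_id and example_pop_next are hypothetical names):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);
    static unsigned long next_id;

    /* Return the next entry at or above the saved position and remember
     * where to resume, mirroring cache->req_id_next = xas.xa_index + 1. */
    static void *example_pop_next(void)
    {
            XA_STATE(xas, &example_xa, next_id);
            void *entry;

            xa_lock(&example_xa);
            entry = xas_find(&xas, ULONG_MAX);
            if (entry)
                    next_id = xas.xa_index + 1;
            xa_unlock(&example_xa);

            return entry;
    }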
|
/linux-6.12.1/fs/ |
D | dax.c |
    147  unsigned long index = xas->xa_index;  in dax_entry_waitqueue()
    576  unsigned long index = xas->xa_index;  in grab_mapping_entry()
    617  xas->xa_index & ~PG_PMD_COLOUR,  in grab_mapping_entry()
    883  unsigned long index = xas->xa_index;  in dax_insert_entry()
    988  index = xas->xa_index & ~(count - 1);  in dax_writeback_one()
    1041  trace_dax_writeback_range(inode, xas.xa_index, end_index);  in dax_writeback_mapping_range()
    1043  tag_pages_for_writeback(mapping, xas.xa_index, end_index);  in dax_writeback_mapping_range()
    1061  trace_dax_writeback_range_done(inode, xas.xa_index, end_index);  in dax_writeback_mapping_range()
    1664  loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;  in dax_fault_iter()
    1816  if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)  in dax_fault_check_fallback()
    [all …]
|
/linux-6.12.1/mm/ |
D | filemap.c |
    874  folio->index = xas.xa_index;  in __filemap_add_folio()
    1757  return xas.xa_index;  in page_cache_next_miss()
    1758  if (xas.xa_index == 0)  in page_cache_next_miss()
    1794  if (xas.xa_index == ULONG_MAX)  in page_cache_prev_miss()
    1798  return xas.xa_index;  in page_cache_prev_miss()
    2048  indices[fbatch->nr] = xas.xa_index;  in find_get_entries()
    2114  VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),  in find_lock_entries()
    2118  base = xas.xa_index & ~(nr - 1);  in find_lock_entries()
    2129  indices[fbatch->nr] = xas.xa_index;  in find_lock_entries()
    2188  for (folio = xas_load(&xas); folio && xas.xa_index <= end;  in filemap_get_folios_contig()
    [all …]
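In the page cache, xa_index is the file's page offset: __filemap_add_folio() copies it into folio->index, and the batched lookups report it through the indices[] arrays. A kernel-context sketch of a batched walk over a mapping (example_scan_mapping is a hypothetical helper; the filemap_get_folios()/folio_batch API is assumed as in current mainline):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    /* Walk a mapping's page cache in batches; folio->index was set from
     * xas.xa_index when the folio was inserted. */
    static void example_scan_mapping(struct address_space *mapping)
    {
            struct folio_batch fbatch;
            pgoff_t start = 0;
            unsigned int i;

            folio_batch_init(&fbatch);
            while (filemap_get_folios(mapping, &start, ULONG_MAX, &fbatch)) {
                    for (i = 0; i < folio_batch_count(&fbatch); i++)
                            pr_debug("cached folio at index %lu\n",
                                     fbatch.folios[i]->index);
                    folio_batch_release(&fbatch);  /* drops the references */
            }
    }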
|
D | list_lru.c | 541 else if (!xas_error(&xas) && index != xas.xa_index) in memcg_list_lru_alloc()
|
D | swap_state.c | 115 VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio); in add_to_swap_cache()
|
D | madvise.c | 242 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); in shmem_swapin_range()
|
D | shmem.c |
    894  if (xas.xa_index == max)  in shmem_partial_swap_usage()
    1327  indices[folio_batch_count(fbatch)] = xas.xa_index;  in shmem_find_swap_entries()
    1338  return xas.xa_index;  in shmem_find_swap_entries()
|
D | khugepaged.c | 1851 VM_BUG_ON(index != xas.xa_index); in collapse_file()
|
/linux-6.12.1/drivers/infiniband/core/ |
D | ib_core_uverbs.c |
    298  xa_first = xas.xa_index;  in rdma_user_mmap_entry_insert_range()
    309  if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)  in rdma_user_mmap_entry_insert_range()
|
D | device.c | 170 *indexp = xas.xa_index; in xan_find_marked()
|
/linux-6.12.1/arch/arm64/mm/ |
D | mteswap.c | 88 __xa_erase(&mte_pages, xa_state.xa_index); in mte_invalidate_tags_area()
|
/linux-6.12.1/arch/arm64/kernel/ |
D | hibernate.c | 299 unsigned long pfn = xa_state.xa_index; in swsusp_mte_restore_tags()
|
/linux-6.12.1/Documentation/translations/zh_CN/core-api/ |
D | xarray.rst | 355 If xas_load() encounters a multi-index entry, the xa_index in the xa_state will not be changed. When walking an XArray or calling xas_find()
|
/linux-6.12.1/drivers/iommu/iommufd/ |
D | pages.c |
    608  if (xas.xa_index != start_index)  in pages_to_xarray()
    609  clear_xarray(xa, start_index, xas.xa_index - 1);  in pages_to_xarray()
|
/linux-6.12.1/Documentation/core-api/ |
D | xarray.rst | 474 If xas_load() encounters a multi-index entry, the xa_index
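A minimal kernel-context sketch of the behaviour described here: one entry is stored for a whole range of indices and a lookup anywhere in that range returns it; as the documentation notes, xas_load() leaves xa_index untouched when it lands on such a multi-index entry. Requires CONFIG_XARRAY_MULTI; example_xa and example_multi_index are hypothetical names.

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);

    /* Store one entry covering indices 64..127, then load through the
     * middle of the range: the same entry is returned for every index
     * it spans. */
    static int example_multi_index(void)
    {
            void *entry;
            int err;

            err = xa_err(xa_store_range(&example_xa, 64, 127,
                                        xa_mk_value(42), GFP_KERNEL));
            if (err)
                    return err;

            entry = xa_load(&example_xa, 100);    /* same entry as index 64 */
            return entry ? xa_to_value(entry) : -ENOENT;
    }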
|
/linux-6.12.1/virt/kvm/ |
D | kvm_main.c | 2435 if (xas.xa_index != index || in kvm_range_has_memory_attributes()
|