/linux-6.12.1/include/linux/ |
D | rwbase_rt.h |
    12  atomic_t readers; member
    18  .readers = ATOMIC_INIT(READER_BIAS), \
    25  atomic_set(&(rwbase)->readers, READER_BIAS); \
    31  return atomic_read(&rwb->readers) != READER_BIAS; in rw_base_is_locked()
    36  return atomic_read(&rwb->readers) == WRITER_BIAS; in rw_base_is_write_locked()
    41  return atomic_read(&rwb->readers) > 0; in rw_base_is_contended()
|
D | rwlock_types.h | 59 atomic_t readers; member
|
/linux-6.12.1/kernel/locking/ |
D | rwbase_rt.c |
    61  for (r = atomic_read(&rwb->readers); r < 0;) { in rwbase_read_trylock()
    62  if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1))) in rwbase_read_trylock()
    123 atomic_inc(&rwb->readers); in __rwbase_read_lock()
    177 if (unlikely(atomic_dec_and_test(&rwb->readers))) in rwbase_read_unlock()
    190 (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers); in __rwbase_write_unlock()
    223 if (!atomic_read_acquire(&rwb->readers)) { in __rwbase_write_trylock()
    224 atomic_set(&rwb->readers, WRITER_BIAS); in __rwbase_write_trylock()
    242 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_lock()
    288 atomic_sub(READER_BIAS, &rwb->readers); in rwbase_write_trylock()
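Taken together, the rwbase_rt hits above show a complete biased-counter protocol: the counter rests at READER_BIAS (a large negative value) when unlocked, readers increment it while it stays negative, and a writer subtracts READER_BIAS so the count becomes the plain number of active readers, locking new readers out until it drains to zero. A minimal userspace sketch of that counting scheme, assuming C11 atomics and using INT_MIN in place of the kernel's (1U << 31) bias; the rtmutex arbitration and wakeups of the real code are omitted:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define READER_BIAS  INT_MIN      /* unlocked state of the counter */
    #define WRITER_BIAS  (1 << 30)    /* marks "write locked"          */

    struct rwbase_sketch { atomic_int readers; };

    static bool sketch_read_trylock(struct rwbase_sketch *rwb)
    {
        int r = atomic_load(&rwb->readers);

        /* Readers may enter only while the bias is present (count < 0). */
        while (r < 0)
            if (atomic_compare_exchange_weak(&rwb->readers, &r, r + 1))
                return true;
        return false;  /* a writer removed the bias; take the slow path */
    }

    static bool sketch_write_trylock(struct rwbase_sketch *rwb)
    {
        atomic_fetch_sub(&rwb->readers, READER_BIAS);  /* lock out new readers */
        if (atomic_load(&rwb->readers) == 0) {         /* no readers remain    */
            atomic_store(&rwb->readers, WRITER_BIAS);
            return true;
        }
        atomic_fetch_add(&rwb->readers, READER_BIAS);  /* undo and give up */
        return false;
    }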
|
/linux-6.12.1/fs/bcachefs/ |
D | six.c |
    104 read_count += *per_cpu_ptr(lock->readers, cpu); in pcpu_read_count()
    154 if (type == SIX_LOCK_read && lock->readers) { in __do_six_trylock()
    156 this_cpu_inc(*lock->readers); /* signal that we own lock */ in __do_six_trylock()
    163 this_cpu_sub(*lock->readers, !ret); in __do_six_trylock()
    171 } else if (type == SIX_LOCK_write && lock->readers) { in __do_six_trylock()
    576 lock->readers) { in do_six_unlock_type()
    578 this_cpu_dec(*lock->readers); in do_six_unlock_type()
    664 if (!lock->readers) { in six_lock_tryupgrade()
    672 if (lock->readers) in six_lock_tryupgrade()
    673 this_cpu_dec(*lock->readers); in six_lock_tryupgrade()
    [all …]
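When lock->readers is non-NULL, six.c keeps read counts in per-CPU counters, so the read-side fast path touches only a CPU-local cacheline; the price is that the write side must sum every CPU's counter (pcpu_read_count() above) to learn whether any reader exists. A sketch of that trade-off with hypothetical names; allocation via alloc_percpu() and the barriers six.c uses to order the counters against the lock word are omitted:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    struct pcpu_rw_sketch {
        unsigned int __percpu *readers;
    };

    static void sketch_read_lock(struct pcpu_rw_sketch *lock)
    {
        this_cpu_inc(*lock->readers);    /* cheap: local counter only */
    }

    static void sketch_read_unlock(struct pcpu_rw_sketch *lock)
    {
        this_cpu_dec(*lock->readers);
    }

    /* The expensive side: a writer must sum every CPU's counter to see
     * whether any reader is still active. */
    static unsigned int sketch_read_count(struct pcpu_rw_sketch *lock)
    {
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(lock->readers, cpu);
        return sum;
    }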
|
/linux-6.12.1/fs/btrfs/ |
D | locking.c |
    332 atomic_set(&lock->readers, 0); in btrfs_drew_lock_init()
    341 if (atomic_read(&lock->readers)) in btrfs_drew_try_write_lock()
    348 if (atomic_read(&lock->readers)) { in btrfs_drew_try_write_lock()
    361 wait_event(lock->pending_writers, !atomic_read(&lock->readers)); in btrfs_drew_write_lock()
    377 atomic_inc(&lock->readers); in btrfs_drew_read_lock()
    396 if (atomic_dec_and_test(&lock->readers)) in btrfs_drew_read_unlock()
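The btrfs "drew" (double reader-writer exclusion) lock hits show the classic count-plus-waitqueue pairing: readers maintain a bare atomic count, and a writer sleeps on a waitqueue until that count drains to zero. A sketch of just that interplay, with hypothetical names; the real lock also tracks writers with a percpu counter so readers can block symmetrically, which is omitted here:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct drew_sketch {
        atomic_t readers;
        wait_queue_head_t pending_writers;
    };

    static void sketch_init(struct drew_sketch *lock)
    {
        atomic_set(&lock->readers, 0);
        init_waitqueue_head(&lock->pending_writers);
    }

    static void sketch_read_lock(struct drew_sketch *lock)
    {
        atomic_inc(&lock->readers);
    }

    static void sketch_read_unlock(struct drew_sketch *lock)
    {
        /* The last reader out wakes a writer sleeping in write_lock(). */
        if (atomic_dec_and_test(&lock->readers))
            wake_up(&lock->pending_writers);
    }

    static void sketch_write_lock(struct drew_sketch *lock)
    {
        /* Sleep until no readers remain (wake-ups re-check the condition). */
        wait_event(lock->pending_writers, !atomic_read(&lock->readers));
    }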
|
D | subpage.c |
    146 atomic_set(&ret->readers, 0); in btrfs_alloc_subpage()
    242 atomic_add(nbits, &subpage->readers); in btrfs_subpage_start_reader()
    263 ASSERT(atomic_read(&subpage->readers) >= nbits); in btrfs_subpage_end_reader()
    266 last = atomic_sub_and_test(nbits, &subpage->readers); in btrfs_subpage_end_reader()
    310 ASSERT(atomic_read(&subpage->readers) == 0); in btrfs_subpage_start_writer()
|
/linux-6.12.1/tools/testing/selftests/kvm/lib/ |
D | userfaultfd_util.c |
    125 uffd_desc->readers = calloc(sizeof(pthread_t), num_readers); in uffd_setup_demand_paging()
    126 TEST_ASSERT(uffd_desc->readers, "Failed to alloc reader threads"); in uffd_setup_demand_paging()
    170 pthread_create(&uffd_desc->readers[i], NULL, uffd_handler_thread_fn, in uffd_setup_demand_paging()
    190 TEST_ASSERT(!pthread_join(uffd->readers[i], NULL), in uffd_stop_demand_paging()
    201 free(uffd->readers); in uffd_stop_demand_paging()
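These selftest hits are plain pthreads bookkeeping: allocate a pthread_t array with calloc(), spawn one reader thread per slot, then join and free the array on teardown. (The source passes calloc()'s count and size arguments in reversed order, which is harmless here since only their product matters.) A self-contained userspace sketch of the same lifecycle, with a placeholder thread function:

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    static void *handler_fn(void *arg) { return arg; }   /* placeholder */

    int main(void)
    {
        int nr_readers = 4;
        pthread_t *readers = calloc(nr_readers, sizeof(*readers));

        assert(readers);
        for (int i = 0; i < nr_readers; i++)
            assert(!pthread_create(&readers[i], NULL, handler_fn, NULL));

        /* Teardown: join every reader, then release the array. */
        for (int i = 0; i < nr_readers; i++)
            assert(!pthread_join(readers[i], NULL));
        free(readers);
        return 0;
    }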
|
/linux-6.12.1/drivers/misc/cardreader/ |
D | Kconfig |
    9   Alcor Micro card readers support access to many types of memory cards,
    21  Realtek card readers support access to many types of memory cards,
    30  Select this option to get support for Realtek USB 2.0 card readers
|
/linux-6.12.1/Documentation/RCU/ |
D | rcu.rst |
    10  must be long enough that any readers accessing the item being deleted have
    21  The advantage of RCU's two-part approach is that RCU readers need
    26  in read-mostly situations. The fact that RCU readers need not
    30  if the RCU readers give no indication when they are done?
    32  Just as with spinlocks, RCU readers are not permitted to
    42  same effect, but require that the readers manipulate CPU-local
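The rcu.rst lines above compress the core RCU bargain: readers run with no locks and no atomic operations, and in exchange updaters must wait a grace period before reclaiming anything a reader might still see. A minimal sketch of that split, assuming hypothetical gp/struct my_data names and that gp is non-NULL when read:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_data { int val; };
    static struct my_data __rcu *gp;

    static int reader(void)
    {
        int val;

        rcu_read_lock();                 /* no lock, no atomic operation */
        val = rcu_dereference(gp)->val;  /* safe: reclamation is deferred */
        rcu_read_unlock();
        return val;
    }

    static void retire(struct my_data *old)  /* old == current gp */
    {
        rcu_assign_pointer(gp, NULL);
        synchronize_rcu();  /* waits out every reader that could see 'old' */
        kfree(old);
    }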
|
D | checklist.rst |
    30  One final exception is where RCU readers are used to prevent
    40  RCU does allow *readers* to run (almost) naked, but *writers* must
    86  The whole point of RCU is to permit readers to run without
    87  any locks or atomic operations. This means that readers will
    100 locks (that are acquired by both readers and writers)
    101 that guard per-element state. Fields that the readers
    107 c. Make updates appear atomic to readers. For example,
    111 appear to be atomic to RCU readers, nor will sequences
    119 d. Carefully order the updates and the reads so that readers
    162 is common to readers and updaters. However, lockdep
    [all …]
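Checklist item (c) in the hits, "make updates appear atomic to readers", is usually implemented by copy-then-publish: build the new version off to the side and install it with a single rcu_assign_pointer(), so a reader sees either the whole old version or the whole new one, never a half-updated mix. A sketch under that reading, with hypothetical struct cfg/cur_cfg names:

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct cfg {
        int a, b;
        struct rcu_head rcu;
    };

    static struct cfg __rcu *cur_cfg;
    static DEFINE_SPINLOCK(update_lock);  /* updaters still exclude each other */

    static int update_cfg(int a, int b)
    {
        struct cfg *nc, *oc;

        nc = kmalloc(sizeof(*nc), GFP_KERNEL);
        if (!nc)
            return -ENOMEM;
        nc->a = a;
        nc->b = b;

        spin_lock(&update_lock);
        oc = rcu_dereference_protected(cur_cfg, lockdep_is_held(&update_lock));
        rcu_assign_pointer(cur_cfg, nc);   /* one atomic publish step */
        spin_unlock(&update_lock);

        if (oc)
            kfree_rcu(oc, rcu);            /* reclaim after a grace period */
        return 0;
    }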
|
D | whatisRCU.rst |
    56  Section 1, though most readers will profit by reading this section at
    79  new versions of these data items), and can run concurrently with readers.
    81  readers is the semantics of modern CPUs guarantee that readers will see
    85  removal phase. Because reclaiming data items can disrupt any readers
    87  not start until readers no longer hold references to those data items.
    91  reclamation phase until all readers active during the removal phase have
    93  callback that is invoked after they finish. Only readers that are active
    101 readers cannot gain a reference to it.
    103 b. Wait for all previous readers to complete their RCU read-side
    106 c. At this point, there cannot be any readers who hold references
    [all …]
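whatisRCU.rst's removal/reclamation split maps onto two calls: unlink the element so new readers cannot find it, then hand reclamation to a callback that RCU invokes only after every reader active during removal has finished. A sketch of steps a-c above for a list element, with hypothetical names:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
        struct list_head list;
        struct rcu_head rcu;
        int key;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
        /* Reclamation phase: no reader can still hold a reference. */
        kfree(container_of(head, struct foo, rcu));
    }

    static void foo_del(struct foo *p)      /* caller holds the update lock */
    {
        list_del_rcu(&p->list);             /* removal phase                */
        call_rcu(&p->rcu, foo_reclaim);     /* defer the reclamation phase  */
    }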
|
D | lockdep.rst |
    43  invoked by both RCU readers and updaters.
    47  is invoked by both RCU-bh readers and updaters.
    51  is invoked by both RCU-sched readers and updaters.
    55  is invoked by both SRCU readers and updaters.
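These lockdep.rst lines describe the held-state checks behind rcu_dereference_check(): code reached from both readers and updaters can assert "I am either in an RCU read-side critical section or holding the update-side lock". A sketch with hypothetical struct thing/gp/my_lock names:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct thing { int x; };

    static DEFINE_SPINLOCK(my_lock);
    static struct thing __rcu *gp;

    /* Legal from either side; splats under lockdep if called with
     * neither rcu_read_lock() nor my_lock held. */
    static struct thing *get_thing(void)
    {
        return rcu_dereference_check(gp, lockdep_is_held(&my_lock));
    }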
|
D | listRCU.rst |
    63  ``tasklist_lock``. To prevent readers from noticing changes in the list
    67  any readers traversing the list will see valid ``p->tasks.next`` pointers
    71  all existing readers finish, which guarantees that the ``task_struct``
    73  of all RCU readers that might possibly have a reference to that object.
    219 need for writers to exclude readers.
    226 readers to fail spectacularly.
    228 So, when readers can tolerate stale data and when entries are either added or
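The traversal pattern listRCU.rst is describing: readers walk the list inside rcu_read_lock()/rcu_read_unlock() and may observe slightly stale membership, which the use case must tolerate (line 228 above). A sketch with hypothetical struct item names:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct item { struct list_head list; int key; int data; };
    static LIST_HEAD(items);

    static int lookup(int key)
    {
        struct item *it;
        int ret = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &items, list) {
            if (it->key == key) {
                ret = it->data;  /* may be stale vs. a concurrent update */
                break;
            }
        }
        rcu_read_unlock();
        return ret;
    }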
|
/linux-6.12.1/drivers/misc/ibmasm/ |
D | event.c |
    30  list_for_each_entry(reader, &sp->event_buffer->readers, node) in wake_up_event_readers()
    123 list_add(&reader->node, &sp->event_buffer->readers); in ibmasm_event_reader_register()
    153 INIT_LIST_HEAD(&buffer->readers); in ibmasm_event_buffer_init()
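event.c uses the reader-registry pattern (hid-roccat.c further down follows the same shape): each open() adds a reader node to a list, and event delivery walks the list waking every registered reader. A sketch with hypothetical names; the real drivers also serialize list access with a spinlock or mutex, omitted here:

    #include <linux/list.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    struct reader {
        struct list_head node;
        wait_queue_head_t wait;
        bool data_ready;
    };

    static LIST_HEAD(readers);

    static void register_reader(struct reader *r)
    {
        init_waitqueue_head(&r->wait);
        r->data_ready = false;
        list_add(&r->node, &readers);
    }

    static void wake_all_readers(void)
    {
        struct reader *r;

        list_for_each_entry(r, &readers, node) {
            r->data_ready = true;
            wake_up_interruptible(&r->wait);
        }
    }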
|
/linux-6.12.1/Documentation/locking/ |
D | lockdep-design.rst |
    405 spin_lock() or write_lock()), non-recursive readers (i.e. shared lockers, like
    406 down_read()) and recursive readers (recursive shared lockers, like rcu_read_lock()).
    410 r: stands for non-recursive readers.
    411 R: stands for recursive readers.
    412 S: stands for all readers (non-recursive + recursive), as both are shared lockers.
    413 N: stands for writers and non-recursive readers, as both are not recursive.
    417 Recursive readers, as their name indicates, are the lockers allowed to acquire
    421 While non-recursive readers will cause a self deadlock if trying to acquire inside
    424 The difference between recursive readers and non-recursive readers is because:
    425 recursive readers get blocked only by a write lock *holder*, while non-recursive
    [all …]
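Why the r/R distinction above matters, sketched as the classic self-deadlock: a non-recursive reader re-acquiring the same rwsem can block behind a writer that queued in between, while a recursive reader such as rcu_read_lock() always nests safely. Illustrative only; the first function is the hazard being described, not code to copy:

    #include <linux/rcupdate.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(sem);

    static void non_recursive_reader(void)
    {
        down_read(&sem);
        /* If a writer queues in down_write(&sem) here... */
        down_read(&sem);   /* ...this nested read blocks behind it: deadlock */
        up_read(&sem);
        up_read(&sem);
    }

    static void recursive_reader(void)
    {
        rcu_read_lock();
        rcu_read_lock();    /* always safe: a recursive reader is blocked
                             * only by a write lock *holder*, never by a
                             * merely waiting writer */
        rcu_read_unlock();
        rcu_read_unlock();
    }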
|
D | seqlock.rst |
    9   lockless readers (read-only retry loops), and no writer starvation. They
    23  is odd and indicates to the readers that an update is in progress. At
    25  even again which lets readers make progress.
    153 from interruption by readers. This is typically the case when the read
    195 1. Normal Sequence readers which never block a writer but they must
    206 2. Locking readers which will wait if a writer or another locking reader
    218 according to a passed marker. This is used to avoid lockless readers
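The lockless read-retry loop seqlock.rst describes: readers snapshot the sequence count, copy the data, and retry if a writer made the count move (or left it odd) in the meantime. A sketch with a hypothetical two-field object:

    #include <linux/seqlock.h>
    #include <linux/types.h>

    static DEFINE_SEQLOCK(obj_lock);
    static struct { u64 a, b; } obj;

    static u64 obj_read(void)
    {
        unsigned int seq;
        u64 a, b;

        do {
            seq = read_seqbegin(&obj_lock);   /* spins while seq is odd */
            a = obj.a;
            b = obj.b;
        } while (read_seqretry(&obj_lock, seq)); /* retry if a writer ran */

        return a + b;
    }

    static void obj_write(u64 a, u64 b)
    {
        write_seqlock(&obj_lock);    /* seq goes odd: update in progress  */
        obj.a = a;
        obj.b = b;
        write_sequnlock(&obj_lock);  /* seq even again: readers complete  */
    }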
|
D | locktypes.rst |
    95  readers.
    135 rw_semaphore is a multiple readers and single writer lock mechanism.
    141 exist special-purpose interfaces that allow non-owner release for readers.
    151 readers, a preempted low-priority reader will continue holding its lock,
    152 thus starving even high-priority writers. In contrast, because readers
    155 writer from starving readers.
    299 rwlock_t is a multiple readers and single writer lock mechanism.
    314 readers, a preempted low-priority reader will continue holding its lock,
    315 thus starving even high-priority writers. In contrast, because readers
    318 preventing that writer from starving readers.
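Basic rw_semaphore usage behind the semantics locktypes.rst is contrasting: any number of concurrent readers, or one exclusive writer that waits for the readers to drain. A sketch with a hypothetical protected_data:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(data_sem);
    static int protected_data;

    static int data_read(void)
    {
        int v;

        down_read(&data_sem);    /* shared: concurrent readers allowed */
        v = protected_data;
        up_read(&data_sem);
        return v;
    }

    static void data_write(int v)
    {
        down_write(&data_sem);   /* exclusive: waits for readers to drain */
        protected_data = v;
        up_write(&data_sem);
    }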
|
/linux-6.12.1/fs/ |
D | pipe.c |
    424 !READ_ONCE(pipe->readers); in pipe_writable()
    457 if (!pipe->readers) { in pipe_write()
    498 if (!pipe->readers) { in pipe_write()
    700 if (!pipe->readers) in pipe_poll()
    729 pipe->readers--; in pipe_release()
    734 if (!pipe->readers != !pipe->writers) { in pipe_release()
    896 pipe->readers = pipe->writers = 1; in get_pipe_inode()
    1153 if (pipe->readers++ == 0) in fifo_open()
    1175 if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) in fifo_open()
    1182 if (!is_pipe && !pipe->readers) { in fifo_open()
    [all …]
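The pipe->readers checks in pipe_write() above are what userspace observes as broken-pipe semantics: writing to a pipe whose last reader has gone away raises SIGPIPE, or fails with EPIPE when the signal is ignored. A runnable userspace demonstration:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];

        signal(SIGPIPE, SIG_IGN);   /* turn the signal into an error code */
        if (pipe(fds))
            return 1;
        close(fds[0]);              /* drop the last reader */

        if (write(fds[1], "x", 1) < 0)
            printf("write: %s\n", strerror(errno)); /* "Broken pipe" (EPIPE) */
        return 0;
    }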
|
/linux-6.12.1/drivers/soc/aspeed/ |
D | aspeed-p2a-ctrl.c |
    65  u32 readers; member
    192 ctrl->readers += 1; in aspeed_p2a_ioctl()
    275 priv->parent->readers -= priv->read; in aspeed_p2a_release()
    299 if (!open_regions && priv->parent->readers == 0) in aspeed_p2a_release()
|
/linux-6.12.1/drivers/hid/ |
D | hid-roccat.c |
    47  struct list_head readers; member
    191 list_add_tail(&reader->node, &device->readers); in roccat_open()
    270 list_for_each_entry(reader, &device->readers, node) { in roccat_report_event()
    339 INIT_LIST_HEAD(&device->readers); in roccat_connect()
|
/linux-6.12.1/tools/testing/selftests/kvm/include/ |
D | userfaultfd_util.h | 31 pthread_t *readers; member
|
/linux-6.12.1/drivers/md/dm-vdo/indexer/ |
D | volume-index.c |
    806 struct buffered_reader **readers, in start_restoring_volume_sub_index() argument
    820 result = uds_read_from_buffered_reader(readers[i], buffer, in start_restoring_volume_sub_index()
    873 result = uds_read_from_buffered_reader(readers[i], decoded, in start_restoring_volume_sub_index()
    891 result = uds_start_restoring_delta_index(&sub_index->delta_index, readers, in start_restoring_volume_sub_index()
    983 struct buffered_reader **readers, unsigned int reader_count) in uds_load_volume_index() argument
    988 result = start_restoring_volume_index(volume_index, readers, reader_count); in uds_load_volume_index()
    992 result = finish_restoring_volume_index(volume_index, readers, reader_count); in uds_load_volume_index()
    999 result = uds_check_guard_delta_lists(readers, reader_count); in uds_load_volume_index()
|
D | index-layout.c |
    914 struct buffered_reader *readers[MAX_ZONES]; in uds_load_index_state() local
    924 result = open_region_reader(layout, &isl->open_chapter, &readers[0]); in uds_load_index_state()
    928 result = uds_load_open_chapter(index, readers[0]); in uds_load_index_state()
    929 uds_free_buffered_reader(readers[0]); in uds_load_index_state()
    935 &readers[zone]); in uds_load_index_state()
    938 uds_free_buffered_reader(readers[zone - 1]); in uds_load_index_state()
    944 result = uds_load_volume_index(index->volume_index, readers, isl->zone_count); in uds_load_index_state()
    946 uds_free_buffered_reader(readers[zone]); in uds_load_index_state()
    950 result = open_region_reader(layout, &isl->index_page_map, &readers[0]); in uds_load_index_state()
    954 result = uds_read_index_page_map(index->volume->index_page_map, readers[0]); in uds_load_index_state()
    [all …]
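Both dm-vdo excerpts follow the same open-several/unwind-on-error discipline: open one buffered reader per zone and, at any failure, free exactly the readers opened so far. A distilled sketch of that loop, with open_reader/free_reader standing in for the real helpers:

    #define MAX_ZONES 8

    struct reader;
    int open_reader(unsigned int zone, struct reader **out);   /* stand-in */
    void free_reader(struct reader *r);                        /* stand-in */

    static int open_zone_readers(struct reader **readers, unsigned int count)
    {
        unsigned int zone;
        int result;

        for (zone = 0; zone < count; zone++) {
            result = open_reader(zone, &readers[zone]);
            if (result != 0) {
                /* Unwind: free only what was opened before the failure. */
                while (zone-- > 0)
                    free_reader(readers[zone]);
                return result;
            }
        }
        return 0;
    }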
|
/linux-6.12.1/drivers/iio/ |
D | TODO | 14 to state struct and using property handlers and readers.
|
/linux-6.12.1/Documentation/trace/ |
D | ring-buffer-map.rst |
    43  Concurrent readers (either another application mapping that ring-buffer or the
    45  the ring-buffer and the output is unpredictable, just like concurrent readers on
|