Lines matching full:ssp in the kernel's Tree SRCU implementation (kernel/rcu/srcutree.c).
Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" indicate how ssp is used at that site.
76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
122 static void init_srcu_struct_data(struct srcu_struct *ssp) in init_srcu_struct_data() argument
134 sdp = per_cpu_ptr(ssp->sda, cpu); in init_srcu_struct_data()
138 sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq; in init_srcu_struct_data()
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq; in init_srcu_struct_data()
145 sdp->ssp = ssp; in init_srcu_struct_data()
165 static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags) in init_srcu_struct_nodes() argument
177 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags); in init_srcu_struct_nodes()
178 if (!ssp->srcu_sup->node) in init_srcu_struct_nodes()
182 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0]; in init_srcu_struct_nodes()
184 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1]; in init_srcu_struct_nodes()
188 srcu_for_each_node_breadth_first(ssp, snp) { in init_srcu_struct_nodes()
199 if (snp == &ssp->srcu_sup->node[0]) { in init_srcu_struct_nodes()
206 if (snp == ssp->srcu_sup->level[level + 1]) in init_srcu_struct_nodes()
208 snp->srcu_parent = ssp->srcu_sup->level[level - 1] + in init_srcu_struct_nodes()
209 (snp - ssp->srcu_sup->level[level]) / in init_srcu_struct_nodes()
218 snp_first = ssp->srcu_sup->level[level]; in init_srcu_struct_nodes()
220 sdp = per_cpu_ptr(ssp->sda, cpu); in init_srcu_struct_nodes()
229 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER); in init_srcu_struct_nodes()
238 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) in init_srcu_struct_fields() argument
241 ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL); in init_srcu_struct_fields()
242 if (!ssp->srcu_sup) in init_srcu_struct_fields()
245 spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in init_srcu_struct_fields()
246 ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL; in init_srcu_struct_fields()
247 ssp->srcu_sup->node = NULL; in init_srcu_struct_fields()
248 mutex_init(&ssp->srcu_sup->srcu_cb_mutex); in init_srcu_struct_fields()
249 mutex_init(&ssp->srcu_sup->srcu_gp_mutex); in init_srcu_struct_fields()
250 ssp->srcu_idx = 0; in init_srcu_struct_fields()
251 ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL; in init_srcu_struct_fields()
252 ssp->srcu_sup->srcu_barrier_seq = 0; in init_srcu_struct_fields()
253 mutex_init(&ssp->srcu_sup->srcu_barrier_mutex); in init_srcu_struct_fields()
254 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0); in init_srcu_struct_fields()
255 INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu); in init_srcu_struct_fields()
256 ssp->srcu_sup->sda_is_static = is_static; in init_srcu_struct_fields()
258 ssp->sda = alloc_percpu(struct srcu_data); in init_srcu_struct_fields()
259 if (!ssp->sda) in init_srcu_struct_fields()
261 init_srcu_struct_data(ssp); in init_srcu_struct_fields()
262 ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL; in init_srcu_struct_fields()
263 ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns(); in init_srcu_struct_fields()
264 if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) { in init_srcu_struct_fields()
265 if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) in init_srcu_struct_fields()
267 WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG); in init_srcu_struct_fields()
269 ssp->srcu_sup->srcu_ssp = ssp; in init_srcu_struct_fields()
270 smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, in init_srcu_struct_fields()
276 free_percpu(ssp->sda); in init_srcu_struct_fields()
277 ssp->sda = NULL; in init_srcu_struct_fields()
281 kfree(ssp->srcu_sup); in init_srcu_struct_fields()
282 ssp->srcu_sup = NULL; in init_srcu_struct_fields()
289 int __init_srcu_struct(struct srcu_struct *ssp, const char *name, in __init_srcu_struct() argument
293 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); in __init_srcu_struct()
294 lockdep_init_map(&ssp->dep_map, name, key, 0); in __init_srcu_struct()
295 return init_srcu_struct_fields(ssp, false); in __init_srcu_struct()
303 * @ssp: structure to initialize.
309 int init_srcu_struct(struct srcu_struct *ssp) in init_srcu_struct() argument
311 return init_srcu_struct_fields(ssp, false); in init_srcu_struct()
320 static void __srcu_transition_to_big(struct srcu_struct *ssp) in __srcu_transition_to_big() argument
322 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in __srcu_transition_to_big()
323 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC); in __srcu_transition_to_big()
329 static void srcu_transition_to_big(struct srcu_struct *ssp) in srcu_transition_to_big() argument
334 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) in srcu_transition_to_big()
336 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
337 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) { in srcu_transition_to_big()
338 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
341 __srcu_transition_to_big(ssp); in srcu_transition_to_big()
342 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
349 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp) in spin_lock_irqsave_check_contention() argument
353 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state) in spin_lock_irqsave_check_contention()
356 if (ssp->srcu_sup->srcu_size_jiffies != j) { in spin_lock_irqsave_check_contention()
357 ssp->srcu_sup->srcu_size_jiffies = j; in spin_lock_irqsave_check_contention()
358 ssp->srcu_sup->srcu_n_lock_retries = 0; in spin_lock_irqsave_check_contention()
360 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim) in spin_lock_irqsave_check_contention()
362 __srcu_transition_to_big(ssp); in spin_lock_irqsave_check_contention()
373 struct srcu_struct *ssp = sdp->ssp; in spin_lock_irqsave_sdp_contention() local
377 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_sdp_contention()
378 spin_lock_irqsave_check_contention(ssp); in spin_lock_irqsave_sdp_contention()
379 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_sdp_contention()
389 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags) in spin_lock_irqsave_ssp_contention() argument
391 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags)) in spin_lock_irqsave_ssp_contention()
393 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_ssp_contention()
394 spin_lock_irqsave_check_contention(ssp); in spin_lock_irqsave_ssp_contention()
401 * to each update-side SRCU primitive. Use ssp->lock, which -is-
405 static void check_init_srcu_struct(struct srcu_struct *ssp) in check_init_srcu_struct() argument
410 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/ in check_init_srcu_struct()
412 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
413 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) { in check_init_srcu_struct()
414 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
417 init_srcu_struct_fields(ssp, true); in check_init_srcu_struct()
418 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
425 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_lock_idx() argument
431 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_lock_idx()
442 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) in srcu_readers_unlock_idx() argument
449 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_unlock_idx()
456 "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp); in srcu_readers_unlock_idx()
464 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) in srcu_readers_active_idx_check() argument
468 unlocks = srcu_readers_unlock_idx(ssp, idx); in srcu_readers_active_idx_check()
539 return srcu_readers_lock_idx(ssp, idx) == unlocks; in srcu_readers_active_idx_check()
545 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
551 static bool srcu_readers_active(struct srcu_struct *ssp) in srcu_readers_active() argument
557 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_active()
618 static unsigned long srcu_get_delay(struct srcu_struct *ssp) in srcu_get_delay() argument
623 struct srcu_usage *sup = ssp->srcu_sup; in srcu_get_delay()
644 * @ssp: structure to clean up.
649 void cleanup_srcu_struct(struct srcu_struct *ssp) in cleanup_srcu_struct() argument
652 struct srcu_usage *sup = ssp->srcu_sup; in cleanup_srcu_struct()
654 if (WARN_ON(!srcu_get_delay(ssp))) in cleanup_srcu_struct()
656 if (WARN_ON(srcu_readers_active(ssp))) in cleanup_srcu_struct()
660 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in cleanup_srcu_struct()
669 WARN_ON(srcu_readers_active(ssp))) { in cleanup_srcu_struct()
671 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)), in cleanup_srcu_struct()
682 free_percpu(ssp->sda); in cleanup_srcu_struct()
683 ssp->sda = NULL; in cleanup_srcu_struct()
685 ssp->srcu_sup = NULL; in cleanup_srcu_struct()
694 void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe) in srcu_check_nmi_safety() argument
702 sdp = raw_cpu_ptr(ssp->sda); in srcu_check_nmi_safety()
718 int __srcu_read_lock(struct srcu_struct *ssp) in __srcu_read_lock() argument
722 idx = READ_ONCE(ssp->srcu_idx) & 0x1; in __srcu_read_lock()
723 this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); in __srcu_read_lock()
734 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) in __srcu_read_unlock() argument
737 this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter); in __srcu_read_unlock()
748 int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) in __srcu_read_lock_nmisafe() argument
751 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda); in __srcu_read_lock_nmisafe()
753 idx = READ_ONCE(ssp->srcu_idx) & 0x1; in __srcu_read_lock_nmisafe()
765 void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) in __srcu_read_unlock_nmisafe() argument
767 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda); in __srcu_read_unlock_nmisafe()
779 static void srcu_gp_start(struct srcu_struct *ssp) in srcu_gp_start() argument
783 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in srcu_gp_start()
784 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)); in srcu_gp_start()
785 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies); in srcu_gp_start()
786 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0); in srcu_gp_start()
788 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start()
789 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start()
827 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp, in srcu_schedule_cbs_snp() argument
835 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); in srcu_schedule_cbs_snp()
848 static void srcu_gp_end(struct srcu_struct *ssp) in srcu_gp_end() argument
861 struct srcu_usage *sup = ssp->srcu_sup; in srcu_gp_end()
885 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()), in srcu_gp_end()
889 srcu_for_each_node_breadth_first(ssp, snp) { in srcu_gp_end()
907 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay); in srcu_gp_end()
914 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_gp_end()
931 srcu_gp_start(ssp); in srcu_gp_end()
933 srcu_reschedule(ssp, 0); in srcu_gp_end()
941 init_srcu_struct_nodes(ssp, GFP_KERNEL); in srcu_gp_end()
954 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp, in srcu_funnel_exp_start() argument
963 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) || in srcu_funnel_exp_start()
975 spin_lock_irqsave_ssp_contention(ssp, &flags); in srcu_funnel_exp_start()
976 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s)) in srcu_funnel_exp_start()
977 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s); in srcu_funnel_exp_start()
978 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_funnel_exp_start()
994 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp, in srcu_funnel_gp_start() argument
1003 struct srcu_usage *sup = ssp->srcu_sup; in srcu_funnel_gp_start()
1027 srcu_funnel_exp_start(ssp, snp, s); in srcu_funnel_gp_start()
1040 spin_lock_irqsave_ssp_contention(ssp, &flags); in srcu_funnel_gp_start()
1055 srcu_gp_start(ssp); in srcu_funnel_gp_start()
1064 !!srcu_get_delay(ssp)); in srcu_funnel_gp_start()
1076 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount) in try_check_zero() argument
1080 curdelay = !srcu_get_delay(ssp); in try_check_zero()
1083 if (srcu_readers_active_idx_check(ssp, idx)) in try_check_zero()
1096 static void srcu_flip(struct srcu_struct *ssp) in srcu_flip() argument
1128 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter. in srcu_flip()
1162 static bool srcu_might_be_idle(struct srcu_struct *ssp) in srcu_might_be_idle() argument
1170 check_init_srcu_struct(ssp); in srcu_might_be_idle()
1172 sdp = raw_cpu_ptr(ssp->sda); in srcu_might_be_idle()
1188 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end); in srcu_might_be_idle()
1194 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq); in srcu_might_be_idle()
1196 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed))) in srcu_might_be_idle()
1199 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)) in srcu_might_be_idle()
1214 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, in srcu_gp_start_if_needed() argument
1226 check_init_srcu_struct(ssp); in srcu_gp_start_if_needed()
1232 idx = __srcu_read_lock_nmisafe(ssp); in srcu_gp_start_if_needed()
1233 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state); in srcu_gp_start_if_needed()
1235 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id()); in srcu_gp_start_if_needed()
1237 sdp = raw_cpu_ptr(ssp->sda); in srcu_gp_start_if_needed()
1277 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start_if_needed()
1280 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); in srcu_gp_start_if_needed()
1307 srcu_funnel_gp_start(ssp, sdp, s, do_norm); in srcu_gp_start_if_needed()
1309 srcu_funnel_exp_start(ssp, sdp_mynode, s); in srcu_gp_start_if_needed()
1310 __srcu_read_unlock_nmisafe(ssp, idx); in srcu_gp_start_if_needed()
1342 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in __call_srcu() argument
1352 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); in __call_srcu()
1357 * @ssp: srcu_struct in queue the callback
1372 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
1375 __call_srcu(ssp, rhp, func, true); in call_srcu()
1382 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) in __synchronize_srcu() argument
1386 srcu_lock_sync(&ssp->dep_map); in __synchronize_srcu()
1388 RCU_LOCKDEP_WARN(lockdep_is_held(ssp) || in __synchronize_srcu()
1397 check_init_srcu_struct(ssp); in __synchronize_srcu()
1400 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); in __synchronize_srcu()
1416 * @ssp: srcu_struct with which to synchronize.
1424 void synchronize_srcu_expedited(struct srcu_struct *ssp) in synchronize_srcu_expedited() argument
1426 __synchronize_srcu(ssp, rcu_gp_is_normal()); in synchronize_srcu_expedited()
1432 * @ssp: srcu_struct with which to synchronize.
1477 void synchronize_srcu(struct srcu_struct *ssp) in synchronize_srcu() argument
1479 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) in synchronize_srcu()
1480 synchronize_srcu_expedited(ssp); in synchronize_srcu()
1482 __synchronize_srcu(ssp, true); in synchronize_srcu()
1488 * @ssp: srcu_struct to provide cookie for.
1496 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) in get_state_synchronize_srcu() argument
1501 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); in get_state_synchronize_srcu()
1507 * @ssp: srcu_struct to provide cookie for.
1515 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) in start_poll_synchronize_srcu() argument
1517 return srcu_gp_start_if_needed(ssp, NULL, true); in start_poll_synchronize_srcu()
1523 * @ssp: srcu_struct to provide cookie for.
1546 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) in poll_state_synchronize_srcu() argument
1549 !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie)) in poll_state_synchronize_srcu()
1564 struct srcu_struct *ssp; in srcu_barrier_cb() local
1568 ssp = sdp->ssp; in srcu_barrier_cb()
1569 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt)) in srcu_barrier_cb()
1570 complete(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier_cb()
1581 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp) in srcu_barrier_one_cpu() argument
1584 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt); in srcu_barrier_one_cpu()
1590 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt); in srcu_barrier_one_cpu()
1597 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1599 void srcu_barrier(struct srcu_struct *ssp) in srcu_barrier() argument
1603 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1605 check_init_srcu_struct(ssp); in srcu_barrier()
1606 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
1607 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) { in srcu_barrier()
1609 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
1612 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1613 init_completion(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1616 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1); in srcu_barrier()
1618 idx = __srcu_read_lock_nmisafe(ssp); in srcu_barrier()
1619 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) in srcu_barrier()
1620 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id())); in srcu_barrier()
1623 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu)); in srcu_barrier()
1624 __srcu_read_unlock_nmisafe(ssp, idx); in srcu_barrier()
1627 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt)) in srcu_barrier()
1628 complete(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1629 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1631 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1632 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
1638 * @ssp: srcu_struct on which to report batch completion.
1643 unsigned long srcu_batches_completed(struct srcu_struct *ssp) in srcu_batches_completed() argument
1645 return READ_ONCE(ssp->srcu_idx); in srcu_batches_completed()
1654 static void srcu_advance_state(struct srcu_struct *ssp) in srcu_advance_state() argument
1658 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1670 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */ in srcu_advance_state()
1672 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1673 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) { in srcu_advance_state()
1674 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)); in srcu_advance_state()
1675 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1676 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1679 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)); in srcu_advance_state()
1681 srcu_gp_start(ssp); in srcu_advance_state()
1682 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1684 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1689 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) { in srcu_advance_state()
1690 idx = 1 ^ (ssp->srcu_idx & 1); in srcu_advance_state()
1691 if (!try_check_zero(ssp, idx, 1)) { in srcu_advance_state()
1692 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1695 srcu_flip(ssp); in srcu_advance_state()
1696 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1697 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2); in srcu_advance_state()
1698 ssp->srcu_sup->srcu_n_exp_nodelay = 0; in srcu_advance_state()
1699 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1702 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) { in srcu_advance_state()
1708 idx = 1 ^ (ssp->srcu_idx & 1); in srcu_advance_state()
1709 if (!try_check_zero(ssp, idx, 2)) { in srcu_advance_state()
1710 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1713 ssp->srcu_sup->srcu_n_exp_nodelay = 0; in srcu_advance_state()
1714 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ in srcu_advance_state()
1731 struct srcu_struct *ssp; in srcu_invoke_callbacks() local
1735 ssp = sdp->ssp; in srcu_invoke_callbacks()
1740 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); in srcu_invoke_callbacks()
1785 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) in srcu_reschedule() argument
1789 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_reschedule()
1790 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) { in srcu_reschedule()
1791 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) { in srcu_reschedule()
1795 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) { in srcu_reschedule()
1797 srcu_gp_start(ssp); in srcu_reschedule()
1799 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_reschedule()
1802 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay); in srcu_reschedule()
1812 struct srcu_struct *ssp; in process_srcu() local
1816 ssp = sup->srcu_ssp; in process_srcu()
1818 srcu_advance_state(ssp); in process_srcu()
1819 curdelay = srcu_get_delay(ssp); in process_srcu()
1834 srcu_reschedule(ssp, curdelay); in process_srcu()
1837 void srcutorture_get_gp_data(struct srcu_struct *ssp, int *flags, in srcutorture_get_gp_data() argument
1841 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq); in srcutorture_get_gp_data()
1858 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) in srcu_torture_stats_print() argument
1863 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state); in srcu_torture_stats_print()
1866 idx = ssp->srcu_idx & 0x1; in srcu_torture_stats_print()
1870 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state, in srcu_torture_stats_print()
1872 if (!ssp->sda) { in srcu_torture_stats_print()
1883 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_torture_stats_print()
1907 srcu_transition_to_big(ssp); in srcu_torture_stats_print()
1963 struct srcu_struct *ssp; in srcu_module_coming() local
1967 ssp = *(sspp++); in srcu_module_coming()
1968 ssp->sda = alloc_percpu(struct srcu_data); in srcu_module_coming()
1969 if (WARN_ON_ONCE(!ssp->sda)) in srcu_module_coming()
1979 struct srcu_struct *ssp; in srcu_module_going() local
1983 ssp = *(sspp++); in srcu_module_going()
1984 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) && in srcu_module_going()
1985 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static)) in srcu_module_going()
1986 cleanup_srcu_struct(ssp); in srcu_module_going()
1987 if (!WARN_ON(srcu_readers_active(ssp))) in srcu_module_going()
1988 free_percpu(ssp->sda); in srcu_module_going()
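
A minimal usage sketch to accompany the listing (not taken from the file above; the my_srcu domain, the my_data structure, and the reader()/updater()/free_old() helpers are hypothetical, added only to show how the listed read-side and update-side srcu_struct primitives fit together):

	#include <linux/srcu.h>
	#include <linux/slab.h>

	/* Hypothetical SRCU-protected structure and pointer. */
	struct my_data {
		int value;
		struct rcu_head rh;
	};

	static struct my_data __rcu *global_ptr;
	DEFINE_SRCU(my_srcu);		/* statically allocated srcu_struct */

	/* Reader: SRCU read-side critical sections may sleep. */
	static int reader(void)
	{
		struct my_data *p;
		int idx, val = -1;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(global_ptr, &my_srcu);
		if (p)
			val = p->value;
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	/* Callback invoked once a grace period has elapsed. */
	static void free_old(struct rcu_head *rh)
	{
		kfree(container_of(rh, struct my_data, rh));
	}

	/* Updater: publish a new version, reclaim the old one after readers finish. */
	static int updater(int new_value)
	{
		struct my_data *newp, *oldp;

		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		if (!newp)
			return -ENOMEM;
		newp->value = new_value;

		/* Caller is assumed to serialize updaters. */
		oldp = rcu_dereference_protected(global_ptr, 1);
		rcu_assign_pointer(global_ptr, newp);

		if (oldp)
			call_srcu(&my_srcu, &oldp->rh, free_old);	/* asynchronous */
		/* Synchronous alternative: synchronize_srcu(&my_srcu); kfree(oldp); */
		return 0;
	}

On teardown, srcu_barrier() waits for previously queued call_srcu() callbacks to be invoked, and cleanup_srcu_struct() applies to an srcu_struct that was initialized dynamically with init_srcu_struct() rather than defined with DEFINE_SRCU().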