Lines matching refs:sup, i.e. every reference to the local variable sup (a pointer to struct srcu_usage); each match shows its source line number and enclosing function:

623 	struct srcu_usage *sup = ssp->srcu_sup;  in srcu_get_delay()  local
625 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp))) in srcu_get_delay()
627 if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) { in srcu_get_delay()
629 gpstart = READ_ONCE(sup->srcu_gp_start); in srcu_get_delay()
633 ASSERT_EXCLUSIVE_WRITER(sup->srcu_n_exp_nodelay); in srcu_get_delay()
634 WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1); in srcu_get_delay()
635 if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase) in srcu_get_delay()
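The srcu_get_delay() matches above combine two recurring idioms: wrap-safe comparison of free-running grace-period sequence numbers (ULONG_CMP_LT on srcu_gp_seq vs. srcu_gp_seq_needed_exp) and a counter with exactly one writer but concurrent readers, where every access is marked with READ_ONCE()/WRITE_ONCE() and ASSERT_EXCLUSIVE_WRITER() documents the single-writer rule for KCSAN. A minimal userspace sketch of both idioms, using C11 relaxed atomics in place of the kernel macros; seq_lt(), nodelay_count and nodelay_over_limit() are illustrative names, not part of srcutree.c:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Wrap-safe "a is before b" for free-running sequence counters,
 * mirroring the kernel's ULONG_CMP_LT().
 */
static bool seq_lt(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 < a - b;
}

/*
 * One designated writer bumps the counter; concurrent readers may poll it,
 * so loads and stores are marked (relaxed here, READ_ONCE/WRITE_ONCE there).
 */
static atomic_ulong nodelay_count;

static bool nodelay_over_limit(unsigned long phase_limit)
{
	unsigned long n = atomic_load_explicit(&nodelay_count, memory_order_relaxed);

	atomic_store_explicit(&nodelay_count, n + 1, memory_order_relaxed);
	return n + 1 > phase_limit;
}

As in the kernel pattern, the relaxed marking only suppresses load/store tearing; it does not order the counter against any other memory access.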
652 struct srcu_usage *sup = ssp->srcu_sup; in cleanup_srcu_struct() local
658 flush_delayed_work(&sup->work); in cleanup_srcu_struct()
667 if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) || in cleanup_srcu_struct()
668 WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) || in cleanup_srcu_struct()
671 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)), in cleanup_srcu_struct()
672 rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed); in cleanup_srcu_struct()
678 kfree(sup->node); in cleanup_srcu_struct()
679 sup->node = NULL; in cleanup_srcu_struct()
680 sup->srcu_size_state = SRCU_SIZE_SMALL; in cleanup_srcu_struct()
681 if (!sup->sda_is_static) { in cleanup_srcu_struct()
684 kfree(sup); in cleanup_srcu_struct()
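cleanup_srcu_struct() first flushes the delayed work, then warns and returns without freeing if a grace period is still in flight or still needed, and only in the clean case frees the combining-tree node array, resets srcu_size_state to SRCU_SIZE_SMALL, and kfree()s the srcu_usage itself unless its storage is static (sda_is_static). A condensed userspace sketch of that teardown discipline, assuming a simplified structure; my_usage, my_cleanup and its fields are illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct my_usage {
	unsigned long gp_seq;		/* completed when the low state bits are zero */
	unsigned long gp_seq_needed;	/* furthest grace period anyone asked for     */
	int           size_state;
	bool          is_static;	/* storage not obtained from malloc()         */
	int          *node;		/* combining-tree array, may be NULL          */
};

/* Tear down only when no grace period is running and none is still needed. */
static void my_cleanup(struct my_usage *up)
{
	if ((up->gp_seq & 0x3) != 0 || up->gp_seq != up->gp_seq_needed) {
		fprintf(stderr, "%s: busy (seq %lu, needed %lu), leaking\n",
			__func__, up->gp_seq, up->gp_seq_needed);
		return;			/* mirror the WARN-and-return path */
	}
	free(up->node);
	up->node = NULL;
	up->size_state = 0;		/* back to the "small" state        */
	if (!up->is_static)
		free(up);		/* static instances are never freed */
}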
861 struct srcu_usage *sup = ssp->srcu_sup; in srcu_gp_end() local
864 mutex_lock(&sup->srcu_cb_mutex); in srcu_gp_end()
867 spin_lock_irq_rcu_node(sup); in srcu_gp_end()
868 idx = rcu_seq_state(sup->srcu_gp_seq); in srcu_gp_end()
870 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp))) in srcu_gp_end()
873 WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns()); in srcu_gp_end()
874 rcu_seq_end(&sup->srcu_gp_seq); in srcu_gp_end()
875 gpseq = rcu_seq_current(&sup->srcu_gp_seq); in srcu_gp_end()
876 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq)) in srcu_gp_end()
877 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq); in srcu_gp_end()
878 spin_unlock_irq_rcu_node(sup); in srcu_gp_end()
879 mutex_unlock(&sup->srcu_gp_mutex); in srcu_gp_end()
883 ss_state = smp_load_acquire(&sup->srcu_size_state); in srcu_gp_end()
892 last_lvl = snp >= sup->level[rcu_num_lvls - 1]; in srcu_gp_end()
924 mutex_unlock(&sup->srcu_cb_mutex); in srcu_gp_end()
927 spin_lock_irq_rcu_node(sup); in srcu_gp_end()
928 gpseq = rcu_seq_current(&sup->srcu_gp_seq); in srcu_gp_end()
930 ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) { in srcu_gp_end()
932 spin_unlock_irq_rcu_node(sup); in srcu_gp_end()
935 spin_unlock_irq_rcu_node(sup); in srcu_gp_end()
943 smp_store_release(&sup->srcu_size_state, ss_state + 1); in srcu_gp_end()
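In srcu_gp_end(), the grace-period sequence is closed under the srcu_usage lock (rcu_seq_end(), recording srcu_last_gp_end, pulling srcu_gp_seq_needed_exp forward), and any advance of srcu_size_state is published with smp_store_release() so that the smp_load_acquire() readers, here and in srcu_funnel_gp_start(), observe all the tree setup done before the transition. A userspace acquire/release sketch of that publish/observe handshake; the three-state enum and the tree, publish_bigger_tree() and tree_nodes_if_ready() names are illustrative simplifications, not the real SRCU state machine:

#include <stdatomic.h>
#include <stdlib.h>

enum { SIZE_SMALL, SIZE_ALLOC, SIZE_WAIT_CALL };	/* condensed state machine */

struct tree {
	int        *nodes;	/* built while still in SIZE_ALLOC           */
	atomic_int  size_state;	/* published with release, read with acquire */
};

/* Writer: finish building the tree, then advance the state with release. */
static void publish_bigger_tree(struct tree *t, size_t n)
{
	t->nodes = calloc(n, sizeof(*t->nodes));
	atomic_store_explicit(&t->size_state, SIZE_WAIT_CALL, memory_order_release);
}

/* Reader: an acquire load of the state makes t->nodes safe to dereference. */
static int *tree_nodes_if_ready(struct tree *t)
{
	if (atomic_load_explicit(&t->size_state, memory_order_acquire) >= SIZE_WAIT_CALL)
		return t->nodes;
	return NULL;
}

The release store on the writer side pairs with the acquire load on the reader side, which is what lets the reader dereference the nodes array without taking the lock.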
1003 struct srcu_usage *sup = ssp->srcu_sup; in srcu_funnel_gp_start() local
1006 if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) in srcu_funnel_gp_start()
1014 if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf) in srcu_funnel_gp_start()
1041 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) { in srcu_funnel_gp_start()
1046 smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/ in srcu_funnel_gp_start()
1048 if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s)) in srcu_funnel_gp_start()
1049 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s); in srcu_funnel_gp_start()
1052 if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && in srcu_funnel_gp_start()
1053 rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) { in srcu_funnel_gp_start()
1054 WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed)); in srcu_funnel_gp_start()
1063 queue_delayed_work(rcu_gp_wq, &sup->work, in srcu_funnel_gp_start()
1065 else if (list_empty(&sup->work.work.entry)) in srcu_funnel_gp_start()
1066 list_add(&sup->work.work.entry, &srcu_boot_list); in srcu_funnel_gp_start()
1068 spin_unlock_irqrestore_rcu_node(sup, flags); in srcu_funnel_gp_start()
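srcu_funnel_gp_start() is the request side of the state machine: it records the furthest-needed grace period (srcu_gp_seq_needed, plus the expedited variant), and only the requester that finds the state machine idle actually starts one, either by queueing the delayed work or, before workqueues are up, by parking the work item on srcu_boot_list (drained later by srcu_init(), see the sketch after those lines). A heavily simplified userspace sketch of that coalescing pattern, collapsing the per-node funnel walk into a single lock; gp_funnel, request_gp() and start_worker() are hypothetical names:

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

struct gp_funnel {
	pthread_mutex_t lock;
	unsigned long   seq_needed;	/* furthest grace period requested so far  */
	bool            gp_running;	/* a worker already owns the state machine */
};

/* Stand-in for queueing the delayed work that drives the grace period. */
static void start_worker(struct gp_funnel *f)
{
	(void)f;
}

static void request_gp(struct gp_funnel *f, unsigned long s)
{
	bool kick = false;

	pthread_mutex_lock(&f->lock);
	if (ULONG_MAX / 2 < f->seq_needed - s)	/* wrap-safe: seq_needed < s */
		f->seq_needed = s;
	if (!f->gp_running) {			/* first idle-state requester starts it */
		f->gp_running = true;
		kick = true;
	}
	pthread_mutex_unlock(&f->lock);
	if (kick)
		start_worker(f);		/* never called while holding the lock */
}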
1813 struct srcu_usage *sup; in process_srcu() local
1815 sup = container_of(work, struct srcu_usage, work.work); in process_srcu()
1816 ssp = sup->srcu_ssp; in process_srcu()
1821 WRITE_ONCE(sup->reschedule_count, 0); in process_srcu()
1824 if (READ_ONCE(sup->reschedule_jiffies) == j) { in process_srcu()
1825 ASSERT_EXCLUSIVE_WRITER(sup->reschedule_count); in process_srcu()
1826 WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1); in process_srcu()
1827 if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay) in process_srcu()
1830 WRITE_ONCE(sup->reschedule_count, 1); in process_srcu()
1831 WRITE_ONCE(sup->reschedule_jiffies, j); in process_srcu()
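process_srcu() throttles its own self-requeueing: if the handler runs again within the same jiffy it bumps reschedule_count (again a single-writer counter, hence ASSERT_EXCLUSIVE_WRITER plus READ_ONCE/WRITE_ONCE), and once the count exceeds srcu_max_nodelay the requeue gets a delay; a new jiffy resets the count to 1. A userspace sketch of that back-off bookkeeping, using coarse time(NULL) seconds where the kernel uses jiffies; requeue_throttle and should_back_off() are illustrative names:

#include <stdbool.h>
#include <time.h>

struct requeue_throttle {
	time_t        last_stamp;	/* coarse timestamp of the last requeue     */
	unsigned long count;		/* requeues seen within that same timestamp */
};

/* Return true when the caller should sleep a bit before requeueing itself. */
static bool should_back_off(struct requeue_throttle *t, unsigned long max_nodelay)
{
	time_t now = time(NULL);

	if (t->last_stamp == now) {
		t->count++;			/* still hammering the same instant */
		return t->count > max_nodelay;
	}
	t->count = 1;				/* new instant: reset the counter   */
	t->last_stamp = now;
	return false;
}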
1927 struct srcu_usage *sup; in srcu_init() local
1947 sup = list_first_entry(&srcu_boot_list, struct srcu_usage, in srcu_init()
1949 list_del_init(&sup->work.work.entry); in srcu_init()
1951 sup->srcu_size_state == SRCU_SIZE_SMALL) in srcu_init()
1952 sup->srcu_size_state = SRCU_SIZE_ALLOC; in srcu_init()
1953 queue_work(rcu_gp_wq, &sup->work.work); in srcu_init()
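srcu_init() drains srcu_boot_list once workqueues exist: each parked work item is unlinked, still-SMALL instances are promoted to SRCU_SIZE_ALLOC when the (partially shown) condition at line 1951 holds, and the work is finally queued on rcu_gp_wq. A userspace sketch of the same defer-then-drain pattern with a plain singly linked list instead of list_head; deferred_work, defer_or_run() and drain_boot_list() are hypothetical names:

#include <stdbool.h>

struct deferred_work {
	struct deferred_work *next;
	void (*fn)(struct deferred_work *);
	int                   size_state;	/* 0 = "small" until promoted at init */
};

static struct deferred_work *boot_list;		/* filled before workers exist */
static bool workers_ready;

/* Early requesters park their work here instead of queueing it. */
static void defer_or_run(struct deferred_work *w)
{
	if (!workers_ready) {
		w->next = boot_list;
		boot_list = w;
		return;
	}
	w->fn(w);				/* normally: queue the work item */
}

/* Late-boot init: drain everything that accumulated on the boot list. */
static void drain_boot_list(void)
{
	workers_ready = true;
	while (boot_list) {
		struct deferred_work *w = boot_list;

		boot_list = w->next;
		if (w->size_state == 0)		/* mirror the SMALL -> ALLOC promotion */
			w->size_state = 1;
		w->fn(w);
	}
}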