Lines Matching full:gp
46 unsigned long gp_seq_needed; /* Track furthest future GP request. */
54 unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
56 /* Per-GP initial value for qsmask. */
61 /* to allow the current expedited GP */
64 /* Per-GP initial values for expmask. */
66 /* beginning of each expedited GP. */
68 /* Online CPUs for next expedited GP. */
72 /* Workers performing per node expedited GP */
126 /* Place for rcu_nocb_kthread() to wait GP. */
181 unsigned long gp_seq_needed; /* Track furthest future GP request. */
209 int watching_snap; /* Per-GP tracking for dynticks. */
210 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
211 bool rcu_urgent_qs; /* GP old, need light quiescent state. */
213 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */
229 struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
239 /* The following fields are used by GP kthread, hence own cacheline. */
241 u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */
243 u8 nocb_gp_gp; /* GP to wait for on last scan? */
258 /* GP rdp takes GP-end wakeups. */
352 unsigned long gp_max; /* Maximum GP duration in */
355 struct swait_queue_head gp_wq; /* Where GP task waits. */
356 short gp_flags; /* Commands for GP task. */
357 short gp_state; /* GP kthread sleep state. */
358 unsigned long gp_wake_time; /* Last GP kthread wake. */
360 unsigned long gp_seq_polled; /* GP seq for polled API. */
361 unsigned long gp_seq_polled_snap; /* ->gp_seq_polled at normal GP start. */
362 unsigned long gp_seq_polled_exp_snap; /* ->gp_seq_polled at expedited GP start. */
375 struct mutex exp_mutex; /* Serialize expedited GP. */
390 unsigned long gp_start; /* Time at which GP started, */
392 unsigned long gp_end; /* Time last GP ended, again */
394 unsigned long gp_activity; /* Time of last GP kthread */
396 unsigned long gp_req_activity; /* Time of last GP request */
407 /* GP start. */
413 /* GP pre-initialization. */
416 struct llist_head srs_next; /* request a GP users. */
417 struct llist_node *srs_wait_tail; /* wait for GP users. */
418 struct llist_node *srs_done_tail; /* ready for GP users. */
435 #define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */