Lines matching refs:waiter

Identifier cross-reference for 'waiter', apparently over kernel/locking/rtmutex.c: the leading number on each match is the source line number, and the trailing "argument"/"local" tag records how the identifier is used in the named function.

35 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,  in __ww_mutex_add_waiter()  argument
53 struct rt_mutex_waiter *waiter, in __ww_mutex_check_kill() argument
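These two ww_mutex hooks live in the glue near the top of the file; in a build without wound/wait semantics they reduce to no-op stubs. A sketch of that shape, with the signatures paraphrased (the rtm/lock parameter names are assumptions):

    static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
                                            struct rt_mutex *lock,
                                            struct ww_acquire_ctx *ww_ctx)
    {
            return 0;       /* no wound/wait bookkeeping in this build */
    }

    static inline int __ww_mutex_check_kill(struct rt_mutex *rtm,
                                            struct rt_mutex_waiter *waiter,
                                            struct ww_acquire_ctx *ww_ctx)
    {
            return 0;       /* never asks the waiter to back off */
    }
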
360 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) in waiter_update_prio() argument
362 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_update_prio()
363 lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); in waiter_update_prio()
365 waiter->tree.prio = __waiter_prio(task); in waiter_update_prio()
366 waiter->tree.deadline = task->dl.deadline; in waiter_update_prio()
373 waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) in waiter_clone_prio() argument
375 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_clone_prio()
377 lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); in waiter_clone_prio()
379 waiter->pi_tree.prio = waiter->tree.prio; in waiter_clone_prio()
380 waiter->pi_tree.deadline = waiter->tree.deadline; in waiter_clone_prio()
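waiter_update_prio() fills waiter->tree from the blocked task, and waiter_clone_prio() copies that into waiter->pi_tree: each waiter carries two rbtree nodes, one ordering it among the lock's waiters and one representing the lock's top waiter inside the owner's pi_waiters tree. A sketch of the layout, paraphrased from rtmutex_common.h (the exact field set varies by kernel version):

    struct rt_waiter_node {
            struct rb_node  entry;          /* node in one rbtree             */
            int             prio;           /* effective priority             */
            u64             deadline;       /* tie-breaker for SCHED_DEADLINE */
    };

    struct rt_mutex_waiter {
            struct rt_waiter_node   tree;     /* keyed in lock->waiters     */
            struct rt_waiter_node   pi_tree;  /* keyed in owner->pi_waiters */
            struct task_struct      *task;    /* the blocked task           */
            struct rt_mutex_base    *lock;    /* the lock blocked on        */
            struct ww_acquire_ctx   *ww_ctx;  /* ww context, if any         */
    };

Keeping separate prio/deadline copies in the two nodes lets the chain walk reorder the lock-side tree first and refresh the pi-side copy only once it holds the owner's pi_lock, which is what the lockdep asserts at 362-377 police.
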
427 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, in rt_mutex_steal() argument
430 if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) in rt_mutex_steal()
438 if (rt_or_dl_prio(waiter->tree.prio)) in rt_mutex_steal()
441 return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); in rt_mutex_steal()
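These three hits encode the steal policy in rt_mutex_steal(): a strictly higher-priority waiter may always take the lock ahead of the current top waiter, while an equal-priority ("lateral") steal is allowed only for non-RT/DL tasks, to avoid injecting unbounded latency into realtime tasks. A reconstruction from the matched lines (the #ifdef framing is an assumption; outside the spinlock build, lateral steals are simply disallowed):

    static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
                                      struct rt_mutex_waiter *top_waiter)
    {
            /* Strictly higher priority than the current top waiter? */
            if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
                    return true;

    #ifdef RT_MUTEX_BUILD_SPINLOCKS
            /* No lateral steals for RT/DL tasks: unbounded latency. */
            if (rt_or_dl_prio(waiter->tree.prio))
                    return false;

            /* Equal-priority steal is fine for a SCHED_OTHER waiter. */
            return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
    #else
            return false;
    #endif
    }
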
477 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
481 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
485 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
489 if (RB_EMPTY_NODE(&waiter->tree.entry)) in rt_mutex_dequeue()
492 rb_erase_cached(&waiter->tree.entry, &lock->waiters); in rt_mutex_dequeue()
493 RB_CLEAR_NODE(&waiter->tree.entry); in rt_mutex_dequeue()
505 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue_pi() argument
509 rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); in rt_mutex_enqueue_pi()
513 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue_pi() argument
517 if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) in rt_mutex_dequeue_pi()
520 rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
521 RB_CLEAR_NODE(&waiter->pi_tree.entry); in rt_mutex_dequeue_pi()
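All four queue helpers follow the same cached-rbtree pattern: rb_add_cached() inserts under a less-than comparator, rb_erase_cached() removes, and RB_CLEAR_NODE() marks the node unlinked so the RB_EMPTY_NODE() guard can tolerate a double dequeue. A sketch of the lock-side pair with its comparator (paraphrased; the pi-side pair at 505-521 is identical modulo pi_tree.entry, task->pi_waiters and __pi_waiter_less):

    #define __node_2_waiter(node) \
            rb_entry((node), struct rt_mutex_waiter, tree.entry)

    static __always_inline bool __waiter_less(struct rb_node *a,
                                              const struct rb_node *b)
    {
            return rt_waiter_node_less(&__node_2_waiter(a)->tree,
                                       &__node_2_waiter(b)->tree);
    }

    static void rt_mutex_enqueue(struct rt_mutex_base *lock,
                                 struct rt_mutex_waiter *waiter)
    {
            rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
    }

    static void rt_mutex_dequeue(struct rt_mutex_base *lock,
                                 struct rt_mutex_waiter *waiter)
    {
            if (RB_EMPTY_NODE(&waiter->tree.entry))
                    return;                 /* already dequeued */

            rb_erase_cached(&waiter->tree.entry, &lock->waiters);
            RB_CLEAR_NODE(&waiter->tree.entry);
    }
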
589 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, in rt_mutex_cond_detect_deadlock() argument
593 return waiter != NULL; in rt_mutex_cond_detect_deadlock()
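Line 593 is the CONFIG_DEBUG_RT_MUTEXES branch: with debugging enabled, any waiter forces full deadlock detection; otherwise the decision follows the chainwalk mode the caller requested. A plausible reconstruction (the non-debug return does not appear in the matches and is paraphrased):

    static __always_inline bool
    rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                                  enum rtmutex_chainwalk chwalk)
    {
            if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                    return waiter != NULL;

            return chwalk == RT_MUTEX_FULL_CHAINWALK;
    }
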
682 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; in rt_mutex_adjust_prio_chain() local
734 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
745 if (!waiter) in rt_mutex_adjust_prio_chain()
764 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
789 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) in rt_mutex_adjust_prio_chain()
821 if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { in rt_mutex_adjust_prio_chain()
832 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
932 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
945 waiter_update_prio(waiter, task); in rt_mutex_adjust_prio_chain()
947 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
989 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
997 waiter_clone_prio(waiter, task); in rt_mutex_adjust_prio_chain()
998 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
1001 } else if (prerequeue_top_waiter == waiter) { in rt_mutex_adjust_prio_chain()
1012 rt_mutex_dequeue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
1013 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
1014 waiter_clone_prio(waiter, task); in rt_mutex_adjust_prio_chain()
1015 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
1060 if (!detect_deadlock && waiter != top_waiter) in rt_mutex_adjust_prio_chain()
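This cluster walks through the heart of rt_mutex_adjust_prio_chain(): re-read task->pi_blocked_on under pi_lock (734), abort if the task is no longer blocked or is blocked on a different lock than the walk expected (745, 764), then requeue the waiter with its updated priority (932-947) and fix up the owner's pi_waiters tree (989-1015). A heavily condensed sketch of that requeue step; locking, refcounting and the many exit checks are elided, and the real code re-points 'task' from the blocked task to the lock owner between the two halves:

    /* First half: re-sort the waiter in the lock's wait tree. */
    rt_mutex_dequeue(lock, waiter);
    waiter_update_prio(waiter, task);       /* adopt the new prio/deadline */
    rt_mutex_enqueue(lock, waiter);

    /* ('task' now means the lock owner.)  Second half: only the top
     * waiter of a lock lives in the owner's pi_waiters tree, so the
     * pi side needs fixing only if the requeue changed who that is. */
    if (waiter == rt_mutex_top_waiter(lock)) {
            /* Waiter moved to the front: replace the previous top. */
            rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
            waiter_clone_prio(waiter, task);
            rt_mutex_enqueue_pi(task, waiter);
    } else if (prerequeue_top_waiter == waiter) {
            /* Waiter lost the front: boost via the new top instead. */
            rt_mutex_dequeue_pi(task, waiter);
            waiter = rt_mutex_top_waiter(lock);
            waiter_clone_prio(waiter, task);
            rt_mutex_enqueue_pi(task, waiter);
    }

Line 1060 is the early stop: without deadlock detection, the walk ends once the waiter being propagated is no longer the top waiter of the current lock, since nothing further up the chain can have changed.
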
1085 struct rt_mutex_waiter *waiter) in try_to_take_rt_mutex() argument
1119 if (waiter) { in try_to_take_rt_mutex()
1126 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { in try_to_take_rt_mutex()
1131 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
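In try_to_take_rt_mutex(), the waiter branch (1119-1131) lets the caller acquire only if it is the lock's top waiter or is allowed to steal from it; on success the waiter leaves the wait tree. Paraphrased from the matched lines:

    if (waiter) {
            struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);

            /* Take over only as the top waiter or a permitted thief. */
            if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter))
                    rt_mutex_dequeue(lock, waiter);
            else
                    return 0;
    }
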
1201 struct rt_mutex_waiter *waiter, in task_blocks_on_rt_mutex() argument
1207 struct rt_mutex_waiter *top_waiter = waiter; in task_blocks_on_rt_mutex()
1229 waiter->task = task; in task_blocks_on_rt_mutex()
1230 waiter->lock = lock; in task_blocks_on_rt_mutex()
1231 waiter_update_prio(waiter, task); in task_blocks_on_rt_mutex()
1232 waiter_clone_prio(waiter, task); in task_blocks_on_rt_mutex()
1237 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1239 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
1248 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); in task_blocks_on_rt_mutex()
1251 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1262 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1264 rt_mutex_enqueue_pi(owner, waiter); in task_blocks_on_rt_mutex()
1269 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { in task_blocks_on_rt_mutex()
1295 next_lock, waiter, task); in task_blocks_on_rt_mutex()
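task_blocks_on_rt_mutex() is where a task formally blocks: fill in the waiter, queue it, record the blockage in task->pi_blocked_on, and boost the owner if the new waiter went to the front. A condensed sketch of the sequence visible in the matches; the no-owner case, locking and the chain-walk trigger conditions are simplified, and 'top_waiter' stands for the previous top waiter captured before the enqueue:

    waiter->task = task;
    waiter->lock = lock;
    waiter_update_prio(waiter, task);       /* fill waiter->tree       */
    waiter_clone_prio(waiter, task);        /* prime waiter->pi_tree   */

    rt_mutex_enqueue(lock, waiter);         /* sort into lock->waiters */
    task->pi_blocked_on = waiter;           /* task is now blocked     */

    res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
    if (res) {
            /* ww_mutex wounded us: unwind the enqueue and bail. */
            rt_mutex_dequeue(lock, waiter);
            task->pi_blocked_on = NULL;
            return res;
    }

    if (waiter == rt_mutex_top_waiter(lock)) {
            /* New top waiter: swap into the owner's pi tree and boost. */
            rt_mutex_dequeue_pi(owner, top_waiter);
            rt_mutex_enqueue_pi(owner, waiter);
            rt_mutex_adjust_prio(lock, owner);
    } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
            chain_walk = 1;                 /* full deadlock detection */
    }

    /* Propagate the boost down the chain if needed (line 1295). */
    res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                     next_lock, waiter, task);
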
1311 struct rt_mutex_waiter *waiter; in mark_wakeup_next_waiter() local
1317 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1326 rt_mutex_dequeue_pi(current, waiter); in mark_wakeup_next_waiter()
1350 rt_mutex_wake_q_add(wqh, waiter); in mark_wakeup_next_waiter()
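mark_wakeup_next_waiter() runs in the unlock path: pick the top waiter, stop it from boosting the releasing owner, and queue the wakeup. A condensed sketch (the comment on the owner word paraphrases the source; RT_MUTEX_HAS_WAITERS is the real flag):

    waiter = rt_mutex_top_waiter(lock);

    /* Deboost: the departing owner no longer inherits from it. */
    rt_mutex_dequeue_pi(current, waiter);
    rt_mutex_adjust_prio(lock, current);

    /* The waiter stays queued on the lock until it really acquires
     * it, so keep the waiters bit set: all new lockers must take the
     * slow path, and no lower-priority task can steal the lock. */
    lock->owner = (void *)RT_MUTEX_HAS_WAITERS;

    rt_mutex_wake_q_add(wqh, waiter);       /* wakeup happens later */
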
1478 struct rt_mutex_waiter *waiter, in rtmutex_spin_on_owner() argument
1504 !rt_mutex_waiter_is_top_waiter(lock, waiter)) { in rtmutex_spin_on_owner()
1515 struct rt_mutex_waiter *waiter, in rtmutex_spin_on_owner() argument
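rtmutex_spin_on_owner() matches twice because the file carries two builds: an adaptive-spinning version for SMP and a stub that always returns false (just go to sleep) otherwise. Line 1504 is the spin-abort test. A sketch of the SMP loop under those assumptions:

    static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                                      struct rt_mutex_waiter *waiter,
                                      struct task_struct *owner)
    {
            bool res = true;

            rcu_read_lock();
            for (;;) {
                    /* Owner changed: drop out and retry the trylock. */
                    if (owner != rt_mutex_owner(lock))
                            break;
                    /* Stop spinning if the owner went off-CPU, we must
                     * reschedule, or we lost the top-waiter spot. */
                    if (!owner_on_cpu(owner) || need_resched() ||
                        !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
                            res = false;
                            break;
                    }
                    cpu_relax();
            }
            rcu_read_unlock();
            return res;
    }
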
1536 struct rt_mutex_waiter *waiter) in remove_waiter() argument
1538 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1545 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1558 rt_mutex_dequeue_pi(owner, waiter); in remove_waiter()
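remove_waiter() is the failure-path unwind: leave the wait tree, clear pi_blocked_on, and if we were the top waiter, undo the boost and hand the pi slot to the new top waiter. A condensed sketch (the trailing chain-walk step of the real function is elided):

    bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
    struct task_struct *owner = rt_mutex_owner(lock);

    raw_spin_lock(&current->pi_lock);
    rt_mutex_dequeue(lock, waiter);         /* leave the wait tree */
    current->pi_blocked_on = NULL;
    raw_spin_unlock(&current->pi_lock);

    /* Only the top waiter boosts the owner; others just leave. */
    if (owner && is_top_waiter) {
            raw_spin_lock(&owner->pi_lock);
            rt_mutex_dequeue_pi(owner, waiter);     /* stop boosting   */
            if (rt_mutex_has_waiters(lock))         /* promote new top */
                    rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
            rt_mutex_adjust_prio(lock, owner);
            raw_spin_unlock(&owner->pi_lock);
    }
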
1603 struct rt_mutex_waiter *waiter) in rt_mutex_slowlock_block() argument
1611 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1624 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); in rt_mutex_slowlock_block()
1629 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1635 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
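rt_mutex_slowlock_block() is the wait loop proper: try to take the lock, honour timeouts, signals and ww back-off, then either spin on a running owner (top waiter only) or schedule away. A condensed sketch; rt_mutex_schedule() here is plain schedule() in older kernels:

    for (;;) {
            if (try_to_take_rt_mutex(lock, current, waiter))
                    break;                          /* got the lock */

            if (timeout && !timeout->task) {
                    ret = -ETIMEDOUT;
                    break;
            }
            if (signal_pending_state(state, current)) {
                    ret = -EINTR;
                    break;
            }

            /* ww_mutex may ask this waiter to back off. */
            ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
            if (ret)
                    break;

            /* Only the top waiter is allowed to spin on the owner. */
            owner = (waiter == rt_mutex_top_waiter(lock)) ?
                    rt_mutex_owner(lock) : NULL;
            raw_spin_unlock_irq(&lock->wait_lock);

            if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
                    rt_mutex_schedule();

            raw_spin_lock_irq(&lock->wait_lock);
            set_current_state(state);
    }
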
1682 struct rt_mutex_waiter *waiter) in __rt_mutex_slowlock() argument
1703 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); in __rt_mutex_slowlock()
1705 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); in __rt_mutex_slowlock()
1716 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1717 rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); in __rt_mutex_slowlock()
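__rt_mutex_slowlock() stitches the pieces together: block on the lock, wait, and on failure unwind completely so no stale boosting survives. Paraphrased from the matched lines:

    ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
    if (!ret)
            ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);

    if (unlikely(ret)) {
            /* Timeout, signal or deadlock: take ourselves back out. */
            __set_current_state(TASK_RUNNING);
            remove_waiter(lock, waiter);
            rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
    }
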
1735 struct rt_mutex_waiter waiter; in __rt_mutex_slowlock_locked() local
1738 rt_mutex_init_waiter(&waiter); in __rt_mutex_slowlock_locked()
1739 waiter.ww_ctx = ww_ctx; in __rt_mutex_slowlock_locked()
1742 &waiter); in __rt_mutex_slowlock_locked()
1744 debug_rt_mutex_free_waiter(&waiter); in __rt_mutex_slowlock_locked()
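__rt_mutex_slowlock_locked() shows the waiter's whole lifetime: a stack-local object that exists only while this task is blocked. A near-complete reconstruction (the RT_MUTEX_MIN_CHAINWALK argument is an assumption, it does not appear in the matches):

    static int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
                                          struct ww_acquire_ctx *ww_ctx,
                                          unsigned int state)
    {
            struct rt_mutex_waiter waiter;  /* lives on this stack frame */
            int ret;

            rt_mutex_init_waiter(&waiter);  /* clear both rbtree nodes */
            waiter.ww_ctx = ww_ctx;

            ret = __rt_mutex_slowlock(lock, ww_ctx, state,
                                      RT_MUTEX_MIN_CHAINWALK, &waiter);

            debug_rt_mutex_free_waiter(&waiter);    /* debug-only check */
            return ret;
    }
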
1809 struct rt_mutex_waiter waiter; in rtlock_slowlock_locked() local
1817 rt_mutex_init_rtlock_waiter(&waiter); in rtlock_slowlock_locked()
1824 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); in rtlock_slowlock_locked()
1828 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1831 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1837 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1852 debug_rt_mutex_free_waiter(&waiter); in rtlock_slowlock_locked()
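rtlock_slowlock_locked() is the PREEMPT_RT spinlock variant of the same dance: rtlock waiters get a different wake state via rt_mutex_init_rtlock_waiter() (1817) and sleep in TASK_RTLOCK_WAIT, but the loop mirrors rt_mutex_slowlock_block() without timeouts or signal handling. A condensed sketch; the save/restore of the task state around the loop is paraphrased:

    rt_mutex_init_rtlock_waiter(&waiter);

    /* Save the task state; waiting uses TASK_RTLOCK_WAIT. */
    current_save_and_set_rtlock_wait_state();

    task_blocks_on_rt_mutex(lock, &waiter, current, NULL,
                            RT_MUTEX_MIN_CHAINWALK);

    for (;;) {
            if (try_to_take_rt_mutex(lock, current, &waiter))
                    break;                  /* lock acquired */

            owner = (&waiter == rt_mutex_top_waiter(lock)) ?
                    rt_mutex_owner(lock) : NULL;
            raw_spin_unlock_irq(&lock->wait_lock);

            if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
                    schedule_rtlock();      /* rtlock-aware schedule */

            raw_spin_lock_irq(&lock->wait_lock);
            set_current_state(TASK_RTLOCK_WAIT);
    }

    current_restore_rtlock_saved_state();
    debug_rt_mutex_free_waiter(&waiter);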