Lines Matching +full:fault +full:- +full:q

1 // SPDX-License-Identifier: GPL-2.0-or-later
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
29 * Kirkwood for proof-of-concept implementation.
40 #include <linux/fault-inject.h>
60 * Fault injections for futexes.
99 debugfs_create_bool("ignore-private", mode, dir, in fail_futex_debugfs()
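
These fragments are from the optional fault-injection hooks for futexes. A hedged sketch of how the "ignore-private" knob above is typically consumed, assuming the usual fault_attr pattern (the struct layout here is an assumption; should_fail() and FAULT_ATTR_INITIALIZER are the standard fault-injection API):

	static struct {
		struct fault_attr attr;
		bool ignore_private;		/* exposed via debugfs above */
	} fail_futex = {
		.attr = FAULT_ATTR_INITIALIZER,
		.ignore_private = false,
	};

	bool should_fail_futex(bool fshared)
	{
		/* When ignore-private is set, only shared futexes see failures. */
		if (fail_futex.ignore_private && !fshared)
			return false;

		return should_fail(&fail_futex.attr, 1);
	}
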
111 * futex_hash - Return the hash bucket in the global hash
120 key->both.offset); in futex_hash()
122 return &futex_queues[hash & (futex_hashsize - 1)]; in futex_hash()
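
Line 122 is the classic power-of-two bucket lookup: because futex_hashsize is a power of two, masking with (futex_hashsize - 1) is equivalent to a modulo. A self-contained userspace sketch of the same technique (all names are illustrative; FNV-1a stands in for the kernel's jhash2()):

	#include <stdint.h>
	#include <stddef.h>

	#define TABLE_SIZE 256			/* must be a power of two */

	struct bucket { int placeholder; };	/* per-bucket lock + wait list */
	static struct bucket buckets[TABLE_SIZE];

	static uint32_t hash_bytes(const void *key, size_t len)
	{
		const unsigned char *p = key;
		uint32_t h = 2166136261u;	/* FNV-1a offset basis */

		while (len--)
			h = (h ^ *p++) * 16777619u;	/* FNV-1a prime */
		return h;
	}

	static struct bucket *key_to_bucket(const void *key, size_t len)
	{
		/* hash & (size - 1) == hash % size when size is a power of two */
		return &buckets[hash_bytes(key, len) & (TABLE_SIZE - 1)];
	}
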
127 * futex_setup_timer - set up the sleeping hrtimer.
150 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); in futex_setup_timer()
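
From the docbook header and line 150, the helper's shape can be reconstructed. A hedged sketch; the sleeper-initialization helper has been renamed across kernel versions, so treat the exact call as an assumption:

	struct hrtimer_sleeper *
	futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
			  int flags, u64 range_ns)
	{
		if (!time)
			return NULL;		/* no timeout: sleep indefinitely */

		hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
					      CLOCK_REALTIME : CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
		/* With range_ns == 0 this degenerates to hrtimer_set_expires(). */
		hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
		return timeout;
	}
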
158 * This relies on u64 not wrapping in the life-time of the machine; which with
169 * It is important that futex_match() will never have a false-positive, esp.
170 * for PI futexes that can mess up the state. The above argues that false-negatives
179 old = atomic64_read(&inode->i_sequence); in get_inode_sequence_number()
188 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new); in get_inode_sequence_number()
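
Lines 179 and 188 show a lockless assign-once scheme: read the field; if it is still zero, race to install a fresh nonzero value with a relaxed cmpxchg, and losers adopt the winner's value. A compilable userspace sketch of the same pattern in C11 atomics (names are illustrative):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t seq_counter;	/* global, only ever increments */

	uint64_t get_sequence(_Atomic uint64_t *field)
	{
		uint64_t old = atomic_load_explicit(field, memory_order_relaxed);

		for (;;) {
			uint64_t new, expected = 0;

			if (old)
				return old;	/* already assigned */

			/* Skip 0 so "unset" stays distinguishable; relies on
			 * the u64 counter not wrapping in the machine's
			 * lifetime. */
			new = atomic_fetch_add(&seq_counter, 1) + 1;
			if (!new)
				continue;

			if (atomic_compare_exchange_strong_explicit(field,
					&expected, new, memory_order_relaxed,
					memory_order_relaxed))
				return new;
			old = expected;	/* lost the race: take the winner's */
		}
	}
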
196 * get_futex_key() - Get parameters which are the keys for a futex
209 * ( inode->i_sequence, page->index, offset_within_page )
215 * ( current->mm, address, 0 )
226 struct mm_struct *mm = current->mm; in get_futex_key()
238 key->both.offset = address % PAGE_SIZE; in get_futex_key()
240 return -EINVAL; in get_futex_key()
241 address -= key->both.offset; in get_futex_key()
244 return -EFAULT; in get_futex_key()
247 return -EFAULT; in get_futex_key()
258 * On no-MMU, shared futexes are treated as private, therefore in get_futex_key()
264 key->private.mm = mm; in get_futex_key()
266 key->private.mm = NULL; in get_futex_key()
268 key->private.address = address; in get_futex_key()
275 return -EFAULT; in get_futex_key()
280 * and get read-only access. in get_futex_key()
282 if (err == -EFAULT && rw == FUTEX_READ) { in get_futex_key()
295 * file-backed region case and guards against movement to swap cache. in get_futex_key()
299 * From this point on, mapping will be re-verified if necessary and in get_futex_key()
305 * filesystem-backed pages, the precise page is required as the in get_futex_key()
309 mapping = READ_ONCE(folio->mapping); in get_futex_key()
312 * If folio->mapping is NULL, then it cannot be an anonymous in get_futex_key()
324 * an unlikely race, but we do need to retry for folio->mapping. in get_futex_key()
335 shmem_swizzled = folio_test_swapcache(folio) || folio->mapping; in get_futex_key()
342 return -EFAULT; in get_futex_key()
352 * it's a read-only handle, it's expected that futexes attach to in get_futex_key()
361 err = -EFAULT; in get_futex_key()
365 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ in get_futex_key()
366 key->private.mm = mm; in get_futex_key()
367 key->private.address = address; in get_futex_key()
374 * the folio->mapping must be traversed. Ordinarily this should in get_futex_key()
381 * mapping->host can be safely accessed as being a valid inode. in get_futex_key()
385 if (READ_ONCE(folio->mapping) != mapping) { in get_futex_key()
392 inode = READ_ONCE(mapping->host); in get_futex_key()
400 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ in get_futex_key()
401 key->shared.i_seq = get_inode_sequence_number(inode); in get_futex_key()
402 key->shared.pgoff = folio->index + folio_page_idx(folio, page); in get_futex_key()
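
The key forms documented at lines 209 and 215, plus the MMSHARED variant set at line 365, correspond to a three-way union. A simplified sketch of its shape; the field names mirror the kernel's union futex_key, but the layout here is abbreviated:

	union futex_key {
		struct {			/* shared, inode-based (FUT_OFF_INODE) */
			u64 i_seq;		/* from get_inode_sequence_number() */
			unsigned long pgoff;	/* page offset within the mapping */
			unsigned int offset;	/* offset within page + flag bits */
		} shared;
		struct {			/* private (or MMSHARED / no-MMU) */
			struct mm_struct *mm;	/* current->mm, NULL on !MMU */
			unsigned long address;
			unsigned int offset;
		} private;
		struct {			/* type-agnostic view for hash/match */
			u64 ptr;
			unsigned long word;
			unsigned int offset;
		} both;
	};
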
412 * fault_in_user_writeable() - Fault in user address and verify RW access
415 * Slow path to fix up the fault we just took in the atomic write
418 * We have no generic implementation of a non-destructive write to the
425 struct mm_struct *mm = current->mm; in fault_in_user_writeable()
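
Only the entry of fault_in_user_writeable() survives above; the body is the standard fixup_user_fault() slow path, reconstructed here as a hedged sketch (the fixup_user_fault() signature has shifted across kernel versions):

	int fault_in_user_writeable(u32 __user *uaddr)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		mmap_read_lock(mm);
		ret = fixup_user_fault(mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE, NULL);
		mmap_read_unlock(mm);

		return ret < 0 ? -EFAULT : 0;
	}
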
437 * futex_top_waiter() - Return the highest priority waiter on a futex
447 plist_for_each_entry(this, &hb->chain, list) { in futex_top_waiter()
448 if (futex_match(&this->key, key)) in futex_top_waiter()
473 return ret ? -EFAULT : 0; in futex_get_value_locked()
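
Line 473 is the tail of futex_get_value_locked(); the rest is the standard "read user memory with page faults disabled" pattern, since the caller holds a hash-bucket spinlock and must not sleep. A hedged reconstruction:

	int futex_get_value_locked(u32 *dest, u32 __user *from)
	{
		int ret;

		pagefault_disable();		/* caller holds hb->lock: no sleeping */
		ret = __get_user(*dest, from);	/* fails fast instead of faulting */
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}
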
477 * wait_for_owner_exiting - Block until the owner has exited
485 if (ret != -EBUSY) { in wait_for_owner_exiting()
490 if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) in wait_for_owner_exiting()
493 mutex_lock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
496 * while the task was in exec()->exec_futex_release() then it can in wait_for_owner_exiting()
502 mutex_unlock(&exiting->futex_exit_mutex); in wait_for_owner_exiting()
508 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
509 * @q: The futex_q to unqueue
511 * The q->lock_ptr must not be NULL and must be held by the caller.
513 void __futex_unqueue(struct futex_q *q) in __futex_unqueue() argument
517 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) in __futex_unqueue()
519 lockdep_assert_held(q->lock_ptr); in __futex_unqueue()
521 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __futex_unqueue()
522 plist_del(&q->list, &hb->chain); in __futex_unqueue()
526 /* The key must be already stored in q->key. */
527 struct futex_hash_bucket *futex_q_lock(struct futex_q *q) in futex_q_lock() argument
528 __acquires(&hb->lock) in futex_q_lock()
532 hb = futex_hash(&q->key); in futex_q_lock()
536 * a potential waker won't miss a to-be-slept task that is in futex_q_lock()
544 q->lock_ptr = &hb->lock; in futex_q_lock()
546 spin_lock(&hb->lock); in futex_q_lock()
551 __releases(&hb->lock) in futex_q_unlock()
553 spin_unlock(&hb->lock); in futex_q_unlock()
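
The comment fragment at line 536 refers to a waiter counter that is bumped before the spinlock is taken, so a waker checking the counter cannot miss a waiter still spinning on hb->lock. A hedged reconstruction of the pair, assuming the futex_hb_waiters_inc()/dec() helpers:

	struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
		__acquires(&hb->lock)
	{
		struct futex_hash_bucket *hb = futex_hash(&q->key);

		/* Advertise the waiter before taking the lock (implies a
		 * barrier), so a concurrent waker won't miss a to-be-slept
		 * task still waiting for the spinlock. */
		futex_hb_waiters_inc(hb);

		q->lock_ptr = &hb->lock;
		spin_lock(&hb->lock);
		return hb;
	}

	void futex_q_unlock(struct futex_hash_bucket *hb)
		__releases(&hb->lock)
	{
		spin_unlock(&hb->lock);
		futex_hb_waiters_dec(hb);	/* error path: we never queued */
	}
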
557 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb) in __futex_queue() argument
563 * - either the real thread-priority for the real-time threads in __futex_queue()
565 * - or MAX_RT_PRIO for non-RT threads. in __futex_queue()
566 * Thus, all RT-threads are woken first in priority order, and in __futex_queue()
569 prio = min(current->normal_prio, MAX_RT_PRIO); in __futex_queue()
571 plist_node_init(&q->list, prio); in __futex_queue()
572 plist_add(&q->list, &hb->chain); in __futex_queue()
573 q->task = current; in __futex_queue()
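
A worked example of the ordering the clamp at line 569 produces (lower prio value = higher priority; MAX_RT_PRIO is 100; waiter values illustrative):

	waiter A: RT,     normal_prio 10  -> queued at prio 10
	waiter B: RT,     normal_prio 80  -> queued at prio 80
	waiter C: non-RT, normal_prio 120 -> clamped to prio 100
	waiter D: non-RT, normal_prio 120 -> clamped to prio 100

Wakeups go A, B (RT in priority order), then C, D: plist_add() appends within one priority, so all non-RT waiters wake FIFO.
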
577 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
578 * @q: The futex_q to unqueue
580 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
584 * - 1 - if the futex_q was still queued (and we unqueued it);
585 * - 0 - if the futex_q was already removed by the waking thread
587 int futex_unqueue(struct futex_q *q) in futex_unqueue() argument
595 * q->lock_ptr can change between this read and the following spin_lock. in futex_unqueue()
596 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and in futex_unqueue()
599 lock_ptr = READ_ONCE(q->lock_ptr); in futex_unqueue()
603 * q->lock_ptr can change between reading it and in futex_unqueue()
608 * q->lock_ptr must have changed (maybe several times) in futex_unqueue()
615 if (unlikely(lock_ptr != q->lock_ptr)) { in futex_unqueue()
619 __futex_unqueue(q); in futex_unqueue()
621 BUG_ON(q->pi_state); in futex_unqueue()
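
Lines 595-619 sketch the waiter side of the lock-free handshake with the waker, who sets q->lock_ptr to NULL (or repoints it on requeue). A hedged reconstruction of the whole retry loop:

	int futex_unqueue(struct futex_q *q)
	{
		spinlock_t *lock_ptr;
		int ret = 0;

		/* In the common case the waker already unqueued us and we
		 * never take the spinlock at all. */
	retry:
		lock_ptr = READ_ONCE(q->lock_ptr);
		if (lock_ptr != NULL) {
			spin_lock(lock_ptr);
			/* A requeue may have changed q->lock_ptr between the
			 * read and the spin_lock(); drop the stale lock and
			 * start over. */
			if (unlikely(lock_ptr != q->lock_ptr)) {
				spin_unlock(lock_ptr);
				goto retry;
			}
			__futex_unqueue(q);

			BUG_ON(q->pi_state);

			spin_unlock(lock_ptr);
			ret = 1;
		}

		return ret;
	}
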
634 void futex_unqueue_pi(struct futex_q *q) in futex_unqueue_pi() argument
644 if (!plist_node_empty(&q->list)) in futex_unqueue_pi()
645 __futex_unqueue(q); in futex_unqueue_pi()
647 BUG_ON(!q->pi_state); in futex_unqueue_pi()
648 put_pi_state(q->pi_state); in futex_unqueue_pi()
649 q->pi_state = NULL; in futex_unqueue_pi()
657 * Process a futex-list entry, check whether it's owned by the
669 return -1; in handle_futex_death()
673 return -1; in handle_futex_death()
696 * 1) task->robust_list->list_op_pending != NULL in handle_futex_death()
727 * futex_wake() even if OWNER_DIED is already set - in handle_futex_death()
729 * thread-death.) The rest of the cleanup is done in in handle_futex_death()
737 * we want to handle the fault gracefully. If the in handle_futex_death()
738 * access fails we try to fault in the futex with R/W in handle_futex_death()
745 case -EFAULT: in handle_futex_death()
747 return -1; in handle_futex_death()
750 case -EAGAIN: in handle_futex_death()
764 * Wake robust non-PI futexes here. The wakeup of in handle_futex_death()
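
The fragments from handle_futex_death() (lines 696-764) describe the recovery protocol for a lock whose owner died: replace the owner TID with FUTEX_OWNER_DIED while preserving FUTEX_WAITERS, then wake a waiter to take over. A hedged sketch of the core step; the cmpxchg helper's name and the futex_wake() flags vary across kernel versions:

	u32 uval, nval, mval;

	retry:
		if (get_user(uval, uaddr))
			return -1;		/* unmapped: give up */

		if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
			return 0;		/* not owned by the dying task */

		/* Keep FUTEX_WAITERS, replace the owner TID with OWNER_DIED. */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		if (futex_cmpxchg_value_locked(&nval, uaddr, uval, mval) == -EFAULT) {
			if (fault_in_user_writeable(uaddr))	/* R/W fault-in, line 412 */
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;		/* raced with userspace */

		/* Wake one waiter so it can observe OWNER_DIED and recover. */
		if (uval & FUTEX_WAITERS)
			futex_wake(uaddr, FLAGS_SHARED, 1, FUTEX_BITSET_MATCH_ANY);
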
776 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
785 return -EFAULT; in fetch_robust_entry()
794 * Walk curr->robust_list (very carefully, it's a userspace list!)
797 * We silently return on any sign of list-walking problem.
801 struct robust_list_head __user *head = curr->robust_list; in exit_robust_list()
812 if (fetch_robust_entry(&entry, &head->list.next, &pi)) in exit_robust_list()
817 if (get_user(futex_offset, &head->futex_offset)) in exit_robust_list()
820 * Fetch any possibly pending lock-add first, and handle it in exit_robust_list()
823 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) in exit_robust_list()
827 while (entry != &head->list) { in exit_robust_list()
832 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); in exit_robust_list()
849 if (!--limit) in exit_robust_list()
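
The walk in exit_robust_list() is hardened in two ways visible above: the next pointer is fetched before the current entry is handled (line 832), and the iteration count is capped (line 849) so a circular or corrupted user list cannot wedge task exit. A hedged reconstruction of the loop body (HANDLE_DEATH_LIST and the exact handle_futex_death() arguments vary by version):

	while (entry != &head->list) {
		/* Fetch the next entry before handling this one: after
		 * handle_futex_death() the current entry can't be trusted. */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);

		/* A pending lock-add may already be on the list; don't
		 * process it twice. */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;

		if (!--limit)		/* cap: circular/corrupted lists */
			break;

		cond_resched();
	}
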
872 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
879 return -EFAULT; in compat_fetch_robust_entry()
888 * Walk curr->robust_list (very carefully, it's a userspace list!)
891 * We silently return on any sign of list-walking problem.
895 struct compat_robust_list_head __user *head = curr->compat_robust_list; in compat_exit_robust_list()
907 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) in compat_exit_robust_list()
912 if (get_user(futex_offset, &head->futex_offset)) in compat_exit_robust_list()
915 * Fetch any possibly pending lock-add first, and handle it in compat_exit_robust_list()
919 &head->list_op_pending, &pip)) in compat_exit_robust_list()
923 while (entry != (struct robust_list __user *) &head->list) { in compat_exit_robust_list()
929 (compat_uptr_t __user *)&entry->next, &next_pi); in compat_exit_robust_list()
949 if (!--limit) in compat_exit_robust_list()
966 * Kernel cleans up PI-state, but userspace is likely hosed.
967 * (Robust-futex cleanup is separate and might save the day for userspace.)
971 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list()
981 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
983 next = head->next; in exit_pi_state_list()
985 key = pi_state->key; in exit_pi_state_list()
998 if (!refcount_inc_not_zero(&pi_state->refcount)) { in exit_pi_state_list()
999 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
1001 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1004 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
1006 spin_lock(&hb->lock); in exit_pi_state_list()
1007 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1008 raw_spin_lock(&curr->pi_lock); in exit_pi_state_list()
1010 * We dropped the pi-lock, so re-check whether this in exit_pi_state_list()
1011 * task still owns the PI-state: in exit_pi_state_list()
1013 if (head->next != next) { in exit_pi_state_list()
1014 /* retain curr->pi_lock for the loop invariant */ in exit_pi_state_list()
1015 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1016 spin_unlock(&hb->lock); in exit_pi_state_list()
1021 WARN_ON(pi_state->owner != curr); in exit_pi_state_list()
1022 WARN_ON(list_empty(&pi_state->list)); in exit_pi_state_list()
1023 list_del_init(&pi_state->list); in exit_pi_state_list()
1024 pi_state->owner = NULL; in exit_pi_state_list()
1026 raw_spin_unlock(&curr->pi_lock); in exit_pi_state_list()
1027 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); in exit_pi_state_list()
1028 spin_unlock(&hb->lock); in exit_pi_state_list()
1030 rt_mutex_futex_unlock(&pi_state->pi_mutex); in exit_pi_state_list()
1033 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
1035 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
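
The lock ordering in exit_pi_state_list() is visible in the fragments (outermost first): hb->lock (line 1006), then pi_state->pi_mutex.wait_lock (line 1007), then curr->pi_lock (line 1008). Because curr->pi_lock must be dropped before the outer locks can be taken, the list head is re-checked at line 1013 to detect concurrent changes, with curr->pi_lock retained as the loop invariant.
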
1043 if (unlikely(tsk->robust_list)) { in futex_cleanup()
1045 tsk->robust_list = NULL; in futex_cleanup()
1049 if (unlikely(tsk->compat_robust_list)) { in futex_cleanup()
1051 tsk->compat_robust_list = NULL; in futex_cleanup()
1055 if (unlikely(!list_empty(&tsk->pi_state_list))) in futex_cleanup()
1060 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1068 * This is called from the recursive fault handling path in make_task_dead().
1079 if (tsk->futex_state == FUTEX_STATE_EXITING) in futex_exit_recursive()
1080 mutex_unlock(&tsk->futex_exit_mutex); in futex_exit_recursive()
1081 tsk->futex_state = FUTEX_STATE_DEAD; in futex_exit_recursive()
1089 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in in futex_cleanup_begin()
1092 mutex_lock(&tsk->futex_exit_mutex); in futex_cleanup_begin()
1095 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. in futex_cleanup_begin()
1097 * This ensures that all subsequent checks of tsk->futex_state in in futex_cleanup_begin()
1099 * tsk->pi_lock held. in futex_cleanup_begin()
1102 * the state change under tsk->pi_lock by a concurrent waiter must in futex_cleanup_begin()
1105 raw_spin_lock_irq(&tsk->pi_lock); in futex_cleanup_begin()
1106 tsk->futex_state = FUTEX_STATE_EXITING; in futex_cleanup_begin()
1107 raw_spin_unlock_irq(&tsk->pi_lock); in futex_cleanup_begin()
1116 tsk->futex_state = state; in futex_cleanup_end()
1121 mutex_unlock(&tsk->futex_exit_mutex); in futex_cleanup_end()
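
Taken together, the last three fragments form the task-exit sequence. A hedged sketch of the caller that strings them together, modeled on futex_exit_release():

	void futex_exit_release(struct task_struct *tsk)
	{
		futex_cleanup_begin(tsk);	/* exit mutex + FUTEX_STATE_EXITING */
		futex_cleanup(tsk);		/* robust lists + PI state, line 1043+ */
		futex_cleanup_end(tsk, FUTEX_STATE_EXITED);
	}
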