Lines Matching refs:sk_lock

568 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in __sk_receive_skb()
572 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in __sk_receive_skb()
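These two annotations bracket the softirq receive fast path: when no user thread owns the socket, the packet is processed immediately, and lockdep is told that sk_lock was taken and released as if it were a mutex. A minimal sketch, assuming the conventional shape of __sk_receive_skb() (receive_skb_sketch() is a hypothetical name; refcounting and drop handling are elided):

#include <net/sock.h>

static int receive_skb_sketch(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk)) {
		/* Process in softirq context; tell lockdep the owned
		 * lock is held as if it were a mutex (lines 568/572). */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
		rc = sk_backlog_rcv(sk, skb);
		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		rc = NET_RX_DROP;	/* backlog limit hit; freeing elided */
	}
	bh_unlock_sock(sk);
	return rc;
}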
3045 __releases(&sk->sk_lock.slock) in __lock_sock()
3046 __acquires(&sk->sk_lock.slock) in __lock_sock()
3051 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
3053 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
3055 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
3059 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
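The cluster above is the classic sleep-until-released loop: __lock_sock() is entered with the spinlock held, parks the task on sk_lock.wq, drops the spinlock across schedule(), then retakes it and rechecks ownership. A sketch reconstructed around the listed lines (lock_sock_wait_sketch() is a hypothetical name for the same pattern):

#include <linux/wait.h>
#include <net/sock.h>

static void lock_sock_wait_sketch(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* Queue exclusively so the release path wakes one waiter. */
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}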
3063 __releases(&sk->sk_lock.slock) in __release_sock()
3064 __acquires(&sk->sk_lock.slock) in __release_sock()
3071 spin_unlock_bh(&sk->sk_lock.slock); in __release_sock()
3085 spin_lock_bh(&sk->sk_lock.slock); in __release_sock()
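Lines 3071 and 3085 bracket the backlog drain in __release_sock(): the spinlock is dropped while the queued skbs are processed, then retaken to check whether more arrived in the meantime. A sketch of the loop, assuming the usual detach-then-process structure (release_sock_drain_sketch() is a hypothetical name; backlog-length accounting is elided):

#include <net/sock.h>

static void release_sock_drain_sketch(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb, *next;

	while ((skb = sk->sk_backlog.head) != NULL) {
		/* Detach the whole queue, then process it unlocked. */
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		spin_unlock_bh(&sk->sk_lock.slock);

		do {
			next = skb->next;
			skb_mark_not_on_list(skb);
			sk_backlog_rcv(sk, skb);
			cond_resched();
			skb = next;
		} while (skb != NULL);

		spin_lock_bh(&sk->sk_lock.slock);
	}
}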
3097 spin_lock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
3104 spin_unlock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
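Here the same spinlock simply brackets a drain triggered from process context, letting a long-running sender flush its own backlog without fully releasing the socket. A hedged sketch, assuming __release_sock() is visible as in current trees (flush_backlog_sketch() is a hypothetical name):

static void flush_backlog_sketch(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	__release_sock(sk);	/* drops and retakes slock internally */
	spin_unlock_bh(&sk->sk_lock.slock);
}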
3617 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
3620 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3623 sk->sk_lock.owned = 1; in lock_sock_nested()
3624 spin_unlock_bh(&sk->sk_lock.slock); in lock_sock_nested()
3630 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
3639 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
3640 wake_up(&sk->sk_lock.wq); in release_sock()
3641 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
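Taken together, lines 3617-3641 show both halves of the owner protocol: lock_sock_nested() sets sk_lock.owned under the spinlock (sleeping in __lock_sock() if another task already owns the socket), and release_sock() clears ownership, runs the accumulated backlog, and wakes the next waiter on sk_lock.wq. Typical caller usage, as a sketch (my_setsockopt() and val are hypothetical):

static int my_setsockopt(struct sock *sk, int val)
{
	lock_sock(sk);		/* may sleep until the owner releases */
	sk->sk_rcvlowat = val;	/* socket fields safe from other owners */
	release_sock(sk);	/* drains backlog, wakes sk_lock.wq */
	return 0;
}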
3645 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) in __lock_sock_fast()
3648 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock_fast()
3670 sk->sk_lock.owned = 1; in __lock_sock_fast()
3671 __acquire(&sk->sk_lock.slock); in __lock_sock_fast()
3672 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock_fast()
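__lock_sock_fast() tries to keep only the spinlock when nobody owns the socket, and falls back to full ownership (lines 3670-3672) when contended. Callers pair it with unlock_sock_fast() via the returned slow-path flag; a sketch (do_quick_op() is a hypothetical caller):

static void do_quick_op(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);	/* true => full lock was taken */

	sk_mem_reclaim(sk);		/* short, non-sleeping work */
	unlock_sock_fast(sk, slow);	/* spin unlock or release_sock */
}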
4346 CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock); in sock_struct_check()
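The final match is not locking logic at all: sock_struct_check() asserts at compile time that sk_lock sits inside the sock_write_rxtx cacheline group of struct sock. A toy illustration of the macro pair involved, assuming the group helpers from linux/cache.h (struct example and its members are hypothetical):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct example {
	__cacheline_group_begin(hot);
	spinlock_t lock;	/* members meant to share a cache line */
	int counter;
	__cacheline_group_end(hot);
};

static void example_struct_check(void)
{
	/* Fails the build if 'lock' drifts outside the 'hot' group. */
	CACHELINE_ASSERT_GROUP_MEMBER(struct example, hot, lock);
}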