Lines matching refs:sk_lock

Each entry below is a source line number, the matching line, and, where applicable, the enclosing function or a note that the hit is a struct member. The numbers and content appear to come from include/net/sock.h; exact line numbers vary by kernel version.

444	socket_lock_t		sk_lock;	member of struct sock
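
The sk_lock member at line 444 is a socket_lock_t. For orientation, the type is defined in the same header roughly as follows (a sketch; field layout and comments vary by kernel version, and dep_map exists only under CONFIG_DEBUG_LOCK_ALLOC):

    typedef struct {
            spinlock_t              slock;  /* BH-safe spinlock half */
            int                     owned;  /* non-zero while a process-context owner holds the lock */
            wait_queue_head_t       wq;     /* where contending lockers sleep */
            /*
             * The slock doubles as the lockdep representation of the
             * mutex-like socket lock.
             */
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
            struct lockdep_map      dep_map;
    #endif
    } socket_lock_t;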
1591 sk->sk_lock.owned = 0; \
1592 init_waitqueue_head(&sk->sk_lock.wq); \
1593 spin_lock_init(&(sk)->sk_lock.slock); \
1594 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1595 sizeof((sk)->sk_lock)); \
1596 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1598 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
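
Lines 1591-1598 are the body of the sock_lock_init_class_and_name() macro; line 1597 is missing from the listing only because it does not mention sk_lock (it passes (skey), (sname) on to lockdep_set_class_and_name()). Reassembled, and modulo kernel version, the macro reads roughly:

    #define sock_lock_init_class_and_name(sk, sname, skey, name, key)  \
    do {                                                                \
            sk->sk_lock.owned = 0;                                      \
            init_waitqueue_head(&sk->sk_lock.wq);                       \
            spin_lock_init(&(sk)->sk_lock.slock);                       \
            debug_check_no_locks_freed((void *)&(sk)->sk_lock,          \
                                       sizeof((sk)->sk_lock));          \
            lockdep_set_class_and_name(&(sk)->sk_lock.slock,            \
                                       (skey), (sname));                \
            lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
    } while (0)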
1603 return lockdep_is_held(&sk->sk_lock) || in lockdep_sock_is_held()
1604 lockdep_is_held(&sk->sk_lock.slock); in lockdep_sock_is_held()
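
Lines 1603-1604 are the two halves of the single return statement in lockdep_sock_is_held(): for lockdep purposes the socket lock counts as held if either the mutex-like sk_lock or its embedded slock is held. Reassembled:

    static inline bool lockdep_sock_is_held(const struct sock *sk)
    {
            return lockdep_is_held(&sk->sk_lock) ||
                   lockdep_is_held(&sk->sk_lock.slock);
    }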
1619 #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
1621 spin_lock_nested(&((__sk)->sk_lock.slock), \
1623 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
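
bh_lock_sock() and bh_unlock_sock() take only the spinlock half and are intended for softirq context; bh_lock_sock_nested() (line 1621, whose continuation line 1622 passes SINGLE_DEPTH_NESTING and so does not appear in the listing) is the variant used on the receive path. A minimal sketch of the canonical pattern, loosely modeled on tcp_v4_rcv(); handle_skb() and defer_skb() are hypothetical placeholders:

    #include <net/sock.h>

    /* Hypothetical softirq receive handler: take the slock, then check
     * whether a process-context owner currently holds the socket. If it
     * does, the packet must be deferred (e.g. onto the socket backlog)
     * rather than processed here.
     */
    static void example_rcv(struct sock *sk, struct sk_buff *skb)
    {
            bh_lock_sock_nested(sk);
            if (!sock_owned_by_user(sk))
                    handle_skb(sk, skb);    /* hypothetical fast-path handler */
            else
                    defer_skb(sk, skb);     /* hypothetical backlog queueing */
            bh_unlock_sock(sk);
    }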
1625 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
1643 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
1651 mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); in lock_sock_fast_nested()
1665 __releases(&sk->sk_lock.slock) in unlock_sock_fast()
1669 __release(&sk->sk_lock.slock); in unlock_sock_fast()
1671 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in unlock_sock_fast()
1672 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
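
lock_sock_fast() (lines 1625-1672) is an optimization over lock_sock(): if no process-context owner holds the socket it simply keeps the BH-disabled slock and returns false; otherwise it falls back to a full socket-lock acquisition and returns true. unlock_sock_fast() must be told which case occurred. A hedged usage sketch; do_quick_work() is a hypothetical placeholder and must not sleep, since the fast path holds a spinlock with BHs disabled:

    #include <net/sock.h>

    /* Sketch of the lock_sock_fast()/unlock_sock_fast() pairing: the bool
     * returned by lock_sock_fast() records whether the slow path (a full
     * socket lock) was taken, and is passed back on release.
     */
    static void example_fast_section(struct sock *sk)
    {
            bool slow = lock_sock_fast(sk);

            do_quick_work(sk);      /* hypothetical short, non-sleeping critical section */

            unlock_sock_fast(sk, slow);
    }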
1712 return sk->sk_lock.owned; in sock_owned_by_user()
1717 return sk->sk_lock.owned; in sock_owned_by_user_nocheck()
1723 sk->sk_lock.owned = 0; in sock_release_ownership()
1726 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in sock_release_ownership()
1735 !spin_is_locked(&sk->sk_lock.slock); in sock_allow_reclassification()
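
The remaining hits are the owned-flag helpers: sock_owned_by_user() (line 1712) and sock_owned_by_user_nocheck() (1717) report whether a process-context caller currently owns the socket (the former additionally runs a lockdep check); sock_release_ownership() (1723-1726) clears the flag and releases the mutex-like lock for lockdep; and sock_allow_reclassification() (1735) permits lockdep reclassification only while the lock is completely idle, i.e. neither owned nor spin-locked. Process context drives the owned flag through lock_sock()/release_sock(); a minimal sketch with a hypothetical change_option() helper:

    #include <net/sock.h>

    /* Sketch of process-context socket locking: lock_sock() sets
     * sk_lock.owned, so concurrent softirq code sees sock_owned_by_user()
     * return true and defers its work; release_sock() clears the flag and
     * processes anything that was deferred in the meantime.
     */
    static void example_option_update(struct sock *sk, int val)
    {
            lock_sock(sk);
            WARN_ON_ONCE(!sock_owned_by_user(sk)); /* we are the owner here */
            change_option(sk, val);                /* hypothetical state mutation */
            release_sock(sk);
    }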