Searched refs:sk_rmem_alloc (Results 1 – 25 of 52) sorted by relevance

/linux-6.12.1/net/atm/
atm_misc.c:18  if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) in atm_charge()
atm_misc.c:33  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { in atm_alloc_charge()
atm_misc.c:38  &sk->sk_rmem_alloc); in atm_alloc_charge()
common.c:80  if (atomic_read(&sk->sk_rmem_alloc)) in vcc_sock_destruct()
common.c:82  __func__, atomic_read(&sk->sk_rmem_alloc)); in vcc_sock_destruct()
common.c:161  atomic_set(&sk->sk_rmem_alloc, 0); in vcc_create()
common.c:559  pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), in vcc_recvmsg()
/linux-6.12.1/include/linux/
atmdev.h:254  atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc); in atm_force_charge()
atmdev.h:260  atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc); in atm_return()
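
The ATM hits above show the basic contract for this counter: a protocol charges an skb's truesize into sk_rmem_alloc when it accepts receive memory, compares the counter against sk->sk_rcvbuf to decide whether to accept at all, and subtracts the same amount when the buffer is returned (atm_force_charge()/atm_return() in atmdev.h). Below is a minimal kernel-style sketch of that charge/return pairing; the demo_* helper names are illustrative, not functions from the tree.

#include <linux/atomic.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical helper (not in the kernel): accept an skb only if its
 * truesize still fits under sk_rcvbuf, then account it, mirroring the
 * atm_charge() check listed above. */
static bool demo_rmem_charge(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize > sk->sk_rcvbuf)
                return false;   /* over budget: caller drops the skb */
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        return true;
}

/* Hypothetical counterpart of atm_return(): undo the charge once the
 * buffer leaves the receive path. */
static void demo_rmem_return(struct sock *sk, unsigned int truesize)
{
        atomic_sub(truesize, &sk->sk_rmem_alloc);
}
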
/linux-6.12.1/tools/testing/selftests/bpf/progs/
bpf_iter_netlink.c:45  s->sk_rmem_alloc.counter, in dump_netlink()
bpf_iter_udp4.c:55  rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; in dump_udp4()
bpf_iter_udp6.c:57  rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; in dump_udp6()
bpf_tracing_net.h:122  #define sk_rmem_alloc sk_backlog.rmem_alloc  (macro)
/linux-6.12.1/include/trace/events/
sock.h:85  __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
sock.h:119  __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
/linux-6.12.1/net/bluetooth/rfcomm/
sock.c:56  atomic_add(skb->len, &sk->sk_rmem_alloc); in rfcomm_sk_data_ready()
sock.c:60  if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in rfcomm_sk_data_ready()
sock.c:610  atomic_sub(len, &sk->sk_rmem_alloc); in rfcomm_sock_recvmsg()
sock.c:612  if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) in rfcomm_sock_recvmsg()
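
RFCOMM uses the counter for receive-side flow control with hysteresis: rfcomm_sk_data_ready() charges skb->len and checks the counter against sk_rcvbuf, while rfcomm_sock_recvmsg() uncharges and re-checks against a quarter of sk_rcvbuf before letting data flow again. A small sketch of that two-threshold check follows; the demo_* names are hypothetical.

#include <linux/atomic.h>
#include <net/sock.h>

/* Hypothetical predicates mirroring the rfcomm thresholds above. */
static bool demo_should_throttle(struct sock *sk)
{
        return atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf;
}

static bool demo_can_resume(struct sock *sk)
{
        /* resume only after draining below a quarter of the buffer,
         * so flow control does not flap around a single threshold */
        return atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2);
}
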
/linux-6.12.1/net/ax25/
ax25_std_timer.c:62  if (atomic_read(&sk->sk_rmem_alloc) < in ax25_std_heartbeat_expiry()
ax25_ds_timer.c:124  if (atomic_read(&sk->sk_rmem_alloc) < in ax25_ds_heartbeat_expiry()
ax25_in.c:263  if (atomic_read(&sk->sk_rmem_alloc) >= in ax25_rcv()
/linux-6.12.1/net/rose/
rose_timer.c:142  if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && in rose_heartbeat_expiry()
rose_in.c:182  if (atomic_read(&sk->sk_rmem_alloc) > in rose_state3_machine()
/linux-6.12.1/net/netlink/
af_netlink.c:390  atomic_add(skb->truesize, &sk->sk_rmem_alloc); in netlink_skb_set_owner_r()
af_netlink.c:403  WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in netlink_sock_destruct()
af_netlink.c:1223  if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
af_netlink.c:1237  if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in netlink_attachskb()
af_netlink.c:1390  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in netlink_broadcast_deliver()
af_netlink.c:1394  return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); in netlink_broadcast_deliver()
af_netlink.c:1967  atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { in netlink_recvmsg()
af_netlink.c:2263  if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in netlink_dump()
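
In netlink the counter also doubles as a congestion signal: netlink_broadcast_deliver() accepts an skb only while the counter is within sk_rcvbuf (line 1390) and then reports whether usage has crossed half of the buffer (line 1394), and netlink_dump() backs off once the counter reaches sk_rcvbuf (line 2263). A hedged sketch of that deliver-and-report shape, with a hypothetical demo_* name:

#include <linux/atomic.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical deliver helper: skip a full receiver, otherwise charge
 * the skb and return non-zero if more than half of sk_rcvbuf is now in
 * use, so the broadcast path can note congestion. */
static int demo_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return -1;      /* receiver full: skip it for this broadcast */

        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
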
/linux-6.12.1/net/netrom/
nr_timer.c:136  if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && in nr_heartbeat_expiry()
/linux-6.12.1/net/caif/
caif_socket.c:128  if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in caif_queue_rcv_skb()
caif_socket.c:131  atomic_read(&cf_sk->sk.sk_rmem_alloc), in caif_queue_rcv_skb()
caif_socket.c:257  if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { in caif_check_flow_release()
/linux-6.12.1/include/net/
llc_c_ev.h:220  return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < in llc_conn_space()
sock.h:402  #define sk_rmem_alloc sk_backlog.rmem_alloc  (macro)
sock.h:1080  unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
sock.h:1543  atomic_read(&sk->sk_rmem_alloc); in sk_unused_reserved_mem()
sock.h:2227  return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
sock.h:2325  atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
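
The include/net/sock.h hits are the generic layer behind the per-protocol uses: sk_rmem_alloc is a macro for sk_backlog.rmem_alloc, skb_set_owner_r() performs the truesize charge when an skb is attached to a receiving socket, and sk_rmem_alloc_get() reads the counter back. The matching uncharge normally runs from the skb destructor when the queued buffer is freed (kcm_rfree() below is one example). A simplified, hypothetical version of that owner/destructor pairing follows; the real skb_set_owner_r() additionally orphans the skb and charges sk_mem accounting.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical destructor: give the truesize back when the queued skb
 * is freed, as the sock_rfree()-style destructors in the tree do. */
static void demo_rfree(struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
}

/* Hypothetical, simplified owner setup modeled on skb_set_owner_r():
 * attach the skb to the socket, register the uncharge destructor, and
 * charge the truesize up front. */
static void demo_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        skb->sk = sk;
        skb->destructor = demo_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}
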
/linux-6.12.1/net/x25/
x25_subr.c:376  if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) && in x25_check_rbuf()
x25_in.c:291  if (atomic_read(&sk->sk_rmem_alloc) > in x25_state3_machine()
/linux-6.12.1/net/kcm/
kcmproc.c:150  atomic_read(&psock->sk->sk_rmem_alloc), in kcm_format_psock()
kcmsock.c:178  atomic_sub(len, &sk->sk_rmem_alloc); in kcm_rfree()
kcmsock.c:195  if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in kcm_queue_rcv_skb()
kcmsock.c:206  atomic_add(skb->truesize, &sk->sk_rmem_alloc); in kcm_queue_rcv_skb()
/linux-6.12.1/net/ipv4/
tcp_input.c:609  min(atomic_read(&sk->sk_rmem_alloc), rmem2)); in tcp_clamp_window()
tcp_input.c:611  if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
tcp_input.c:4891  atomic_add(delta, &sk->sk_rmem_alloc); in tcp_try_coalesce()
tcp_input.c:4987  if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
tcp_input.c:5599  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && in tcp_prune_ofo_queue()
tcp_input.c:5633  if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in tcp_prune_queue()
tcp_input.c:5638  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
tcp_input.c:5648  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
tcp_input.c:5656  if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
udp.c:1450  atomic_sub(size, &sk->sk_rmem_alloc); in udp_rmem_release()
udp.c:1525  rmem = atomic_read(&sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
udp.c:1544  atomic_add(size, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
udp.c:1577  atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in __udp_enqueue_schedule_skb()
udp.c:2182  prefetch(&sk->sk_rmem_alloc); in udp_queue_rcv_one_skb()
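
The ipv4 hits show both ends of the same accounting: the TCP ones use the counter against sk_rcvbuf to decide when to clamp the window and prune the out-of-order queue, while __udp_enqueue_schedule_skb() reads the counter up front, charges the size once the skb is accepted, and subtracts it again on its error path, with udp_rmem_release() doing the matching release when data is consumed. A rough, hypothetical sketch of that check/charge/rollback shape (demo_* names and error codes are illustrative, not the tree's exact logic):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Stand-in for the real queueing work (memory scheduling, queue lock,
 * __skb_queue_tail()); always succeeds in this sketch. */
static bool demo_queue_step(struct sock *sk, struct sk_buff *skb)
{
        return true;
}

/* Hypothetical enqueue modeled loosely on __udp_enqueue_schedule_skb(). */
static int demo_enqueue(struct sock *sk, struct sk_buff *skb)
{
        int size = skb->truesize;

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return -ENOMEM;         /* buffer already full: drop early */

        atomic_add(size, &sk->sk_rmem_alloc);

        if (!demo_queue_step(sk, skb)) {
                /* roll the charge back if a later step refuses the skb */
                atomic_sub(size, &sk->sk_rmem_alloc);
                return -ENOBUFS;
        }
        return 0;
}
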
