Lines Matching +full:lite +full:- +full:on
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
15 * Alan Cox : Turned on udp checksums. I don't want to
34 * struct udp_skb_cb - UDP(-Lite) private variables
37 * @cscov: checksum coverage length (UDP-Lite only)
50 #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
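UDP_SKB_CB() overlays struct udp_skb_cb on the generic skb->cb[] scratch area, so UDP(-Lite) can keep per-packet state such as cscov without any extra allocation. A minimal userspace sketch of the same overlay pattern; struct fake_skb, struct demo_cb and FAKE_CB() are invented names, not part of udp.h:

#include <stdio.h>

/* stand-in for struct sk_buff: every layer may reuse the cb[] scratch area */
struct fake_skb {
	unsigned char cb[48];
	unsigned int len;
};

/* stand-in for struct udp_skb_cb: a protocol-private view of those bytes */
struct demo_cb {
	unsigned short cscov;		/* checksum coverage, as in UDP-Lite */
	unsigned char partial_cov;
};

#define FAKE_CB(skb) ((struct demo_cb *)((skb)->cb))

int main(void)
{
	struct fake_skb skb = { .len = 1200 };

	FAKE_CB(&skb)->cscov = 20;	/* cover only the first 20 bytes */
	printf("cscov=%u len=%u\n", (unsigned)FAKE_CB(&skb)->cscov, skb.len);
	return 0;
}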
53 * struct udp_hslot - UDP hash slot
66 * struct udp_table - UDP table
68 * @hash: hash table, sockets are hashed on (local port)
69 * @hash2: hash table, sockets are hashed on (local port, local address)
85 return &table->hash[udp_hashfn(net, num, table->mask)]; in udp_hashslot()
94 return &table->hash2[hash & table->mask]; in udp_hashslot2()
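Both helpers index a power-of-two slot array by masking a hash value with table->mask. A self-contained sketch of that slot selection; toy_hash() and the constants are invented, only the mask-and-index idea is taken from udp_hashslot()/udp_hashslot2():

#include <stdio.h>

#define SLOTS 256u			/* slot count must be a power of two */
#define MASK  (SLOTS - 1u)

/* toy hash standing in for udp_hashfn(); the kernel also mixes in per-netns state */
static unsigned int toy_hash(unsigned int port)
{
	return port * 2654435761u;	/* Knuth multiplicative hash */
}

int main(void)
{
	unsigned int port = 4789;
	unsigned int slot = toy_hash(port) & MASK;

	printf("port %u hashes to slot %u of %u\n", port, slot, SLOTS);
	return 0;
}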
110 * Generic checksumming routines for UDP(-Lite) v4 and v6
114 return (UDP_SKB_CB(skb)->cscov == skb->len ? in __udp_lib_checksum_complete()
116 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov)); in __udp_lib_checksum_complete()
126 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
128 * @skb: sk_buff containing the filled-in UDP header
135 skb_queue_walk(&sk->sk_write_queue, skb) { in udp_csum_outgoing()
136 csum = csum_add(csum, skb->csum); in udp_csum_outgoing()
144 sizeof(struct udphdr), skb->csum); in udp_csum()
146 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { in udp_csum()
147 csum = csum_add(csum, skb->csum); in udp_csum()
163 if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE) in udp_csum_pull_header()
164 skb->csum = csum_partial(skb->data, sizeof(struct udphdr), in udp_csum_pull_header()
165 skb->csum); in udp_csum_pull_header()
167 UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr); in udp_csum_pull_header()
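The checksum helpers above all build on one's-complement arithmetic: per-fragment sums can be combined with csum_add() (as udp_csum_outgoing() and udp_csum() do over the write queue and the frag_list), and the running sum is only folded and inverted at the very end. A plain C sketch of that arithmetic, not the kernel's optimized csum_partial():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* one's-complement sum over a buffer, folded to 16 bits at the end */
static uint16_t ones_sum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)				/* odd trailing byte */
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t buf[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };
	uint16_t sum = ones_sum(buf, sizeof(buf));

	printf("sum=0x%04x checksum=0x%04x\n",
	       (unsigned)sum, (unsigned)(uint16_t)~sum);
	return 0;
}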
183 skb_queue_head_init(&up->reader_queue); in udp_lib_init_sock()
184 up->forward_threshold = sk->sk_rcvbuf >> 2; in udp_lib_init_sock()
185 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); in udp_lib_init_sock()
188 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
224 hash = jhash(skb->data, 2 * ETH_ALEN, in udp_flow_src_port()
225 (__force u32) skb->protocol); in udp_flow_src_port()
234 /* Since this is being sent on the wire, obfuscate the hash a bit in udp_flow_src_port()
241 return htons((((u64) hash * (max - min)) >> 32) + min); in udp_flow_src_port()
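That return statement maps the 32-bit flow hash onto a port in [min, max) without a division: multiplying by the range width and keeping the upper 32 bits scales the hash proportionally. A quick userspace check of the mapping; hash_to_port() and the sample values are made up, and the kernel additionally converts the result to network byte order with htons():

#include <stdio.h>
#include <stdint.h>

/* scale a 32-bit hash into [min, max) without a modulo, as in the listing */
static uint16_t hash_to_port(uint32_t hash, uint16_t min, uint16_t max)
{
	return (uint16_t)((((uint64_t)hash * (max - min)) >> 32) + min);
}

int main(void)
{
	uint16_t min = 32768, max = 61000;	/* a typical ephemeral range */
	uint32_t samples[] = { 0x0u, 0x40000000u, 0x80000000u, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash=0x%08x -> source port %u\n",
		       (unsigned)samples[i],
		       (unsigned)hash_to_port(samples[i], min, max));
	return 0;
}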
246 return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit); in udp_rqueue_get()
253 return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), in udp_sk_bound_dev_eq()
321 /* UDP uses skb->dev_scratch to cache as much information as possible and avoid
322 * possibly multiple cache misses on dequeue()
325 /* skb->truesize and the stateless bit are embedded in a single field;
333 * will be on cold cache lines at recvmsg time.
334 * skb->len can be stored in 16 bits since the udp header has been
345 return (struct udp_dev_scratch *)&skb->dev_scratch; in udp_skb_scratch()
351 return udp_skb_scratch(skb)->len; in udp_skb_len()
356 return udp_skb_scratch(skb)->csum_unnecessary; in udp_skb_csum_unnecessary()
361 return udp_skb_scratch(skb)->is_linear; in udp_skb_is_linear()
367 return skb->len; in udp_skb_len()
384 return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT; in copy_linear_skb()
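The scratch accessors above exist because everything recvmsg needs (length, checksum state, linearity) is packed into skb->dev_scratch at enqueue time, so dequeue reads one hot word instead of touching cold parts of the skb. A standalone model of that packing; the field layout and names here are illustrative, not the real struct udp_dev_scratch:

#include <stdio.h>

/* pack a 16-bit datagram length and two flags into a single scratch word */
struct demo_scratch {
	unsigned int len : 16;		/* fits: the UDP length field is 16 bits */
	unsigned int csum_unnecessary : 1;
	unsigned int is_linear : 1;
};

int main(void)
{
	struct demo_scratch s = {
		.len = 1472,
		.csum_unnecessary = 1,
		.is_linear = 1,
	};

	printf("scratch size=%zu len=%u csum_unnecessary=%u is_linear=%u\n",
	       sizeof(s), s.len, s.csum_unnecessary, s.is_linear);
	return 0;
}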
388 * SNMP statistics for UDP and UDP-Lite
391 if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
392 else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
394 if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
395 else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
398 if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
399 else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
402 if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
403 else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
409 ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
410 sock_net(sk)->mib.udp_statistics) : \
411 (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
412 sock_net(sk)->mib.udp_stats_in6); \
417 IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
418 sock_net(sk)->mib.udp_statistics; \
423 __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
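All of these macros do the same thing: pick one of the per-netns MIB counter sets (UDP vs. UDP-Lite, IPv4 vs. IPv6) and bump a field in it. A simplified userspace model of that dispatch; demo_inc_stats(), the enum and the mibs[] array are invented for the sketch:

#include <stdio.h>
#include <stdbool.h>

enum demo_field { DEMO_INDATAGRAMS, DEMO_INERRORS, DEMO_NFIELDS };

/* four counter sets indexed as [is_ipv6][is_udplite], standing in for
 * udp_statistics, udplite_statistics, udp_stats_in6 and udplite_stats_in6 */
static unsigned long mibs[2][2][DEMO_NFIELDS];

static void demo_inc_stats(bool is_ipv6, bool is_udplite, enum demo_field f)
{
	mibs[is_ipv6][is_udplite][f]++;
}

int main(void)
{
	demo_inc_stats(false, false, DEMO_INDATAGRAMS);	/* UDPv4 */
	demo_inc_stats(true, true, DEMO_INERRORS);	/* UDP-Litev6 */

	printf("udp4 InDatagrams=%lu udplite6 InErrors=%lu\n",
	       mibs[0][0][DEMO_INDATAGRAMS], mibs[1][1][DEMO_INERRORS]);
	return 0;
}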
479 if (skb->pkt_type == PACKET_LOOPBACK) in udp_rcv_segment()
480 skb->ip_summed = CHECKSUM_PARTIAL; in udp_rcv_segment()
487 int segs_nr = skb_shinfo(skb)->gso_segs; in udp_rcv_segment()
489 atomic_add(segs_nr, &sk->sk_drops); in udp_rcv_segment()
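When a GSO aggregate is dropped here, the drop counter is advanced by gso_segs rather than by one, so the statistics reflect the number of datagrams actually lost. A tiny model of that accounting; the struct and counter names are made up:

#include <stdio.h>

struct demo_gso_skb {
	unsigned int len;
	unsigned short gso_segs;	/* how many wire datagrams were coalesced */
};

int main(void)
{
	struct demo_gso_skb skb = { .len = 8 * 1472, .gso_segs = 8 };
	unsigned long sk_drops = 0;

	sk_drops += skb.gso_segs;	/* dropping one aggregate loses 8 datagrams */
	printf("drops=%lu\n", sk_drops);
	return 0;
}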
501 /* UDP-lite can't land here - no GRO */ in udp_post_segment_fix_csum()
502 WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov); in udp_post_segment_fix_csum()
506 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx) in udp_post_segment_fix_csum()
517 UDP_SKB_CB(skb)->cscov = skb->len; in udp_post_segment_fix_csum()
518 if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid) in udp_post_segment_fix_csum()
519 skb->csum_valid = 1; in udp_post_segment_fix_csum()
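After segmentation each resulting skb is given full checksum coverage (cscov equal to its own length) and, when it arrives with CHECKSUM_NONE and an unset csum_valid, is simply marked valid so it is not re-verified. A small model of that per-segment fixup; struct demo_seg is invented and only mirrors the two assignments visible above:

#include <stdio.h>

/* per-segment metadata after one GSO aggregate has been split (illustrative) */
struct demo_seg {
	unsigned int len;
	unsigned int cscov;
	unsigned int csum_valid;
};

int main(void)
{
	struct demo_seg segs[3] = { { .len = 1472 }, { .len = 1472 }, { .len = 56 } };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		segs[i].cscov = segs[i].len;	/* full coverage, as for plain UDP */
		segs[i].csum_valid = 1;		/* skip a second verification */
		printf("seg %u: len=%u cscov=%u valid=%u\n",
		       i, segs[i].len, segs[i].cscov, segs[i].csum_valid);
	}
	return 0;
}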