Searched refs:doff (Results 1 – 25 of 108) sorted by relevance

/linux-6.12.1/arch/alpha/lib/
csum_partial_copy.c
181 unsigned long doff, in csum_partial_cfu_src_aligned() argument
190 mskql(partial_dest, doff, partial_dest); in csum_partial_cfu_src_aligned()
195 insql(word, doff, second_dest); in csum_partial_cfu_src_aligned()
200 insqh(word, doff, partial_dest); in csum_partial_cfu_src_aligned()
212 insql(word, doff, second_dest); in csum_partial_cfu_src_aligned()
213 len += doff; in csum_partial_cfu_src_aligned()
220 insqh(word, doff, partial_dest); in csum_partial_cfu_src_aligned()
222 doff = len; in csum_partial_cfu_src_aligned()
225 mskqh(second_dest, doff, second_dest); in csum_partial_cfu_src_aligned()
239 unsigned long soff, unsigned long doff, in csum_partial_cfu_unaligned() argument
[all …]
/linux-6.12.1/include/net/
inet6_hashtables.h
60 struct sk_buff *skb, int doff,
69 struct sk_buff *skb, int doff,
78 struct sk_buff *skb, int doff,
87 struct sk_buff *skb, int doff, in __inet6_lookup() argument
102 return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, in __inet6_lookup()
107 struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff, in inet6_steal_sock() argument
132 reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, in inet6_steal_sock()
147 struct sk_buff *skb, int doff, in __inet6_lookup_skb() argument
157 sk = inet6_steal_sock(net, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, dport, in __inet6_lookup_skb()
165 doff, &ip6h->saddr, sport, in __inet6_lookup_skb()
[all …]
inet_hashtables.h
309 struct sk_buff *skb, int doff,
317 struct sk_buff *skb, int doff, in inet_lookup_listener() argument
321 return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport, in inet_lookup_listener()
386 struct sk_buff *skb, int doff,
393 struct sk_buff *skb, int doff,
410 struct sk_buff *skb, int doff, in __inet_lookup() argument
425 return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, in __inet_lookup()
431 struct sk_buff *skb, int doff, in inet_lookup() argument
439 sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, in inet_lookup()
448 struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff, in inet_steal_sock() argument
[all …]
/linux-6.12.1/net/ipv6/
inet6_hashtables.c
130 struct sk_buff *skb, int doff, in inet6_lookup_reuseport() argument
143 reuse_sk = reuseport_select_sock(sk, phash, skb, doff); in inet6_lookup_reuseport()
152 struct sk_buff *skb, int doff, in inet6_lhash2_lookup() argument
164 result = inet6_lookup_reuseport(net, sk, skb, doff, in inet6_lhash2_lookup()
179 struct sk_buff *skb, int doff, in inet6_lookup_run_sk_lookup() argument
194 reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, in inet6_lookup_run_sk_lookup()
204 struct sk_buff *skb, int doff, in inet6_lookup_listener() argument
216 result = inet6_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff, in inet6_lookup_listener()
226 result = inet6_lhash2_lookup(net, ilb2, skb, doff, in inet6_lookup_listener()
236 result = inet6_lhash2_lookup(net, ilb2, skb, doff, in inet6_lookup_listener()
[all …]
/linux-6.12.1/drivers/net/slip/
slhc.c
268 if (th->doff < sizeof(struct tcphdr) / 4) in slhc_compress()
270 hlen = nlen + th->doff * 4; in slhc_compress()
357 || th->doff != cs->cs_tcp.doff in slhc_compress()
359 || (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){ in slhc_compress()
480 if (th->doff > 5) in slhc_compress()
481 memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4); in slhc_compress()
547 hdrlen = ip->ihl * 4 + thp->doff * 4; in slhc_uncompress()
631 if (thp->doff > 5) { in slhc_uncompress()
632 memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4); in slhc_uncompress()
633 cp += ((thp->doff) - 5) * 4; in slhc_uncompress()
[all …]
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/
flow_dissector.c
118 .tcp.doff = 5,
139 .tcp.doff = 5,
162 .tcp.doff = 5,
185 .tcp.doff = 5,
209 .tcp.doff = 5,
236 .tcp.doff = 5,
258 .tcp.doff = 5,
285 .tcp.doff = 5,
308 .tcp.doff = 5,
331 .tcp.doff = 5,
[all …]
kfree_skb.c
46 if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp", in on_sample()
47 "tcp.doff %x\n", pkt_v6->tcp.doff)) in on_sample()
/linux-6.12.1/net/ipv6/netfilter/
nf_socket_ipv6.c
78 nf_socket_get_sock_v6(struct net *net, struct sk_buff *skb, int doff, in nf_socket_get_sock_v6() argument
87 skb, doff, saddr, sport, daddr, dport, in nf_socket_get_sock_v6()
104 int doff = 0; in nf_sk_lookup_slow_v6() local
127 doff = tproto == IPPROTO_TCP ? in nf_sk_lookup_slow_v6()
139 return nf_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr, in nf_sk_lookup_slow_v6()
/linux-6.12.1/net/ipv4/netfilter/
nf_socket_ipv4.c
66 nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff, in nf_socket_get_sock_v4() argument
75 skb, doff, saddr, sport, daddr, dport, in nf_socket_get_sock_v4()
96 int doff = 0; in nf_sk_lookup_slow_v4() local
114 doff = iph->protocol == IPPROTO_TCP ? in nf_sk_lookup_slow_v4()
146 return nf_socket_get_sock_v4(net, data_skb, doff, protocol, saddr, in nf_sk_lookup_slow_v4()
/linux-6.12.1/drivers/dma/
fsl-edma-common.c
449 edma_cp_tcd_to_reg(fsl_chan, tcd, doff); in fsl_edma_set_tcd_regs()
479 u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int, in fsl_edma_fill_tcd() argument
518 fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff); in fsl_edma_fill_tcd()
583 u16 soff, doff, iter; in fsl_edma_prep_dma_cyclic() local
625 doff = fsl_chan->is_multi_fifo ? 4 : 0; in fsl_edma_prep_dma_cyclic()
630 doff = fsl_chan->cfg.src_addr_width; in fsl_edma_prep_dma_cyclic()
635 soff = doff = 0; in fsl_edma_prep_dma_cyclic()
641 iter, doff, last_sg, major_int, false, true); in fsl_edma_prep_dma_cyclic()
657 u16 soff, doff, iter; in fsl_edma_prep_slave_sg() local
690 doff = 0; in fsl_edma_prep_slave_sg()
[all …]
fsl-edma-trace.h
72 __field(u16, doff)
86 __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff),
110 __entry->doff,
mpc512x_dma.c
153 u32 doff:16; /* Signed destination address offset */ member
636 tcd->doff = 32; in mpc_dma_prep_memcpy()
642 tcd->doff = 16; in mpc_dma_prep_memcpy()
647 tcd->doff = 4; in mpc_dma_prep_memcpy()
652 tcd->doff = 2; in mpc_dma_prep_memcpy()
657 tcd->doff = 1; in mpc_dma_prep_memcpy()
749 tcd->doff = mchan->dwidth; in mpc_dma_prep_slave_sg()
758 tcd->doff = 0; in mpc_dma_prep_slave_sg()
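In the /drivers/dma/ hits above, doff is unrelated to TCP: per the mpc512x_dma.c TCD field comment, it is the signed destination address offset the engine adds after each write. A rough, self-contained sketch of that behaviour (the struct and function names here are illustrative, not the driver's):

```c
#include <stdint.h>
#include <stdio.h>

/* Cut-down transfer control descriptor: only the fields relevant to the
 * doff usage above (real eDMA TCDs carry many more fields). */
struct tcd_sketch {
	uint32_t daddr;   /* destination address */
	int16_t  doff;    /* signed destination address offset added per write */
	uint32_t nbytes;  /* bytes moved per minor loop */
	uint16_t biter;   /* major loop iteration count */
};

/* Simulate how the destination pointer advances: after each minor loop the
 * engine adds doff to daddr, so doff = 0 keeps hitting a FIFO register while
 * doff = transfer width walks linearly through memory. */
static void walk_destination(struct tcd_sketch *tcd)
{
	for (unsigned int i = 0; i < tcd->biter; i++) {
		printf("minor loop %u: write %u bytes at 0x%08x\n",
		       i, (unsigned int)tcd->nbytes, (unsigned int)tcd->daddr);
		tcd->daddr += tcd->doff;
	}
}

int main(void)
{
	struct tcd_sketch tcd = {
		.daddr = 0x1000, .doff = 4, .nbytes = 4, .biter = 3,
	};

	walk_destination(&tcd);
	return 0;
}
```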
/linux-6.12.1/tools/testing/selftests/bpf/progs/
test_tcp_custom_syncookie.c
133 if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4) in tcp_reload_headers()
137 if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0)) in tcp_reload_headers()
164 ctx->tcp->doff * 4, IPPROTO_TCP, csum); in tcp_v4_csum()
170 ctx->tcp->doff * 4, IPPROTO_TCP, csum); in tcp_v6_csum()
180 csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); in tcp_validate_header()
418 ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2; in tcp_handle_syn()
424 csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); in tcp_handle_syn()
xdp_synproxy_kern.c
424 hdr->tcp_len = hdr->tcp->doff * 4; in tcp_dissect()
524 tcp_header->doff = 5; /* doff is part of tcp_flag_word. */ in tcp_gen_synack()
533 tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale); in tcp_gen_synack()
555 hdr->tcp_len = hdr->tcp->doff * 4; in tcpv4_gen_synack()
576 hdr->tcp_len = hdr->tcp->doff * 4; in tcpv6_gen_synack()
718 new_pkt_size = sizeof(*hdr->eth) + ip_len + hdr->tcp->doff * 4; in syncookie_handle_syn()
814 hdr->tcp_len = hdr->tcp->doff * 4; in syncookie_part2()
test_select_reuseport_kern.c
120 if ((th->doff << 2) + sizeof(*cmd) > data_check.len) in _select_by_skb_data()
122 if (bpf_skb_load_bytes(reuse_md, th->doff << 2, &cmd_copy, in _select_by_skb_data()
/linux-6.12.1/net/netfilter/
xt_tcpmss.c
41 if (th->doff*4 < sizeof(*th)) in tcpmss_mt()
44 optlen = th->doff*4 - sizeof(*th); in tcpmss_mt()
nf_synproxy_core.c
28 synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, in synproxy_parse_options() argument
31 int length = (th->doff * 4) - sizeof(*th); in synproxy_parse_options()
37 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); in synproxy_parse_options()
197 optend = protoff + th->doff * 4; in synproxy_tstamp_adjust()
477 nth->doff = tcp_hdr_size / 4; in synproxy_send_client_synack()
523 nth->doff = tcp_hdr_size / 4; in synproxy_send_server_syn()
563 nth->doff = tcp_hdr_size / 4; in synproxy_send_server_ack()
601 nth->doff = tcp_hdr_size / 4; in synproxy_send_client_ack()
890 nth->doff = tcp_hdr_size / 4; in synproxy_send_client_synack_ipv6()
937 nth->doff = tcp_hdr_size / 4; in synproxy_send_server_syn_ipv6()
[all …]
/linux-6.12.1/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
gf100.c
98 read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) in read_div() argument
101 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); in read_div()
112 sclk = read_vco(clk, dsrc + (doff * 4)); in read_div()
115 if (doff <= 2) { in read_div()
116 sctl = nvkm_rd32(device, dctl + (doff * 4)); in read_div()
gk104.c
106 read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl) in read_div() argument
109 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); in read_div()
110 u32 sctl = nvkm_rd32(device, dctl + (doff * 4)); in read_div()
121 u32 sclk = read_vco(clk, dsrc + (doff * 4)); in read_div()
126 return read_vco(clk, dsrc + (doff * 4)); in read_div()
/linux-6.12.1/net/ipv4/
inet_hashtables.c
352 struct sk_buff *skb, int doff, in inet_lookup_reuseport() argument
363 reuse_sk = reuseport_select_sock(sk, phash, skb, doff); in inet_lookup_reuseport()
379 struct sk_buff *skb, int doff, in inet_lhash2_lookup() argument
391 result = inet_lookup_reuseport(net, sk, skb, doff, in inet_lhash2_lookup()
406 struct sk_buff *skb, int doff, in inet_lookup_run_sk_lookup() argument
419 reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, in inet_lookup_run_sk_lookup()
428 struct sk_buff *skb, int doff, in __inet_lookup_listener() argument
440 result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff, in __inet_lookup_listener()
450 result = inet_lhash2_lookup(net, ilb2, skb, doff, in __inet_lookup_listener()
460 result = inet_lhash2_lookup(net, ilb2, skb, doff, in __inet_lookup_listener()
tcp_offload.c
107 if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) in tcp4_gso_segment()
145 thlen = th->doff * 4; in tcp_gso_segment()
289 thlen = th->doff * 4; in tcp_gro_pull_header()
308 unsigned int thlen = th->doff * 4; in tcp_gro_receive()
/linux-6.12.1/tools/include/uapi/linux/
tcp.h
32 doff:4, member
42 __u16 doff:4, member
/linux-6.12.1/drivers/gpu/drm/nouveau/nvkm/engine/disp/
nv50.c
278 const u32 doff = nv50_ior_base(dac); in nv50_dac_clock() local
280 nvkm_mask(device, 0x614280 + doff, 0x07070707, 0x00000000); in nv50_dac_clock()
287 const u32 doff = nv50_ior_base(dac); in nv50_dac_sense() local
291 nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval); in nv50_dac_sense()
294 loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000); in nv50_dac_sense()
304 nv50_dac_power_wait(struct nvkm_device *device, const u32 doff) in nv50_dac_power_wait() argument
307 if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000)) in nv50_dac_power_wait()
316 const u32 doff = nv50_ior_base(dac); in nv50_dac_power() local
324 nv50_dac_power_wait(device, doff); in nv50_dac_power()
325 nvkm_mask(device, 0x61a004 + doff, field, state); in nv50_dac_power()
[all …]
/linux-6.12.1/include/linux/
tcp.h
31 return th->doff * 4; in __tcp_hdrlen()
46 return inner_tcp_hdr(skb)->doff * 4; in inner_tcp_hdrlen()
81 return (tcp_hdr(skb)->doff - 5) * 4; in tcp_optlen()
/linux-6.12.1/include/uapi/linux/
tcp.h
32 doff:4, member
42 __u16 doff:4, member
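Most of the networking hits above use the TCP meaning of doff: as the include/linux/tcp.h and include/uapi/linux/tcp.h entries show, it is a 4-bit data-offset field counted in 32-bit words. A minimal sketch of that arithmetic, assuming the standard struct tcphdr layout (the types and helper names here are illustrative, not the kernel's):

```c
#include <assert.h>
#include <stdint.h>

/* Minimal stand-in for the doff bitfield shown above: doff counts the TCP
 * header length in 32-bit words, so the smallest legal value is 5
 * (20 bytes, no options) and the largest is 15 (60 bytes). */
struct tcp_hdr_sketch {
	uint16_t doff:4;
};

/* Header length in bytes, mirroring __tcp_hdrlen() from include/linux/tcp.h. */
static unsigned int hdrlen(const struct tcp_hdr_sketch *th)
{
	return th->doff * 4;
}

/* Length of the options area, mirroring tcp_optlen(). */
static unsigned int optlen(const struct tcp_hdr_sketch *th)
{
	return (th->doff - 5) * 4;
}

int main(void)
{
	struct tcp_hdr_sketch th = { .doff = 8 };	/* 32-byte header */

	assert(hdrlen(&th) == 32);
	assert(optlen(&th) == 12);	/* 12 bytes of TCP options */
	return 0;
}
```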
