
Searched refs:skbn (Results 1 – 22 of 22) sorted by relevance

/linux-6.12.1/net/netrom/
nr_out.c
34 struct sk_buff *skbn; in nr_output() local
46 if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL) in nr_output()
49 skb_reserve(skbn, frontlen); in nr_output()
54 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in nr_output()
58 skb_push(skbn, NR_TRANSPORT_LEN); in nr_output()
59 skb_copy_to_linear_data(skbn, transport, in nr_output()
62 skbn->data[4] |= NR_MORE_FLAG; in nr_output()
64 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ in nr_output()
99 struct sk_buff *skb, *skbn; in nr_send_nak_frame() local
105 if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) in nr_send_nak_frame()
[all …]
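The nr_output() hits above show the classic segmentation pattern: an oversized skb is carved into NR_MAX_PACKET_SIZE chunks, each chunk copied into a freshly allocated skbn with headroom reserved, the saved header re-prepended, and every chunk but the last flagged "more follows". A minimal sketch of that pattern, hedged: frag_output() and MY_MORE_FLAG are hypothetical stand-ins, not the NET/ROM code itself.

#include <linux/skbuff.h>
#include <net/sock.h>

#define MY_MORE_FLAG 0x01	/* hypothetical; NET/ROM sets NR_MORE_FLAG here */

/* Split skb's payload into fixed-size fragments, each prefixed with a
 * copy of the saved header, and queue them on the socket write queue. */
static int frag_output(struct sock *sk, struct sk_buff *skb,
		       unsigned int frontlen, unsigned int max_payload)
{
	unsigned char header[64];
	struct sk_buff *skbn;
	int err, len;

	if (WARN_ON(frontlen > sizeof(header)))
		return -EINVAL;

	/* Save the header the caller built, then strip it off. */
	skb_copy_from_linear_data(skb, header, frontlen);
	skb_pull(skb, frontlen);

	while (skb->len > 0) {
		skbn = sock_alloc_send_skb(sk, frontlen + max_payload,
					   0, &err);
		if (!skbn)
			return err;

		skb_reserve(skbn, frontlen);
		len = min_t(int, max_payload, skb->len);

		/* Copy one payload chunk, then re-prepend the header. */
		skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
		skb_pull(skb, len);

		skb_push(skbn, frontlen);
		skb_copy_to_linear_data(skbn, header, frontlen);

		if (skb->len > 0)	/* not the final fragment */
			skbn->data[4] |= MY_MORE_FLAG;

		skb_queue_tail(&sk->sk_write_queue, skbn);
	}

	kfree_skb(skb);
	return 0;
}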
nr_in.c
31 struct sk_buff *skbo, *skbn = skb; in nr_queue_rx_frame() local
48 if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL) in nr_queue_rx_frame()
51 skb_reset_transport_header(skbn); in nr_queue_rx_frame()
55 skb_put(skbn, skbo->len), in nr_queue_rx_frame()
63 return sock_queue_rcv_skb(sk, skbn); in nr_queue_rx_frame()
150 struct sk_buff *skbn; in nr_state3_machine() local
232 while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) { in nr_state3_machine()
233 ns = skbn->data[17]; in nr_state3_machine()
235 if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) { in nr_state3_machine()
239 skb_queue_tail(&temp_queue, skbn); in nr_state3_machine()
[all …]
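nr_queue_rx_frame() is the mirror-image reassembly: fragments accumulate on a per-socket queue until the final one arrives, then a single skbn of the accumulated length is allocated, the fragments are copied in, and the merged buffer goes to sock_queue_rcv_skb(). A hedged sketch of that shape; struct reasm and reasm_rx() are hypothetical names for the nr_sock frag_queue/fraglen state:

#include <linux/skbuff.h>
#include <net/sock.h>

struct reasm {
	struct sk_buff_head frag_queue;	/* fragments received so far */
	unsigned int fraglen;		/* their total payload length */
};

static int reasm_rx(struct sock *sk, struct reasm *r,
		    struct sk_buff *skb, int more)
{
	struct sk_buff *skbn, *skbo;

	r->fraglen += skb->len;
	skb_queue_tail(&r->frag_queue, skb);

	if (more)		/* wait for the final fragment */
		return 0;

	skbn = alloc_skb(r->fraglen, GFP_ATOMIC);
	if (!skbn)
		return 1;

	skb_reset_transport_header(skbn);

	/* Drain the fragment queue into the merged buffer, in order. */
	while ((skbo = skb_dequeue(&r->frag_queue)) != NULL) {
		skb_copy_from_linear_data(skbo,
					  skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);
	}
	r->fraglen = 0;

	return sock_queue_rcv_skb(sk, skbn);
}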
nr_loopback.c
32 struct sk_buff *skbn; in nr_loopback_queue() local
34 if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) { in nr_loopback_queue()
35 skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len); in nr_loopback_queue()
36 skb_reset_transport_header(skbn); in nr_loopback_queue()
38 skb_queue_tail(&loopback_queue, skbn); in nr_loopback_queue()
nr_subr.c
215 struct sk_buff *skbn; in __nr_transmit_reply() local
221 if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL) in __nr_transmit_reply()
224 skb_reserve(skbn, 0); in __nr_transmit_reply()
226 dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN); in __nr_transmit_reply()
257 if (!nr_route_frame(skbn, NULL)) in __nr_transmit_reply()
258 kfree_skb(skbn); in __nr_transmit_reply()
nr_route.c
755 struct sk_buff *skbn; in nr_route_frame() local
808 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) { in nr_route_frame()
815 skb=skbn; in nr_route_frame()
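The nr_route_frame() hit is a different idiom: before handing a frame to a device, it checks for dev->hard_header_len bytes of headroom and, only if that is missing, replaces the skb with an expanded copy via skb_copy_expand(). A sketch of that guard under the assumption that the caller owns skb:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Ensure enough headroom for the device's link-layer header, copying
 * into a larger buffer only when needed (the nr_route_frame() idiom). */
static struct sk_buff *ensure_headroom(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct sk_buff *skbn;

	if (skb_headroom(skb) >= dev->hard_header_len)
		return skb;	/* already fits, use as-is */

	skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
	if (!skbn)
		return NULL;	/* original left untouched for the caller */

	kfree_skb(skb);		/* the expanded copy replaces the original */
	return skbn;
}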
/linux-6.12.1/net/x25/
x25_out.c
49 struct sk_buff *skbn; in x25_output() local
67 skbn = sock_alloc_send_skb(sk, frontlen + max_len, in x25_output()
70 if (!skbn) { in x25_output()
81 skb_reserve(skbn, frontlen); in x25_output()
86 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in x25_output()
90 skb_push(skbn, header_len); in x25_output()
91 skb_copy_to_linear_data(skbn, header, header_len); in x25_output()
95 skbn->data[3] |= X25_EXT_M_BIT; in x25_output()
97 skbn->data[2] |= X25_STD_M_BIT; in x25_output()
100 skb_queue_tail(&sk->sk_write_queue, skbn); in x25_output()
[all …]
x25_forward.c
23 struct sk_buff *skbn; in x25_forward_call() local
72 if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){ in x25_forward_call()
75 x25_transmit_link(skbn, neigh_new); in x25_forward_call()
95 struct sk_buff *skbn; in x25_forward_data() local
115 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){ in x25_forward_data()
119 x25_transmit_link(skbn, nb); in x25_forward_data()
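Note the asymmetry in the two x25_forward hits: x25_forward_call() uses skb_clone(), which shares the data buffer, while x25_forward_data() uses pskb_copy(), which duplicates the linear/header area so it can be modified safely before transmission. The contrast in isolation, with a hypothetical xmit() consumer:

#include <linux/skbuff.h>

/* Forward a frame we will NOT modify: a clone shares the payload. */
static int forward_readonly(struct sk_buff *skb,
			    int (*xmit)(struct sk_buff *))
{
	struct sk_buff *skbn = skb_clone(skb, GFP_ATOMIC);

	if (!skbn)
		return -ENOMEM;
	return xmit(skbn);
}

/* Forward a frame whose headers WILL be rewritten: pskb_copy()
 * gives us a private copy of the linear/header area. */
static int forward_rewritten(struct sk_buff *skb,
			     int (*xmit)(struct sk_buff *))
{
	struct sk_buff *skbn = pskb_copy(skb, GFP_ATOMIC);

	if (!skbn)
		return -ENOMEM;
	skbn->data[2] |= 0x01;	/* example in-place header edit
				 * (assumes a >= 3 byte header)   */
	return xmit(skbn);
}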
x25_in.c
34 struct sk_buff *skbo, *skbn = skb; in x25_queue_rx_frame() local
47 if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){ in x25_queue_rx_frame()
54 skb_reset_transport_header(skbn); in x25_queue_rx_frame()
57 skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len), in x25_queue_rx_frame()
66 skb_put(skbn, skbo->len), in x25_queue_rx_frame()
74 skb_set_owner_r(skbn, sk); in x25_queue_rx_frame()
75 skb_queue_tail(&sk->sk_receive_queue, skbn); in x25_queue_rx_frame()
x25_link.c
67 struct sk_buff *skbn; in x25_link_control() local
125 while ((skbn = skb_dequeue(&nb->queue)) != NULL) in x25_link_control()
126 x25_send_frame(skbn, nb); in x25_link_control()
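x25_link_control() (and rose_link_rx_restart() further down) drain a hold queue once the link comes up: frames parked while the neighbour was down are dequeued and sent in order. The pattern in isolation, where send() is a hypothetical transmit hook that consumes the skb on success:

#include <linux/skbuff.h>

static void drain_hold_queue(struct sk_buff_head *queue,
			     int (*send)(struct sk_buff *))
{
	struct sk_buff *skbn;

	while ((skbn = skb_dequeue(queue)) != NULL)
		if (!send(skbn))
			kfree_skb(skbn);	/* send declined: drop it */
}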
/linux-6.12.1/net/ax25/
ax25_out.c
118 struct sk_buff *skbn; in ax25_output() local
144 if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) { in ax25_output()
151 skb_set_owner_w(skbn, skb->sk); in ax25_output()
158 skb_reserve(skbn, frontlen + 2); in ax25_output()
159 skb_set_network_header(skbn, in ax25_output()
161 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in ax25_output()
162 p = skb_push(skbn, 2); in ax25_output()
172 skb_reserve(skbn, frontlen + 1); in ax25_output()
173 skb_set_network_header(skbn, in ax25_output()
175 skb_copy_from_linear_data(skb, skb_put(skbn, len), len); in ax25_output()
[all …]
ax25_in.c
36 struct sk_buff *skbn, *skbo; in ax25_rx_fragment() local
49 skbn = alloc_skb(AX25_MAX_HEADER_LEN + in ax25_rx_fragment()
52 if (!skbn) { in ax25_rx_fragment()
57 skb_reserve(skbn, AX25_MAX_HEADER_LEN); in ax25_rx_fragment()
59 skbn->dev = ax25->ax25_dev->dev; in ax25_rx_fragment()
60 skb_reset_network_header(skbn); in ax25_rx_fragment()
61 skb_reset_transport_header(skbn); in ax25_rx_fragment()
66 skb_put(skbn, skbo->len), in ax25_rx_fragment()
73 if (ax25_rx_iframe(ax25, skbn) == 0) in ax25_rx_fragment()
74 kfree_skb(skbn); in ax25_rx_fragment()
[all …]
/linux-6.12.1/net/rose/
rose_out.c
48 struct sk_buff *skb, *skbn; in rose_kick() local
76 if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { in rose_kick()
81 skb_set_owner_w(skbn, sk); in rose_kick()
86 rose_send_iframe(sk, skbn); in rose_kick()
rose_loopback.c
36 struct sk_buff *skbn = NULL; in rose_loopback_queue() local
39 skbn = skb_clone(skb, GFP_ATOMIC); in rose_loopback_queue()
41 if (skbn) { in rose_loopback_queue()
43 skb_queue_tail(&loopback_queue, skbn); in rose_loopback_queue()
rose_link.c
141 struct sk_buff *skbn; in rose_link_rx_restart() local
167 while ((skbn = skb_dequeue(&neigh->queue)) != NULL) in rose_link_rx_restart()
168 if (!rose_send_frame(skbn, neigh)) in rose_link_rx_restart()
169 kfree_skb(skbn); in rose_link_rx_restart()
af_rose.c
1212 struct sk_buff *skbn; in rose_sendmsg() local
1223 if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) { in rose_sendmsg()
1228 skbn->sk = sk; in rose_sendmsg()
1229 skbn->free = 1; in rose_sendmsg()
1230 skbn->arp = 1; in rose_sendmsg()
1232 skb_reserve(skbn, frontlen); in rose_sendmsg()
1237 skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); in rose_sendmsg()
1241 skb_push(skbn, ROSE_MIN_LEN); in rose_sendmsg()
1242 skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); in rose_sendmsg()
1245 skbn->data[2] |= M_BIT; in rose_sendmsg()
[all …]
/linux-6.12.1/net/lapb/
lapb_out.c
68 struct sk_buff *skb, *skbn; in lapb_kick() local
85 skbn = skb_copy(skb, GFP_ATOMIC); in lapb_kick()
86 if (!skbn) { in lapb_kick()
92 skb_set_owner_w(skbn, skb->sk); in lapb_kick()
97 lapb_send_iframe(lapb, skbn, LAPB_POLLOFF); in lapb_kick()
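lapb_kick(), like rose_kick() above, never transmits the queued skb itself: it sends a copy (skb_copy() here, skb_clone() in ROSE) and keeps the original so it can be retransmitted until acknowledged. A sketch under that assumption; send_iframe() and the ack queue parameter are hypothetical:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Transmit a copy, keep the original for possible retransmission
 * (the lapb_kick() idiom). Returns 0 on success. */
static int kick_one(struct sk_buff_head *write_q,
		    struct sk_buff_head *ack_q,
		    void (*send_iframe)(struct sk_buff *))
{
	struct sk_buff *skb, *skbn;

	skb = skb_dequeue(write_q);
	if (!skb)
		return -EAGAIN;

	skbn = skb_copy(skb, GFP_ATOMIC);
	if (!skbn) {
		skb_queue_head(write_q, skb);	/* put it back, retry later */
		return -ENOMEM;
	}

	if (skb->sk)
		skb_set_owner_w(skbn, skb->sk);

	send_iframe(skbn);			/* copy goes on the wire   */
	skb_queue_tail(ack_q, skb);		/* original awaits the ack */
	return 0;
}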
/linux-6.12.1/drivers/net/wwan/
mhi_wwan_mbim.c
307 struct sk_buff *skbn; in mhi_mbim_rx()
318 skbn = netdev_alloc_skb(link->ndev, dgram_len); in mhi_mbim_rx()
319 if (!skbn) in mhi_mbim_rx()
322 skb_put(skbn, dgram_len); in mhi_mbim_rx()
323 skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); in mhi_mbim_rx()
325 switch (skbn->data[0] & 0xf0) { in mhi_mbim_rx()
327 skbn->protocol = htons(ETH_P_IP); in mhi_mbim_rx()
330 skbn->protocol = htons(ETH_P_IPV6); in mhi_mbim_rx()
335 dev_kfree_skb_any(skbn); in mhi_mbim_rx()
344 u64_stats_add(&link->rx_bytes, skbn->len); in mhi_mbim_rx()
[all …]
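mhi_mbim_rx() pulls each datagram out of an aggregated MBIM frame into its own skbn, then infers skb->protocol from the IP version nibble, since raw IP datagrams carry no Ethernet header to go by. That version check in isolation (assumes a non-empty linear buffer):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Set skb->protocol from the first byte of a raw IP datagram,
 * as mhi_mbim_rx() does. Returns false for anything that is
 * not IPv4/IPv6; the caller should then drop the buffer. */
static bool set_ip_protocol(struct sk_buff *skbn)
{
	switch (skbn->data[0] & 0xf0) {
	case 0x40:
		skbn->protocol = htons(ETH_P_IP);
		return true;
	case 0x60:
		skbn->protocol = htons(ETH_P_IPV6);
		return true;
	default:
		return false;
	}
}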
/linux-6.12.1/drivers/net/ethernet/freescale/fs_enet/
fs_enet-main.c
97 struct sk_buff *skb, *skbn; in fs_enet_napi() local
218 skbn = fep->rx_skbuff[curidx]; in fs_enet_napi()
229 skbn = netdev_alloc_skb(dev, pkt_len + 2); in fs_enet_napi()
230 if (skbn) { in fs_enet_napi()
231 skb_reserve(skbn, 2); /* align IP header */ in fs_enet_napi()
232 skb_copy_from_linear_data(skb, skbn->data, in fs_enet_napi()
234 swap(skb, skbn); in fs_enet_napi()
241 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); in fs_enet_napi()
243 if (skbn) { in fs_enet_napi()
246 skb_align(skbn, ENET_RX_ALIGN); in fs_enet_napi()
[all …]
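The fs_enet receive path shows the copybreak trick: for short frames it allocates a small skbn, copies the packet into it, and swap()s the pointers so the original full-size buffer stays in the ring for DMA; only larger frames (or allocation failure) force a fresh ENET_RX_FRSIZE replacement. Roughly, under hypothetical names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/minmax.h>

/* Copybreak on receive: hand a small copy up the stack and recycle
 * the DMA-mapped buffer (the fs_enet_napi() idiom). *ringp is the
 * ring slot; the returned skb is the one to pass to the stack, or
 * NULL if the frame must be dropped. */
static struct sk_buff *rx_copybreak(struct net_device *dev,
				    struct sk_buff **ringp,
				    unsigned int pkt_len)
{
	struct sk_buff *skb = *ringp, *skbn;

	skbn = netdev_alloc_skb(dev, pkt_len + 2);
	if (!skbn)
		return NULL;		/* no memory: drop, keep ring buffer */

	skb_reserve(skbn, 2);		/* align the IP header */
	skb_copy_from_linear_data(skb, skb_put(skbn, pkt_len), pkt_len);

	swap(skb, skbn);		/* skb: copy for the stack,
					 * skbn: original back into the ring */
	*ringp = skbn;
	return skb;
}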
/linux-6.12.1/drivers/net/ethernet/qualcomm/rmnet/
rmnet_map_data.c
348 struct sk_buff *skbn; in rmnet_map_deaggregate() local
385 skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); in rmnet_map_deaggregate()
386 if (!skbn) in rmnet_map_deaggregate()
389 skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); in rmnet_map_deaggregate()
390 skb_put(skbn, packet_len); in rmnet_map_deaggregate()
391 memcpy(skbn->data, skb->data, packet_len); in rmnet_map_deaggregate()
394 return skbn; in rmnet_map_deaggregate()
rmnet_handlers.c
112 struct sk_buff *skbn; in rmnet_map_ingress_handler() local
124 while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) in rmnet_map_ingress_handler()
125 __rmnet_map_ingress_handler(skbn, port); in rmnet_map_ingress_handler()
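rmnet splits the work across the two hits above: rmnet_map_deaggregate() peels one MAP packet off the front of the aggregate into a new skbn, and the ingress handler simply loops until it returns NULL. The driver loop reduces to the following, where peel_one() is a hypothetical stand-in for rmnet_map_deaggregate():

#include <linux/skbuff.h>

/* peel_one() consumes data from the front of skb and returns a fresh
 * skbn per inner packet, or NULL when the aggregate is exhausted. */
static void deaggregate_all(struct sk_buff *skb,
			    struct sk_buff *(*peel_one)(struct sk_buff *),
			    void (*rx_one)(struct sk_buff *))
{
	struct sk_buff *skbn;

	while ((skbn = peel_one(skb)) != NULL)
		rx_one(skbn);

	consume_skb(skb);	/* aggregate fully consumed */
}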
/linux-6.12.1/drivers/net/usb/
qmi_wwan.c
167 struct sk_buff *skbn; in qmimux_rx_fixup()
191 skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER); in qmimux_rx_fixup()
192 if (!skbn) in qmimux_rx_fixup()
197 skbn->protocol = htons(ETH_P_IP); in qmimux_rx_fixup()
200 skbn->protocol = htons(ETH_P_IPV6); in qmimux_rx_fixup()
204 kfree_skb(skbn); in qmimux_rx_fixup()
208 skb_reserve(skbn, LL_MAX_HEADER); in qmimux_rx_fixup()
209 skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, pkt_len); in qmimux_rx_fixup()
210 if (netif_rx(skbn) != NET_RX_SUCCESS) { in qmimux_rx_fixup()
/linux-6.12.1/net/qrtr/
af_qrtr.c
883 struct sk_buff *skbn; in qrtr_bcast_enqueue()
887 skbn = pskb_copy(skb, GFP_KERNEL); in qrtr_bcast_enqueue()
888 if (!skbn) in qrtr_bcast_enqueue()
890 skb_set_owner_w(skbn, skb->sk); in qrtr_bcast_enqueue()
891 qrtr_node_enqueue(node, skbn, type, from, to); in qrtr_bcast_enqueue()
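qrtr_bcast_enqueue() is the fan-out variant: one pskb_copy() per destination node (each enqueue may rewrite headers, so a shared clone would not do), with write ownership carried over via skb_set_owner_w(). A sketch over a hypothetical node list; struct bcast_node and its enqueue hook are illustrative, not the qrtr API:

#include <linux/skbuff.h>
#include <linux/list.h>
#include <net/sock.h>

struct bcast_node {
	struct list_head item;
	void (*enqueue)(struct bcast_node *node, struct sk_buff *skb);
};

/* Broadcast by copying: every node gets its own writable skbn
 * (the qrtr_bcast_enqueue() idiom). The original is freed at the end. */
static void bcast_enqueue(struct list_head *nodes, struct sk_buff *skb)
{
	struct bcast_node *node;
	struct sk_buff *skbn;

	list_for_each_entry(node, nodes, item) {
		skbn = pskb_copy(skb, GFP_KERNEL);
		if (!skbn)
			break;
		if (skb->sk)
			skb_set_owner_w(skbn, skb->sk);
		node->enqueue(node, skbn);
	}

	kfree_skb(skb);
}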