// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the LL header already pulled,
     so SOCK_RAW should push it back on.

   On receive:
   -----------

   Incoming, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> data

   Outgoing, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> ll header

   Incoming, dev_has_header(dev) == false
     mac_header -> data
       However drivers often make it point to the ll header.
       This is incorrect because the ll header should be invisible to us.
     data       -> data

   Outgoing, dev_has_header(dev) == false
     mac_header -> data. ll header is invisible to us.
     data       -> data

   Summary:
     If dev_has_header(dev) == false we are unable to restore the ll header,
     because it is invisible to us.


   On transmit:
   ------------

   dev_has_header(dev) == true
     mac_header -> ll header
     data       -> ll header

   dev_has_header(dev) == false (ll header is invisible to us)
     mac_header -> data
     data       -> data

   We should set network_header on output to the correct position,
   packet classifier depends on it.
 */
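
/*
 * Illustration only, not part of the build: a hedged user-space sketch
 * of the SOCK_RAW vs. SOCK_DGRAM distinction described above. With
 * SOCK_RAW the received buffer starts at the LL header; with SOCK_DGRAM
 * it starts at the network header and the LL address arrives separately
 * in the sockaddr_ll.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	unsigned char buf[2048];
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &slen);
 *	// SOCK_RAW on an Ethernet-like device: buf[0..13] is the
 *	// Ethernet header. A SOCK_DGRAM socket would instead see the
 *	// network header at buf[0] and the link address in sll.sll_addr.
 */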

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.sll_protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};
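
/* The alias works because sll_family and sll_protocol are both 16-bit
 * fields at the very start of struct sockaddr_ll, so together they
 * occupy exactly the same four bytes as 'origlen'.
 */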

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
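
/*
 * A worked example of the block-layout macros above, assuming a 64-bit
 * build where sizeof(struct tpacket_block_desc) works out to 48: then
 * BLK_HDR_LEN == 48 and BLK_PLUS_PRIV(13) == 48 + ALIGN(13, 8) == 64,
 * i.e. the first frame of a block starts 64 bytes in.
 * GET_NEXT_PRB_BLK_NUM simply wraps: with knum_blocks == 4 the active
 * block cycles 0, 1, 2, 3, 0, ...
 */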

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
	struct sk_buff *next, *head = NULL, *tail;
	int rc;

	rcu_read_lock();
	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		if (!nf_hook_egress(skb, &rc, skb->dev))
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		tail = skb;
	}
	rcu_read_unlock();

	return head;
}
#endif

static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{
	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
		return dev_queue_xmit(skb);

#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_hook_egress_active()) {
		skb = nf_hook_direct_egress(skb);
		if (!skb)
			return NET_XMIT_DROP;
	}
#endif
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}
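
/*
 * PACKET_SOCK_QDISC_BYPASS is armed from user space with the
 * PACKET_QDISC_BYPASS socket option; a minimal sketch of how a user
 * would select the dev_direct_xmit() path above:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * Transmitted packets then skip the qdisc layer entirely, so any tc
 * shaping or policing no longer applies to this socket.
 */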

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	/* WRITE_ONCE() stores are paired with READ_ONCE() loads in
	 * __packet_get_status().
	 */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		WRITE_ONCE(h.h1->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		WRITE_ONCE(h.h2->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		WRITE_ONCE(h.h3->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	/* READ_ONCE() loads are paired with WRITE_ONCE() stores in
	 * __packet_set_status().
	 */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return READ_ONCE(h.h1->tp_status);
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return READ_ONCE(h.h2->tp_status);
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return READ_ONCE(h.h3->tp_status);
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
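
/*
 * The 'flags' argument above comes from the PACKET_TIMESTAMP socket
 * option. For illustration, a hedged user-space sketch requesting raw
 * hardware stamps (assuming the NIC was separately configured to take
 * hardware receive timestamps):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * The TP_STATUS_TS_* bit returned here tells user space which clock
 * actually filled tp_sec/tp_nsec in the ring frame.
 */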

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
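
/*
 * Example of the lookup arithmetic above: with frames_per_block == 4
 * and frame_size == 2048, position 9 resolves to pg_vec_pos == 9 / 4 == 2
 * and frame_offset == 9 % 4 == 1, i.e. byte offset 2048 into the third
 * block of the pg_vec.
 */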

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
{
	u8 *skb_orig_data = skb->data;
	int skb_orig_len = skb->len;
	struct vlan_hdr vhdr, *vh;
	unsigned int header_len;

	if (!dev)
		return 0;

	/* In the SOCK_DGRAM scenario, skb data starts at the network
	 * protocol, which is after the VLAN headers. The outer VLAN
	 * header is at the hard_header_len offset in non-variable
	 * length link layer headers. If it's a VLAN device, the
	 * min_header_len should be used to exclude the VLAN header
	 * size.
	 */
	if (dev->min_header_len == dev->hard_header_len)
		header_len = dev->hard_header_len;
	else if (is_vlan_dev(dev))
		header_len = dev->min_header_len;
	else
		return 0;

	skb_push(skb, skb->data - skb_mac_header(skb));
	vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
	if (skb_orig_data != skb->data) {
		skb->data = skb_orig_data;
		skb->len = skb_orig_len;
	}
	if (unlikely(!vh))
		return 0;

	return ntohs(vh->h_vlan_TCI);
}

static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
{
	__be16 proto = skb->protocol;

	if (unlikely(eth_type_vlan(proto))) {
		u8 *skb_orig_data = skb->data;
		int skb_orig_len = skb->len;

		skb_push(skb, skb->data - skb_mac_header(skb));
		proto = __vlan_get_protocol(skb, proto, NULL);
		if (skb_orig_data != skb->data) {
			skb->data = skb_orig_data;
			skb->len = skb_orig_len;
		}
	}

	return proto;
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
					  struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is that slow, there is no real
	 * need to worry about perf anyway.
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
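
/*
 * Worked example of the heuristic above: a 1MiB block on a 1Gbps link
 * gives mbits = (1MiB * 8) / (1024 * 1024) = 8 and div = 1, so the
 * retire timeout comes out as 8 + 1 = 9 msecs - just above the ~8ms it
 * takes to fill such a block at line rate (see the timer-logic comment
 * further down).
 */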

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
		  jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle and the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and
				 * refreshes the timer as a side effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We deliberately do NOT refresh the timer here,
 * because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we would lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			    struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			     struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);

	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
		ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			       struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
				     struct sk_buff *skb,
				     int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					   po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num - 1;
	else
		prev = rb->prb_bdqc.knum_blocks - 1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
				      struct packet_ring_buffer *rb,
				      int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
				     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
				   struct packet_ring_buffer *rb,
				   int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
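
/*
 * Example of the watermark checks above: with a 16-frame ring and
 * ROOM_POW_OFF == 2, __tpacket_has_room() probes the frame 16 >> 2 == 4
 * slots ahead of head. Only if that frame is still TP_STATUS_KERNEL is
 * ROOM_NORMAL reported, i.e. at least roughly a quarter of the ring is
 * still free; the V3 variant applies the same rule to whole blocks.
 */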

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	bool pressure;
	int ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = get_random_u32_below(ROLLOVER_HLEN);

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return get_random_u32_below(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip &&
		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(rcu_dereference(f->arr[idx]));
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
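
/*
 * Fanout groups are joined from user space with the PACKET_FANOUT
 * socket option: group id in the low 16 bits, type and flags in the
 * high 16 bits. A hedged sketch of two sockets sharing load via the
 * hash demux dispatched above (group id 42 is arbitrary):
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *	setsockopt(fd2, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * Each matching packet is then delivered to exactly one group member,
 * selected here by fanout_demux_hash().
 */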

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	rcu_assign_pointer(f->arr[f->num_members], sk);
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (rcu_dereference_protected(f->arr[i],
					      lockdep_is_held(&f->lock)) == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	rcu_assign_pointer(f->arr[i],
			   rcu_dereference_protected(f->arr[f->num_members - 1],
						     lockdep_is_held(&f->lock)));
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}
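
/*
 * For PACKET_FANOUT_EBPF groups the program fd is installed afterwards
 * with PACKET_FANOUT_DATA; an illustrative sketch, where prog_fd is
 * assumed to be a loaded BPF_PROG_TYPE_SOCKET_FILTER program:
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * The program's return value, taken modulo the member count in
 * fanout_demux_bpf(), picks the receiving socket.
 */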
1691
fanout_set_data(struct packet_sock * po,sockptr_t data,unsigned int len)1692 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1693 unsigned int len)
1694 {
1695 switch (po->fanout->type) {
1696 case PACKET_FANOUT_CBPF:
1697 return fanout_set_data_cbpf(po, data, len);
1698 case PACKET_FANOUT_EBPF:
1699 return fanout_set_data_ebpf(po, data, len);
1700 default:
1701 return -EINVAL;
1702 }
1703 }

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

static int fanout_add(struct sock *sk, struct fanout_args *args)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	u16 type_flags = args->type_flags;
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	u16 id = args->id;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		break;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match) {
		if (match->flags != flags)
			goto out;
		if (args->max_num_members &&
		    args->max_num_members != match->max_num_members)
			goto out;
	} else {
		if (args->max_num_members > PACKET_FANOUT_MAX)
			goto out;
		if (!args->max_num_members)
			/* legacy PACKET_FANOUT_MAX */
			args->max_num_members = 256;
		err = -ENOMEM;
		match = kvzalloc(struct_size(match, arr, args->max_num_members),
				 GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.af_packet_net = read_pnet(&match->net);
		match->prot_hook.id_match = match_fanout_group;
		match->max_num_members = args->max_num_members;
		match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < match->max_num_members) {
			__dev_remove_pack(&po->prot_hook);

			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
			WRITE_ONCE(po->fanout, match);

			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kvfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}
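
/*
 * Illustrative userspace sketch (not part of the kernel; constants come
 * from <linux/if_packet.h>): a bound, running socket joins a fanout
 * group by passing the group id in the low 16 bits and the mode in the
 * high 16 bits, exactly the type_flags layout unpacked by fanout_add():
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int group_id = 42;	// arbitrary, unique per (netns, id) pair
 *	int fanout_arg = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 *
 * Every socket that uses the same id (with compatible flags) lands in
 * the same group, and PACKET_FANOUT_HASH then spreads packets across
 * the members by flow hash.
 */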

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header already pulled,
	 * so push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb),
	 * so this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets from using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	int depth;

	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (likely(skb->dev->type == ARPHRD_ETHER) &&
	    eth_type_vlan(skb->protocol) &&
	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	skb_probe_transport_header(skb);
}

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);
	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
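
/*
 * Illustrative userspace sketch (not part of the kernel; "eth0" is only
 * a placeholder device name): the legacy SOCK_PACKET transmit path above
 * expects a complete link-layer frame plus a device name in a
 * struct sockaddr_pkt:
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = {
 *		.spkt_family   = AF_PACKET,
 *		.spkt_protocol = htons(ETH_P_IP),
 *	};
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 *
 * Note that spkt_protocol is only honoured when msg_namelen is exactly
 * sizeof(struct sockaddr_pkt), per the check at the top of the function.
 */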

static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}
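
/*
 * Illustrative userspace sketch (not part of the kernel): the filter run
 * by run_filter() is the classic BPF program attached with
 * SO_ATTACH_FILTER. Its return value becomes the snap length, so
 * returning 0 drops the packet and a small constant truncates it:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// keep at most 96 bytes
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		   &fprog, sizeof(fprog));
 */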

static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len, int vnet_hdr_sz)
{
	struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };

	if (*len < vnet_hdr_sz)
		return -EINVAL;
	*len -= vnet_hdr_sz;

	if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
}
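
/*
 * Illustrative userspace sketch (not part of the kernel): with
 * PACKET_VNET_HDR enabled, packet_rcv_vnet() prepends a
 * struct virtio_net_hdr describing checksum/GSO state to every packet
 * read from the socket:
 *
 *	int on = 1;
 *	char buf[sizeof(struct virtio_net_hdr) + 65536];
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
 *	// packet payload starts at buf + sizeof(*vh)
 */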

/*
 * This function performs lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	enum skb_drop_reason drop_reason = SKB_CONSUMED;
	struct sock *sk = NULL;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev_has_header(dev)) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	skb_clear_delivery_time(skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	atomic_inc(&po->tp_drops);
	atomic_inc(&sk->sk_drops);
	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	enum skb_drop_reason drop_reason = SKB_CONSUMED;
	struct sock *sk = NULL;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, hdrlen;
	unsigned int netoff;
	struct sk_buff *copy_skb = NULL;
	struct timespec64 ts;
	__u32 ts_status;
	unsigned int slot_id = 0;
	int vnet_hdr_sz = 0;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until current aligned size without forcing
	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev_has_header(dev)) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	/* If we are flooded, just give up */
	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 skb_csum_unnecessary(skb))
		status |= TP_STATUS_CSUM_VALID;
	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
		status |= TP_STATUS_GSO_TCP;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
				       po->tp_reserve;
		vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
		if (vnet_hdr_sz)
			netoff += vnet_hdr_sz;
		macoff = netoff - maclen;
	}
	if (netoff > USHRT_MAX) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (READ_ONCE(po->copy_thresh) &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb) {
					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
					skb_set_owner_r(copy_skb, sk);
				}
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0) {
				snaplen = 0;
				vnet_hdr_sz = 0;
			}
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		u32 nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
			vnet_hdr_sz = 0;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto drop_n_account;

	if (po->tp_version <= TPACKET_V2) {
		slot_id = po->rx_ring.head;
		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
			goto drop_n_account;
		__set_bit(slot_id, po->rx_ring.rx_owner_map);
	}

	if (vnet_hdr_sz &&
	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
				    sizeof(struct virtio_net_hdr),
				    vio_le(), true, 0)) {
		if (po->tp_version == TPACKET_V3)
			prb_clear_blk_fill_status(&po->rx_ring);
		goto drop_n_account;
	}

	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, move it for V1/V2 only, as V3 doesn't need this
		 * at the packet level.
		 */
		if (atomic_read(&po->tp_drops))
			status |= TP_STATUS_LOSING;
	}

	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		skb_clear_delivery_time(copy_skb);
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	/* Always timestamp; prefer an existing software timestamp taken
	 * closer to the time of capture.
	 */
	ts_status = tpacket_get_timestamp(skb, &ts,
					  READ_ONCE(po->tp_tstamp) |
					  SOF_TIMESTAMPING_SOFTWARE);
	if (!ts_status)
		ktime_get_real_ts64(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
			h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
			h.h2->tp_vlan_tpid = ntohs(skb->protocol);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_next_offset and the vlan fields are already populated
		 * above, so DON'T clear them here.
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
		vlan_get_protocol_dgram(skb) : skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		spin_lock(&sk->sk_receive_queue.lock);
		__packet_set_status(po, h.raw, status);
		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
		spin_unlock(&sk->sk_receive_queue.lock);
		sk->sk_data_ready(sk);
	} else if (po->tp_version == TPACKET_V3) {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;

drop_n_account:
	spin_unlock(&sk->sk_receive_queue.lock);
	atomic_inc(&po->tp_drops);
	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;

	sk->sk_data_ready(sk);
	sk_skb_reason_drop(sk, copy_skb, drop_reason);
	goto drop_n_restore;
}
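
/*
 * Illustrative userspace sketch (not part of the kernel): a minimal
 * TPACKET_V2 receive-ring consumer for the path implemented by
 * tpacket_rcv() above. Ring geometry is only a placeholder, but
 * tp_frame_nr must equal tp_block_nr * (tp_block_size / tp_frame_size):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *	int ver = TPACKET_V2;
 *	unsigned int slot = 0;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + slot * req.tp_frame_size);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);			// wait for the kernel
 *	// packet data at (char *)hdr + hdr->tp_mac, hdr->tp_snaplen bytes
 *	hdr->tp_status = TP_STATUS_KERNEL;		// return slot to kernel
 */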

static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_zcopy_get_nouarg(skb);
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);

		complete(&po->skb_completion);
	}

	sock_wfree(skb);
}

static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
	     __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);

	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
		return -EINVAL;

	return 0;
}
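
/*
 * Worked example for the bound enforced above (numbers are only
 * illustrative): for a TCP packet on Ethernet/IPv4, csum_start is
 * 14 + 20 = 34 and csum_offset is 16, so the 2-byte checksum field
 * occupies bytes 50..51. __packet_snd_vnet_parse() therefore raises
 * hdr_len to at least 34 + 16 + 2 = 52, and then rejects the packet
 * if that exceeds the data actually supplied (hdr_len > len).
 */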

static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
{
	int ret;

	if (*len < vnet_hdr_sz)
		return -EINVAL;
	*len -= vnet_hdr_sz;

	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
		return -EFAULT;

	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
	if (ret)
		return ret;

	/* move iter to point to the start of mac header */
	if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));

	return 0;
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = READ_ONCE(po->sk.sk_priority);
	skb->mark = READ_ONCE(po->sk.sk_mark);
	skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid);
	skb_setup_tx_timestamp(skb, sockc->tsflags);
	skb_zcopy_set_nouarg(skb, ph.raw);

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				      NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (copylen) {
		int hdrlen = min_t(int, copylen, tp_len);

		skb_push(skb, dev->hard_header_len);
		skb_put(skb, copylen - dev->hard_header_len);
		err = skb_store_bits(skb, 0, data, hdrlen);
		if (unlikely(err))
			return err;
		if (!dev_validate_header(dev, skb->data, hdrlen))
			return -EINVAL;

		data += hdrlen;
		to_write -= hdrlen;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	refcount_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%u)\n",
			       (unsigned int)MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	packet_parse_headers(skb, sock);

	return tp_len;
}

static int tpacket_parse_header(struct packet_sock *po, void *frame,
				int size_max, void **data)
{
	union tpacket_uhdr ph;
	int tp_len, off;

	ph.raw = frame;

	switch (po->tp_version) {
	case TPACKET_V3:
		if (ph.h3->tp_next_offset != 0) {
			pr_warn_once("variable sized slot not supported");
			return -EINVAL;
		}
		tp_len = ph.h3->tp_len;
		break;
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too big (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
		int off_min, off_max;

		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (po->sk.sk_type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_net;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V3:
				off = ph.h3->tp_mac;
				break;
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
	} else {
		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}

	*data = frame + off;
	return tp_len;
}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct virtio_net_hdr *vnet_hdr = NULL;
	struct sockcm_cookie sockc;
	__be16 proto;
	int err, reserve = 0;
	void *ph;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
	unsigned char *addr = NULL;
	int tp_len, size_max;
	void *data;
	int len_sum = 0;
	int status = TP_STATUS_AVAILABLE;
	int hlen, tlen, copylen = 0;
	long timeo = 0;

	mutex_lock(&po->pg_vec_lock);

	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
	 * we need to confirm it under protection of pg_vec_lock.
	 */
	if (unlikely(!po->tx_ring.pg_vec)) {
		err = -EBUSY;
		goto out;
	}
	if (likely(saddr == NULL)) {
		dev = packet_cached_dev_get(po);
		proto = READ_ONCE(po->num);
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		if (po->sk.sk_socket->type == SOCK_DGRAM) {
			if (dev && msg->msg_namelen < dev->addr_len +
				   offsetof(struct sockaddr_ll, sll_addr))
				goto out_put;
			addr = saddr->sll_addr;
		}
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	sockcm_init(&sockc, &po->sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(&po->sk, msg, &sockc);
		if (unlikely(err))
			goto out_put;
	}

	if (po->sk.sk_socket->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	size_max = po->tx_ring.frame_size
		   - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
		size_max = dev->mtu + reserve + VLAN_HLEN;

	reinit_completion(&po->skb_completion);

	do {
		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);
		if (unlikely(ph == NULL)) {
			if (need_wait && skb) {
				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
				if (timeo <= 0) {
					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
					goto out_put;
				}
			}
			/* check for additional frames */
			continue;
		}

		skb = NULL;
		tp_len = tpacket_parse_header(po, ph, size_max, &data);
		if (tp_len < 0)
			goto tpacket_error;

		status = TP_STATUS_SEND_REQUEST;
		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		if (vnet_hdr_sz) {
			vnet_hdr = data;
			data += vnet_hdr_sz;
			tp_len -= vnet_hdr_sz;
			if (tp_len < 0 ||
			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
				tp_len = -EINVAL;
				goto tpacket_error;
			}
			copylen = __virtio16_to_cpu(vio_le(),
						    vnet_hdr->hdr_len);
		}
		copylen = max_t(int, copylen, dev->hard_header_len);
		skb = sock_alloc_send_skb(&po->sk,
				hlen + tlen + sizeof(struct sockaddr_ll) +
				(copylen - dev->hard_header_len),
				!need_wait, &err);

		if (unlikely(skb == NULL)) {
			/* we assume the socket was initially writeable ... */
			if (likely(len_sum > 0))
				err = len_sum;
			goto out_status;
		}
		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
					  addr, hlen, copylen, &sockc);
		if (likely(tp_len >= 0) &&
		    tp_len > dev->mtu + reserve &&
		    !vnet_hdr_sz &&
		    !packet_extra_vlan_len_allowed(dev, skb))
			tp_len = -EMSGSIZE;

		if (unlikely(tp_len < 0)) {
tpacket_error:
			if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		if (vnet_hdr_sz) {
			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
				tp_len = -EINVAL;
				goto tpacket_error;
			}
			virtio_net_hdr_set_proto(skb, vnet_hdr);
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		packet_inc_pending(&po->tx_ring);

		status = TP_STATUS_SEND_REQUEST;
		err = packet_xmit(po, skb);
		if (unlikely(err != 0)) {
			if (err > 0)
				err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
		/* Note: packet_read_pending() might be slow if we
		 * have to call it, as it's a per-cpu variable, but on
		 * the fast path we already short-circuit the loop with
		 * the first condition and luckily don't have to go
		 * down that path anyway.
		 */
		(need_wait && packet_read_pending(&po->tx_ring))));

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
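
/*
 * Illustrative userspace sketch (not part of the kernel): a minimal
 * TPACKET_V2 transmit-ring producer for tpacket_snd(). The socket must
 * already be bound, and the ring geometry is set up as in the RX
 * example earlier (req, ring, slot, fd reused from there). Without
 * PACKET_TX_HAS_OFF the frame data starts right after the aligned
 * header:
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + slot * req.tp_frame_size);
 *	char *frame = (char *)hdr + TPACKET2_HDRLEN -
 *		      sizeof(struct sockaddr_ll);
 *
 *	memcpy(frame, pkt, pkt_len);		// complete l2 frame
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// flush queued slots
 *
 * tpacket_destruct_skb() later flips each slot back to
 * TP_STATUS_AVAILABLE once the skb has been freed after transmission.
 */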

static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr = NULL;
	int err, reserve = 0;
	struct sockcm_cookie sockc;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	struct packet_sock *po = pkt_sk(sk);
	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
	int hlen, tlen, linear;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (likely(saddr == NULL)) {
		dev = packet_cached_dev_get(po);
		proto = READ_ONCE(po->num);
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		if (sock->type == SOCK_DGRAM) {
			if (dev && msg->msg_namelen < dev->addr_len +
				   offsetof(struct sockaddr_ll, sll_addr))
				goto out_unlock;
			addr = saddr->sll_addr;
		}
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out_unlock;
	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_unlock;

	sockcm_init(&sockc, sk);
	sockc.mark = READ_ONCE(sk->sk_mark);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;
	if (vnet_hdr_sz) {
		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
		if (err)
			goto out_unlock;
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!vnet_hdr.gso_type &&
	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
	linear = max(linear, min_t(int, len, dev->hard_header_len));
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reset_network_header(skb);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM) {
		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (unlikely(offset < 0))
			goto out_free;
	} else if (reserve) {
		skb_reserve(skb, -reserve);
		if (len < reserve + sizeof(struct ipv6hdr) &&
		    dev->min_header_len != dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
	if (err)
		goto out_free;

	if ((sock->type == SOCK_RAW &&
	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
		err = -EINVAL;
		goto out_free;
	}

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = sockc.mark;
	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	if (vnet_hdr_sz) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
		if (err)
			goto out_free;
		len += vnet_hdr_sz;
		virtio_net_hdr_set_proto(skb, &vnet_hdr);
	}

	err = packet_xmit(po, skb);

	if (unlikely(err != 0)) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto out_unlock;
	}

	dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
	 * tpacket_snd() will redo the check safely.
	 */
	if (data_race(po->tx_ring.pg_vec))
		return tpacket_snd(po, msg);

	return packet_snd(sock, msg, len);
}

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct packet_fanout *f;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	lock_sock(sk);
	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}
	release_sock(sk);

	f = fanout_release(sk);

	synchronize_net();

	kfree(po->rollover);
	if (f) {
		fanout_release_data(f);
		kvfree(f);
	}
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
			  __be16 proto)
{
	struct packet_sock *po = pkt_sk(sk);
	struct net_device *dev = NULL;
	bool unlisted = false;
	bool need_rehook;
	int ret = 0;

	lock_sock(sk);
	spin_lock(&po->bind_lock);
	if (!proto)
		proto = po->num;

	rcu_read_lock();

	if (po->fanout) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (name) {
		dev = dev_get_by_name_rcu(sock_net(sk), name);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	} else if (ifindex) {
		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	}

	need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;

	if (need_rehook) {
		dev_hold(dev);
		if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
			rcu_read_unlock();
			/* prevents packet_notifier() from calling
			 * register_prot_hook()
			 */
			WRITE_ONCE(po->num, 0);
			__unregister_prot_hook(sk, true);
			rcu_read_lock();
			if (dev)
				unlisted = !dev_get_by_index_rcu(sock_net(sk),
								 dev->ifindex);
		}

		BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
		WRITE_ONCE(po->num, proto);
		po->prot_hook.type = proto;

		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);

		if (unlikely(unlisted)) {
			po->prot_hook.dev = NULL;
			WRITE_ONCE(po->ifindex, -1);
			packet_cached_dev_reset(po);
		} else {
			netdev_hold(dev, &po->prot_hook.dev_tracker,
				    GFP_ATOMIC);
			po->prot_hook.dev = dev;
			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
			packet_cached_dev_assign(po, dev);
		}
		dev_put(dev);
	}

	if (proto == 0 || !need_rehook)
		goto out_unlock;

	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

out_unlock:
	rcu_read_unlock();
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return ret;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[sizeof(uaddr->sa_data_min) + 1];

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	/* uaddr->sa_data comes from userspace and is not guaranteed to be
	 * NUL-terminated.
	 */
	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
	name[sizeof(uaddr->sa_data_min)] = 0;

	return packet_do_bind(sk, name, 0, 0);
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
}
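
/*
 * Illustrative userspace sketch (not part of the kernel; "eth0" is only
 * a placeholder): binding an AF_PACKET socket to one interface and
 * protocol via the sockaddr_ll path handled by packet_bind():
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */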

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	init_completion(&po->skb_completion);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	err = packet_alloc_pending(po);
	if (err)
		goto out2;

	packet_cached_dev_reset(po);

	sk->sk_destruct = packet_sock_destruct;

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->rollover = NULL;
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;
	po->prot_hook.af_packet_net = sock_net(sk);

	if (proto) {
		po->prot_hook.type = proto;
		__register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_tail_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	sock_prot_inuse_add(net, &packet_proto, 1);

	return 0;
out2:
	sk_free(sk);
out:
	return err;
}
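
/*
 * Illustrative userspace sketch (not part of the kernel): the three
 * socket types accepted by packet_create(), all requiring CAP_NET_RAW:
 *
 *	// raw frames including the link-layer header
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// cooked mode: the kernel strips/builds the link-layer header
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	// obsolete SOCK_PACKET interface, kept for compatibility
 *	int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 */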

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
	unsigned int origlen = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = sock_recv_errqueue(sk, msg, len,
					 SOL_PACKET, PACKET_TX_TIMESTAMP);
		goto out;
	}

	/*
	 * Call the generic datagram receiver. This handles all sorts
	 * of horrible races and re-entrancy so we can forget about it
	 * in the protocol layers.
	 *
	 * Now it will return ENETDOWN if the device has just gone down,
	 * but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, &err);

	/*
	 * An error occurred, so return it. Because skb_recv_datagram()
	 * handles the blocking for us, we don't need to see or worry
	 * about blocking retries.
	 */

	if (skb == NULL)
		goto out;

	packet_rcv_try_clear_pressure(pkt_sk(sk));

	if (vnet_hdr_len) {
		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
		if (err)
			goto out_free;
	}

	/* You lose any data beyond the buffer you gave. If it worries
	 * a user program, it can ask the device for its MTU
	 * anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free;

	if (sock->type != SOCK_PACKET) {
		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

		/* Original length was stored in sockaddr_ll fields */
		origlen = PACKET_SKB_CB(skb)->sa.origlen;
		sll->sll_family = AF_PACKET;
		sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
			vlan_get_protocol_dgram(skb) : skb->protocol;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		const size_t max_len = min(sizeof(skb->cb),
					   sizeof(struct sockaddr_storage));
		int copy_len;

		/* If the address length field is there to be filled
		 * in, we fill it in now.
		 */
		if (sock->type == SOCK_PACKET) {
			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
			msg->msg_namelen = sizeof(struct sockaddr_pkt);
			copy_len = msg->msg_namelen;
		} else {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;

			msg->msg_namelen = sll->sll_halen +
				offsetof(struct sockaddr_ll, sll_addr);
			copy_len = msg->msg_namelen;
			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
				memset(msg->msg_name +
				       offsetof(struct sockaddr_ll, sll_addr),
				       0, sizeof(sll->sll_addr));
				msg->msg_namelen = sizeof(struct sockaddr_ll);
			}
		}
		if (WARN_ON_ONCE(copy_len > max_len)) {
			copy_len = max_len;
			msg->msg_namelen = copy_len;
		}
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
	}

	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		else if (skb->pkt_type != PACKET_OUTGOING &&
			 skb_csum_unnecessary(skb))
			aux.tp_status |= TP_STATUS_CSUM_VALID;
		if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
			aux.tp_status |= TP_STATUS_GSO_TCP;

		aux.tp_len = origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (skb_vlan_tag_present(skb)) {
			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
			struct net_device *dev;

			rcu_read_lock();
			dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
			if (dev) {
				aux.tp_vlan_tci = vlan_get_tci(skb, dev);
				aux.tp_vlan_tpid = ntohs(skb->protocol);
				aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
			} else {
				aux.tp_vlan_tci = 0;
				aux.tp_vlan_tpid = 0;
			}
			rcu_read_unlock();
		} else {
			aux.tp_vlan_tci = 0;
			aux.tp_vlan_tpid = 0;
		}
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 * Free or return the buffer as appropriate. Again this
	 * hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
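
/*
 * Illustrative userspace sketch (not part of the kernel): reading the
 * per-packet metadata that packet_recvmsg() emits when PACKET_AUXDATA
 * is enabled:
 *
 *	int on = 1;
 *	char buf[65536];
 *	char ctl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *			// aux->tp_len is the original, untruncated length
 *		}
 */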
3641
packet_getname_spkt(struct socket * sock,struct sockaddr * uaddr,int peer)3642 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3643 int peer)
3644 {
3645 struct net_device *dev;
3646 struct sock *sk = sock->sk;
3647
3648 if (peer)
3649 return -EOPNOTSUPP;
3650
3651 uaddr->sa_family = AF_PACKET;
3652 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3653 rcu_read_lock();
3654 dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3655 if (dev)
3656 strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3657 rcu_read_unlock();
3658
3659 return sizeof(*uaddr);
3660 }
3661
packet_getname(struct socket * sock,struct sockaddr * uaddr,int peer)3662 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3663 int peer)
3664 {
3665 struct net_device *dev;
3666 struct sock *sk = sock->sk;
3667 struct packet_sock *po = pkt_sk(sk);
3668 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3669 int ifindex;
3670
3671 if (peer)
3672 return -EOPNOTSUPP;
3673
3674 ifindex = READ_ONCE(po->ifindex);
3675 sll->sll_family = AF_PACKET;
3676 sll->sll_ifindex = ifindex;
3677 sll->sll_protocol = READ_ONCE(po->num);
3678 sll->sll_pkttype = 0;
3679 rcu_read_lock();
3680 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3681 if (dev) {
3682 sll->sll_hatype = dev->type;
3683 sll->sll_halen = dev->addr_len;
3684
3685 /* Let __fortify_memcpy_chk() know the actual buffer size. */
3686 memcpy(((struct sockaddr_storage *)sll)->__data +
3687 offsetof(struct sockaddr_ll, sll_addr) -
3688 offsetofend(struct sockaddr_ll, sll_family),
3689 dev->dev_addr, dev->addr_len);
3690 } else {
3691 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3692 sll->sll_halen = 0;
3693 }
3694 rcu_read_unlock();
3695
3696 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3697 }
3698
packet_dev_mc(struct net_device * dev,struct packet_mclist * i,int what)3699 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3700 int what)
3701 {
3702 switch (i->type) {
3703 case PACKET_MR_MULTICAST:
3704 if (i->alen != dev->addr_len)
3705 return -EINVAL;
3706 if (what > 0)
3707 return dev_mc_add(dev, i->addr);
3708 else
3709 return dev_mc_del(dev, i->addr);
3710 break;
3711 case PACKET_MR_PROMISC:
3712 return dev_set_promiscuity(dev, what);
3713 case PACKET_MR_ALLMULTI:
3714 return dev_set_allmulti(dev, what);
3715 case PACKET_MR_UNICAST:
3716 if (i->alen != dev->addr_len)
3717 return -EINVAL;
3718 if (what > 0)
3719 return dev_uc_add(dev, i->addr);
3720 else
3721 return dev_uc_del(dev, i->addr);
3722 break;
3723 default:
3724 break;
3725 }
3726 return 0;
3727 }
3728
3729 static void packet_dev_mclist_delete(struct net_device *dev,
3730 struct packet_mclist **mlp)
3731 {
3732 struct packet_mclist *ml;
3733
3734 while ((ml = *mlp) != NULL) {
3735 if (ml->ifindex == dev->ifindex) {
3736 packet_dev_mc(dev, ml, -1);
3737 *mlp = ml->next;
3738 kfree(ml);
3739 } else
3740 mlp = &ml->next;
3741 }
3742 }
3743
3744 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3745 {
3746 struct packet_sock *po = pkt_sk(sk);
3747 struct packet_mclist *ml, *i;
3748 struct net_device *dev;
3749 int err;
3750
3751 rtnl_lock();
3752
3753 err = -ENODEV;
3754 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3755 if (!dev)
3756 goto done;
3757
3758 err = -EINVAL;
3759 if (mreq->mr_alen > dev->addr_len)
3760 goto done;
3761
3762 err = -ENOBUFS;
3763 i = kmalloc(sizeof(*i), GFP_KERNEL);
3764 if (i == NULL)
3765 goto done;
3766
3767 err = 0;
3768 for (ml = po->mclist; ml; ml = ml->next) {
3769 if (ml->ifindex == mreq->mr_ifindex &&
3770 ml->type == mreq->mr_type &&
3771 ml->alen == mreq->mr_alen &&
3772 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3773 ml->count++;
3774 /* Free the new element ... */
3775 kfree(i);
3776 goto done;
3777 }
3778 }
3779
3780 i->type = mreq->mr_type;
3781 i->ifindex = mreq->mr_ifindex;
3782 i->alen = mreq->mr_alen;
3783 memcpy(i->addr, mreq->mr_address, i->alen);
3784 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3785 i->count = 1;
3786 i->next = po->mclist;
3787 po->mclist = i;
3788 err = packet_dev_mc(dev, i, 1);
3789 if (err) {
3790 po->mclist = i->next;
3791 kfree(i);
3792 }
3793
3794 done:
3795 rtnl_unlock();
3796 return err;
3797 }
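/*
 * Editor's sketch (userspace, not part of the kernel build): joining a
 * group via PACKET_ADD_MEMBERSHIP. mr_alen/mr_address matter only for
 * PACKET_MR_MULTICAST and PACKET_MR_UNICAST; PACKET_MR_PROMISC and
 * PACKET_MR_ALLMULTI take no address.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		       &mreq, sizeof(mreq)) < 0)
 *		perror("PACKET_ADD_MEMBERSHIP");
 */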
3798
3799 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3800 {
3801 struct packet_mclist *ml, **mlp;
3802
3803 rtnl_lock();
3804
3805 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3806 if (ml->ifindex == mreq->mr_ifindex &&
3807 ml->type == mreq->mr_type &&
3808 ml->alen == mreq->mr_alen &&
3809 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3810 if (--ml->count == 0) {
3811 struct net_device *dev;
3812 *mlp = ml->next;
3813 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3814 if (dev)
3815 packet_dev_mc(dev, ml, -1);
3816 kfree(ml);
3817 }
3818 break;
3819 }
3820 }
3821 rtnl_unlock();
3822 return 0;
3823 }
3824
3825 static void packet_flush_mclist(struct sock *sk)
3826 {
3827 struct packet_sock *po = pkt_sk(sk);
3828 struct packet_mclist *ml;
3829
3830 if (!po->mclist)
3831 return;
3832
3833 rtnl_lock();
3834 while ((ml = po->mclist) != NULL) {
3835 struct net_device *dev;
3836
3837 po->mclist = ml->next;
3838 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3839 if (dev != NULL)
3840 packet_dev_mc(dev, ml, -1);
3841 kfree(ml);
3842 }
3843 rtnl_unlock();
3844 }
3845
3846 static int
3847 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3848 unsigned int optlen)
3849 {
3850 struct sock *sk = sock->sk;
3851 struct packet_sock *po = pkt_sk(sk);
3852 int ret;
3853
3854 if (level != SOL_PACKET)
3855 return -ENOPROTOOPT;
3856
3857 switch (optname) {
3858 case PACKET_ADD_MEMBERSHIP:
3859 case PACKET_DROP_MEMBERSHIP:
3860 {
3861 struct packet_mreq_max mreq;
3862 int len = optlen;
3863 memset(&mreq, 0, sizeof(mreq));
3864 if (len < sizeof(struct packet_mreq))
3865 return -EINVAL;
3866 if (len > sizeof(mreq))
3867 len = sizeof(mreq);
3868 if (copy_from_sockptr(&mreq, optval, len))
3869 return -EFAULT;
3870 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3871 return -EINVAL;
3872 if (optname == PACKET_ADD_MEMBERSHIP)
3873 ret = packet_mc_add(sk, &mreq);
3874 else
3875 ret = packet_mc_drop(sk, &mreq);
3876 return ret;
3877 }
3878
3879 case PACKET_RX_RING:
3880 case PACKET_TX_RING:
3881 {
3882 union tpacket_req_u req_u;
3883
3884 ret = -EINVAL;
3885 lock_sock(sk);
3886 switch (po->tp_version) {
3887 case TPACKET_V1:
3888 case TPACKET_V2:
3889 if (optlen < sizeof(req_u.req))
3890 break;
3891 ret = copy_from_sockptr(&req_u.req, optval,
3892 sizeof(req_u.req)) ?
3893 -EINVAL : 0;
3894 break;
3895 case TPACKET_V3:
3896 default:
3897 if (optlen < sizeof(req_u.req3))
3898 break;
3899 ret = copy_from_sockptr(&req_u.req3, optval,
3900 sizeof(req_u.req3)) ?
3901 -EINVAL : 0;
3902 break;
3903 }
3904 if (!ret)
3905 ret = packet_set_ring(sk, &req_u, 0,
3906 optname == PACKET_TX_RING);
3907 release_sock(sk);
3908 return ret;
3909 }
3910 case PACKET_COPY_THRESH:
3911 {
3912 int val;
3913
3914 if (optlen != sizeof(val))
3915 return -EINVAL;
3916 if (copy_from_sockptr(&val, optval, sizeof(val)))
3917 return -EFAULT;
3918
3919 WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
3920 return 0;
3921 }
3922 case PACKET_VERSION:
3923 {
3924 int val;
3925
3926 if (optlen != sizeof(val))
3927 return -EINVAL;
3928 if (copy_from_sockptr(&val, optval, sizeof(val)))
3929 return -EFAULT;
3930 switch (val) {
3931 case TPACKET_V1:
3932 case TPACKET_V2:
3933 case TPACKET_V3:
3934 break;
3935 default:
3936 return -EINVAL;
3937 }
3938 lock_sock(sk);
3939 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3940 ret = -EBUSY;
3941 } else {
3942 po->tp_version = val;
3943 ret = 0;
3944 }
3945 release_sock(sk);
3946 return ret;
3947 }
3948 case PACKET_RESERVE:
3949 {
3950 unsigned int val;
3951
3952 if (optlen != sizeof(val))
3953 return -EINVAL;
3954 if (copy_from_sockptr(&val, optval, sizeof(val)))
3955 return -EFAULT;
3956 if (val > INT_MAX)
3957 return -EINVAL;
3958 lock_sock(sk);
3959 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3960 ret = -EBUSY;
3961 } else {
3962 po->tp_reserve = val;
3963 ret = 0;
3964 }
3965 release_sock(sk);
3966 return ret;
3967 }
3968 case PACKET_LOSS:
3969 {
3970 unsigned int val;
3971
3972 if (optlen != sizeof(val))
3973 return -EINVAL;
3974 if (copy_from_sockptr(&val, optval, sizeof(val)))
3975 return -EFAULT;
3976
3977 lock_sock(sk);
3978 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3979 ret = -EBUSY;
3980 } else {
3981 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3982 ret = 0;
3983 }
3984 release_sock(sk);
3985 return ret;
3986 }
3987 case PACKET_AUXDATA:
3988 {
3989 int val;
3990
3991 if (optlen < sizeof(val))
3992 return -EINVAL;
3993 if (copy_from_sockptr(&val, optval, sizeof(val)))
3994 return -EFAULT;
3995
3996 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3997 return 0;
3998 }
3999 case PACKET_ORIGDEV:
4000 {
4001 int val;
4002
4003 if (optlen < sizeof(val))
4004 return -EINVAL;
4005 if (copy_from_sockptr(&val, optval, sizeof(val)))
4006 return -EFAULT;
4007
4008 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
4009 return 0;
4010 }
4011 case PACKET_VNET_HDR:
4012 case PACKET_VNET_HDR_SZ:
4013 {
4014 int val, hdr_len;
4015
4016 if (sock->type != SOCK_RAW)
4017 return -EINVAL;
4018 if (optlen < sizeof(val))
4019 return -EINVAL;
4020 if (copy_from_sockptr(&val, optval, sizeof(val)))
4021 return -EFAULT;
4022
4023 if (optname == PACKET_VNET_HDR_SZ) {
4024 if (val && val != sizeof(struct virtio_net_hdr) &&
4025 val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
4026 return -EINVAL;
4027 hdr_len = val;
4028 } else {
4029 hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
4030 }
4031 lock_sock(sk);
4032 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4033 ret = -EBUSY;
4034 } else {
4035 WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
4036 ret = 0;
4037 }
4038 release_sock(sk);
4039 return ret;
4040 }
4041 case PACKET_TIMESTAMP:
4042 {
4043 int val;
4044
4045 if (optlen != sizeof(val))
4046 return -EINVAL;
4047 if (copy_from_sockptr(&val, optval, sizeof(val)))
4048 return -EFAULT;
4049
4050 WRITE_ONCE(po->tp_tstamp, val);
4051 return 0;
4052 }
4053 case PACKET_FANOUT:
4054 {
4055 struct fanout_args args = { 0 };
4056
4057 if (optlen != sizeof(int) && optlen != sizeof(args))
4058 return -EINVAL;
4059 if (copy_from_sockptr(&args, optval, optlen))
4060 return -EFAULT;
4061
4062 return fanout_add(sk, &args);
4063 }
4064 case PACKET_FANOUT_DATA:
4065 {
4066 /* Paired with the WRITE_ONCE() in fanout_add() */
4067 if (!READ_ONCE(po->fanout))
4068 return -EINVAL;
4069
4070 return fanout_set_data(po, optval, optlen);
4071 }
4072 case PACKET_IGNORE_OUTGOING:
4073 {
4074 int val;
4075
4076 if (optlen != sizeof(val))
4077 return -EINVAL;
4078 if (copy_from_sockptr(&val, optval, sizeof(val)))
4079 return -EFAULT;
4080 if (val < 0 || val > 1)
4081 return -EINVAL;
4082
4083 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4084 return 0;
4085 }
4086 case PACKET_TX_HAS_OFF:
4087 {
4088 unsigned int val;
4089
4090 if (optlen != sizeof(val))
4091 return -EINVAL;
4092 if (copy_from_sockptr(&val, optval, sizeof(val)))
4093 return -EFAULT;
4094
4095 lock_sock(sk);
4096 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4097 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4098
4099 release_sock(sk);
4100 return 0;
4101 }
4102 case PACKET_QDISC_BYPASS:
4103 {
4104 int val;
4105
4106 if (optlen != sizeof(val))
4107 return -EINVAL;
4108 if (copy_from_sockptr(&val, optval, sizeof(val)))
4109 return -EFAULT;
4110
4111 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4112 return 0;
4113 }
4114 default:
4115 return -ENOPROTOOPT;
4116 }
4117 }
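/*
 * Editor's sketch (userspace, not part of the kernel build): typical
 * option ordering for a TPACKET_V3 receive ring. PACKET_VERSION must be
 * set before PACKET_RX_RING because the version selects which request
 * layout (req vs. req3) is copied in above; the sizes here are only
 * illustrative and must satisfy the checks in packet_set_ring().
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req3 = {
 *		.tp_block_size     = 1 << 22,
 *		.tp_block_nr       = 64,
 *		.tp_frame_size     = 1 << 11,
 *		.tp_frame_nr       = ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov = 60,	// block timeout in ms
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req3, sizeof(req3));
 */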
4118
4119 static int packet_getsockopt(struct socket *sock, int level, int optname,
4120 char __user *optval, int __user *optlen)
4121 {
4122 int len;
4123 int val, lv = sizeof(val);
4124 struct sock *sk = sock->sk;
4125 struct packet_sock *po = pkt_sk(sk);
4126 void *data = &val;
4127 union tpacket_stats_u st;
4128 struct tpacket_rollover_stats rstats;
4129 int drops;
4130
4131 if (level != SOL_PACKET)
4132 return -ENOPROTOOPT;
4133
4134 if (get_user(len, optlen))
4135 return -EFAULT;
4136
4137 if (len < 0)
4138 return -EINVAL;
4139
4140 switch (optname) {
4141 case PACKET_STATISTICS:
4142 spin_lock_bh(&sk->sk_receive_queue.lock);
4143 memcpy(&st, &po->stats, sizeof(st));
4144 memset(&po->stats, 0, sizeof(po->stats));
4145 spin_unlock_bh(&sk->sk_receive_queue.lock);
4146 drops = atomic_xchg(&po->tp_drops, 0);
4147
4148 if (po->tp_version == TPACKET_V3) {
4149 lv = sizeof(struct tpacket_stats_v3);
4150 st.stats3.tp_drops = drops;
4151 st.stats3.tp_packets += drops;
4152 data = &st.stats3;
4153 } else {
4154 lv = sizeof(struct tpacket_stats);
4155 st.stats1.tp_drops = drops;
4156 st.stats1.tp_packets += drops;
4157 data = &st.stats1;
4158 }
4159
4160 break;
4161 case PACKET_AUXDATA:
4162 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4163 break;
4164 case PACKET_ORIGDEV:
4165 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4166 break;
4167 case PACKET_VNET_HDR:
4168 val = !!READ_ONCE(po->vnet_hdr_sz);
4169 break;
4170 case PACKET_VNET_HDR_SZ:
4171 val = READ_ONCE(po->vnet_hdr_sz);
4172 break;
4173 case PACKET_COPY_THRESH:
4174 val = READ_ONCE(pkt_sk(sk)->copy_thresh);
4175 break;
4176 case PACKET_VERSION:
4177 val = po->tp_version;
4178 break;
4179 case PACKET_HDRLEN:
4180 if (len > sizeof(int))
4181 len = sizeof(int);
4182 if (len < sizeof(int))
4183 return -EINVAL;
4184 if (copy_from_user(&val, optval, len))
4185 return -EFAULT;
4186 switch (val) {
4187 case TPACKET_V1:
4188 val = sizeof(struct tpacket_hdr);
4189 break;
4190 case TPACKET_V2:
4191 val = sizeof(struct tpacket2_hdr);
4192 break;
4193 case TPACKET_V3:
4194 val = sizeof(struct tpacket3_hdr);
4195 break;
4196 default:
4197 return -EINVAL;
4198 }
4199 break;
4200 case PACKET_RESERVE:
4201 val = po->tp_reserve;
4202 break;
4203 case PACKET_LOSS:
4204 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4205 break;
4206 case PACKET_TIMESTAMP:
4207 val = READ_ONCE(po->tp_tstamp);
4208 break;
4209 case PACKET_FANOUT:
4210 val = (po->fanout ?
4211 ((u32)po->fanout->id |
4212 ((u32)po->fanout->type << 16) |
4213 ((u32)po->fanout->flags << 24)) :
4214 0);
4215 break;
4216 case PACKET_IGNORE_OUTGOING:
4217 val = READ_ONCE(po->prot_hook.ignore_outgoing);
4218 break;
4219 case PACKET_ROLLOVER_STATS:
4220 if (!po->rollover)
4221 return -EINVAL;
4222 rstats.tp_all = atomic_long_read(&po->rollover->num);
4223 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4224 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4225 data = &rstats;
4226 lv = sizeof(rstats);
4227 break;
4228 case PACKET_TX_HAS_OFF:
4229 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4230 break;
4231 case PACKET_QDISC_BYPASS:
4232 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4233 break;
4234 default:
4235 return -ENOPROTOOPT;
4236 }
4237
4238 if (len > lv)
4239 len = lv;
4240 if (put_user(len, optlen))
4241 return -EFAULT;
4242 if (copy_to_user(optval, data, len))
4243 return -EFAULT;
4244 return 0;
4245 }
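/*
 * Editor's sketch (userspace, not part of the kernel build):
 * PACKET_STATISTICS is destructive, since the counters are zeroed while
 * being read above; each call therefore reports the deltas since the
 * previous call, and tp_packets includes the dropped packets.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets %u drops %u\n", st.tp_packets, st.tp_drops);
 */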
4246
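/*
 * React to device events for every packet socket in the namespace: on
 * NETDEV_UNREGISTER the device's mclist entries are flushed and a bound
 * socket is unbound (ifindex set to -1, device reference dropped); on
 * NETDEV_DOWN the protocol hook is unregistered and the socket sees
 * ENETDOWN; on NETDEV_UP a still-bound socket is re-hooked.
 */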
4247 static int packet_notifier(struct notifier_block *this,
4248 unsigned long msg, void *ptr)
4249 {
4250 struct sock *sk;
4251 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4252 struct net *net = dev_net(dev);
4253
4254 rcu_read_lock();
4255 sk_for_each_rcu(sk, &net->packet.sklist) {
4256 struct packet_sock *po = pkt_sk(sk);
4257
4258 switch (msg) {
4259 case NETDEV_UNREGISTER:
4260 if (po->mclist)
4261 packet_dev_mclist_delete(dev, &po->mclist);
4262 fallthrough;
4263
4264 case NETDEV_DOWN:
4265 if (dev->ifindex == po->ifindex) {
4266 spin_lock(&po->bind_lock);
4267 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4268 __unregister_prot_hook(sk, false);
4269 sk->sk_err = ENETDOWN;
4270 if (!sock_flag(sk, SOCK_DEAD))
4271 sk_error_report(sk);
4272 }
4273 if (msg == NETDEV_UNREGISTER) {
4274 packet_cached_dev_reset(po);
4275 WRITE_ONCE(po->ifindex, -1);
4276 netdev_put(po->prot_hook.dev,
4277 &po->prot_hook.dev_tracker);
4278 po->prot_hook.dev = NULL;
4279 }
4280 spin_unlock(&po->bind_lock);
4281 }
4282 break;
4283 case NETDEV_UP:
4284 if (dev->ifindex == po->ifindex) {
4285 spin_lock(&po->bind_lock);
4286 if (po->num)
4287 register_prot_hook(sk);
4288 spin_unlock(&po->bind_lock);
4289 }
4290 break;
4291 }
4292 }
4293 rcu_read_unlock();
4294 return NOTIFY_DONE;
4295 }
4296
4297
4298 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4299 unsigned long arg)
4300 {
4301 struct sock *sk = sock->sk;
4302
4303 switch (cmd) {
4304 case SIOCOUTQ:
4305 {
4306 int amount = sk_wmem_alloc_get(sk);
4307
4308 return put_user(amount, (int __user *)arg);
4309 }
4310 case SIOCINQ:
4311 {
4312 struct sk_buff *skb;
4313 int amount = 0;
4314
4315 spin_lock_bh(&sk->sk_receive_queue.lock);
4316 skb = skb_peek(&sk->sk_receive_queue);
4317 if (skb)
4318 amount = skb->len;
4319 spin_unlock_bh(&sk->sk_receive_queue.lock);
4320 return put_user(amount, (int __user *)arg);
4321 }
4322 #ifdef CONFIG_INET
4323 case SIOCADDRT:
4324 case SIOCDELRT:
4325 case SIOCDARP:
4326 case SIOCGARP:
4327 case SIOCSARP:
4328 case SIOCGIFADDR:
4329 case SIOCSIFADDR:
4330 case SIOCGIFBRDADDR:
4331 case SIOCSIFBRDADDR:
4332 case SIOCGIFNETMASK:
4333 case SIOCSIFNETMASK:
4334 case SIOCGIFDSTADDR:
4335 case SIOCSIFDSTADDR:
4336 case SIOCSIFFLAGS:
4337 return inet_dgram_ops.ioctl(sock, cmd, arg);
4338 #endif
4339
4340 default:
4341 return -ENOIOCTLCMD;
4342 }
4343 return 0;
4344 }
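/*
 * Editor's sketch (userspace, not part of the kernel build): unlike
 * byte-stream sockets, SIOCINQ here reports the length of the next
 * queued packet rather than the total of all queued bytes, so it can
 * size a receive buffer that avoids MSG_TRUNC. handle_packet() is a
 * stand-in for application logic.
 *
 *	int next_len = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &next_len) == 0 && next_len > 0)
 *		handle_packet(next_len);
 */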
4345
4346 static __poll_t packet_poll(struct file *file, struct socket *sock,
4347 poll_table *wait)
4348 {
4349 struct sock *sk = sock->sk;
4350 struct packet_sock *po = pkt_sk(sk);
4351 __poll_t mask = datagram_poll(file, sock, wait);
4352
4353 spin_lock_bh(&sk->sk_receive_queue.lock);
4354 if (po->rx_ring.pg_vec) {
4355 if (!packet_previous_rx_frame(po, &po->rx_ring,
4356 TP_STATUS_KERNEL))
4357 mask |= EPOLLIN | EPOLLRDNORM;
4358 }
4359 packet_rcv_try_clear_pressure(po);
4360 spin_unlock_bh(&sk->sk_receive_queue.lock);
4361 spin_lock_bh(&sk->sk_write_queue.lock);
4362 if (po->tx_ring.pg_vec) {
4363 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4364 mask |= EPOLLOUT | EPOLLWRNORM;
4365 }
4366 spin_unlock_bh(&sk->sk_write_queue.lock);
4367 return mask;
4368 }
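/*
 * Editor's sketch (userspace, not part of the kernel build): a typical
 * RX-ring receive loop driven by the poll logic above. frame() and
 * consume() are hypothetical helpers; ownership of a frame is returned
 * to the kernel by writing TP_STATUS_KERNEL back to tp_status.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = frame(ring, head);	// next frame slot
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	consume(hdr);
 *	hdr->tp_status = TP_STATUS_KERNEL;
 *	head = (head + 1) % frame_nr;
 */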
4369
4370
4371 /* Dirty? Well, I still have not found a better way to account
4372 * for user mmaps.
4373 */
4374
4375 static void packet_mm_open(struct vm_area_struct *vma)
4376 {
4377 struct file *file = vma->vm_file;
4378 struct socket *sock = file->private_data;
4379 struct sock *sk = sock->sk;
4380
4381 if (sk)
4382 atomic_long_inc(&pkt_sk(sk)->mapped);
4383 }
4384
4385 static void packet_mm_close(struct vm_area_struct *vma)
4386 {
4387 struct file *file = vma->vm_file;
4388 struct socket *sock = file->private_data;
4389 struct sock *sk = sock->sk;
4390
4391 if (sk)
4392 atomic_long_dec(&pkt_sk(sk)->mapped);
4393 }
4394
4395 static const struct vm_operations_struct packet_mmap_ops = {
4396 .open = packet_mm_open,
4397 .close = packet_mm_close,
4398 };
4399
4400 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4401 unsigned int len)
4402 {
4403 int i;
4404
4405 for (i = 0; i < len; i++) {
4406 if (likely(pg_vec[i].buffer)) {
4407 if (is_vmalloc_addr(pg_vec[i].buffer))
4408 vfree(pg_vec[i].buffer);
4409 else
4410 free_pages((unsigned long)pg_vec[i].buffer,
4411 order);
4412 pg_vec[i].buffer = NULL;
4413 }
4414 }
4415 kfree(pg_vec);
4416 }
4417
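/*
 * Ring blocks are allocated with a three-step fallback: physically
 * contiguous pages without retry or warning first, then vmalloc, and
 * finally contiguous pages again with __GFP_NORETRY cleared so that
 * reclaim may make progress.
 */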
4418 static char *alloc_one_pg_vec_page(unsigned long order)
4419 {
4420 char *buffer;
4421 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4422 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4423
4424 buffer = (char *) __get_free_pages(gfp_flags, order);
4425 if (buffer)
4426 return buffer;
4427
4428 /* __get_free_pages failed, fall back to vmalloc */
4429 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4430 if (buffer)
4431 return buffer;
4432
4433 /* vmalloc failed, let's dig into swap here */
4434 gfp_flags &= ~__GFP_NORETRY;
4435 buffer = (char *) __get_free_pages(gfp_flags, order);
4436 if (buffer)
4437 return buffer;
4438
4439 /* complete and utter failure */
4440 return NULL;
4441 }
4442
4443 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4444 {
4445 unsigned int block_nr = req->tp_block_nr;
4446 struct pgv *pg_vec;
4447 int i;
4448
4449 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4450 if (unlikely(!pg_vec))
4451 goto out;
4452
4453 for (i = 0; i < block_nr; i++) {
4454 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4455 if (unlikely(!pg_vec[i].buffer))
4456 goto out_free_pgvec;
4457 }
4458
4459 out:
4460 return pg_vec;
4461
4462 out_free_pgvec:
4463 free_pg_vec(pg_vec, order, block_nr);
4464 pg_vec = NULL;
4465 goto out;
4466 }
4467
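/*
 * Create or free a packet ring. @closing is set when called from socket
 * teardown, @tx_ring selects the transmit ring, and a request with
 * tp_block_nr == 0 (and tp_frame_nr == 0) destroys the current ring.
 * The sequence: validate the geometry, allocate the new page vector,
 * unregister the protocol hook, swap rings under the queue lock, then
 * re-register the hook and free whatever was swapped out.
 */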
4468 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4469 int closing, int tx_ring)
4470 {
4471 struct pgv *pg_vec = NULL;
4472 struct packet_sock *po = pkt_sk(sk);
4473 unsigned long *rx_owner_map = NULL;
4474 int was_running, order = 0;
4475 struct packet_ring_buffer *rb;
4476 struct sk_buff_head *rb_queue;
4477 __be16 num;
4478 int err;
4479 /* Local alias to keep code churn minimal */
4480 struct tpacket_req *req = &req_u->req;
4481
4482 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4483 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4484
4485 err = -EBUSY;
4486 if (!closing) {
4487 if (atomic_long_read(&po->mapped))
4488 goto out;
4489 if (packet_read_pending(rb))
4490 goto out;
4491 }
4492
4493 if (req->tp_block_nr) {
4494 unsigned int min_frame_size;
4495
4496 /* Sanity tests and some calculations */
4497 err = -EBUSY;
4498 if (unlikely(rb->pg_vec))
4499 goto out;
4500
4501 switch (po->tp_version) {
4502 case TPACKET_V1:
4503 po->tp_hdrlen = TPACKET_HDRLEN;
4504 break;
4505 case TPACKET_V2:
4506 po->tp_hdrlen = TPACKET2_HDRLEN;
4507 break;
4508 case TPACKET_V3:
4509 po->tp_hdrlen = TPACKET3_HDRLEN;
4510 break;
4511 }
4512
4513 err = -EINVAL;
4514 if (unlikely((int)req->tp_block_size <= 0))
4515 goto out;
4516 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4517 goto out;
4518 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4519 if (po->tp_version >= TPACKET_V3 &&
4520 req->tp_block_size <
4521 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4522 goto out;
4523 if (unlikely(req->tp_frame_size < min_frame_size))
4524 goto out;
4525 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4526 goto out;
4527
4528 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4529 if (unlikely(rb->frames_per_block == 0))
4530 goto out;
4531 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4532 goto out;
4533 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4534 req->tp_frame_nr))
4535 goto out;
4536
4537 err = -ENOMEM;
4538 order = get_order(req->tp_block_size);
4539 pg_vec = alloc_pg_vec(req, order);
4540 if (unlikely(!pg_vec))
4541 goto out;
4542 switch (po->tp_version) {
4543 case TPACKET_V3:
4544 /* Block transmit is not supported yet */
4545 if (!tx_ring) {
4546 init_prb_bdqc(po, rb, pg_vec, req_u);
4547 } else {
4548 struct tpacket_req3 *req3 = &req_u->req3;
4549
4550 if (req3->tp_retire_blk_tov ||
4551 req3->tp_sizeof_priv ||
4552 req3->tp_feature_req_word) {
4553 err = -EINVAL;
4554 goto out_free_pg_vec;
4555 }
4556 }
4557 break;
4558 default:
4559 if (!tx_ring) {
4560 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4561 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4562 if (!rx_owner_map)
4563 goto out_free_pg_vec;
4564 }
4565 break;
4566 }
4567 }
4568 /* tp_block_nr == 0: no ring requested, this is a teardown */
4569 else {
4570 err = -EINVAL;
4571 if (unlikely(req->tp_frame_nr))
4572 goto out;
4573 }
4574
4575
4576 /* Detach socket from network */
4577 spin_lock(&po->bind_lock);
4578 was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4579 num = po->num;
4580 if (was_running) {
4581 WRITE_ONCE(po->num, 0);
4582 __unregister_prot_hook(sk, false);
4583 }
4584 spin_unlock(&po->bind_lock);
4585
4586 synchronize_net();
4587
4588 err = -EBUSY;
4589 mutex_lock(&po->pg_vec_lock);
4590 if (closing || atomic_long_read(&po->mapped) == 0) {
4591 err = 0;
4592 spin_lock_bh(&rb_queue->lock);
4593 swap(rb->pg_vec, pg_vec);
4594 if (po->tp_version <= TPACKET_V2)
4595 swap(rb->rx_owner_map, rx_owner_map);
4596 rb->frame_max = (req->tp_frame_nr - 1);
4597 rb->head = 0;
4598 rb->frame_size = req->tp_frame_size;
4599 spin_unlock_bh(&rb_queue->lock);
4600
4601 swap(rb->pg_vec_order, order);
4602 swap(rb->pg_vec_len, req->tp_block_nr);
4603
4604 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4605 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4606 tpacket_rcv : packet_rcv;
4607 skb_queue_purge(rb_queue);
4608 if (atomic_long_read(&po->mapped))
4609 pr_err("packet_mmap: vma is busy: %ld\n",
4610 atomic_long_read(&po->mapped));
4611 }
4612 mutex_unlock(&po->pg_vec_lock);
4613
4614 spin_lock(&po->bind_lock);
4615 if (was_running) {
4616 WRITE_ONCE(po->num, num);
4617 register_prot_hook(sk);
4618 }
4619 spin_unlock(&po->bind_lock);
4620 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4621 /* Because we don't support block-based V3 on tx-ring */
4622 if (!tx_ring)
4623 prb_shutdown_retire_blk_timer(po, rb_queue);
4624 }
4625
4626 out_free_pg_vec:
4627 if (pg_vec) {
4628 bitmap_free(rx_owner_map);
4629 free_pg_vec(pg_vec, order, req->tp_block_nr);
4630 }
4631 out:
4632 return err;
4633 }
4634
4635 static int packet_mmap(struct file *file, struct socket *sock,
4636 struct vm_area_struct *vma)
4637 {
4638 struct sock *sk = sock->sk;
4639 struct packet_sock *po = pkt_sk(sk);
4640 unsigned long size, expected_size;
4641 struct packet_ring_buffer *rb;
4642 unsigned long start;
4643 int err = -EINVAL;
4644 int i;
4645
4646 if (vma->vm_pgoff)
4647 return -EINVAL;
4648
4649 mutex_lock(&po->pg_vec_lock);
4650
4651 expected_size = 0;
4652 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4653 if (rb->pg_vec) {
4654 expected_size += rb->pg_vec_len
4655 * rb->pg_vec_pages
4656 * PAGE_SIZE;
4657 }
4658 }
4659
4660 if (expected_size == 0)
4661 goto out;
4662
4663 size = vma->vm_end - vma->vm_start;
4664 if (size != expected_size)
4665 goto out;
4666
4667 start = vma->vm_start;
4668 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4669 if (rb->pg_vec == NULL)
4670 continue;
4671
4672 for (i = 0; i < rb->pg_vec_len; i++) {
4673 struct page *page;
4674 void *kaddr = rb->pg_vec[i].buffer;
4675 int pg_num;
4676
4677 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4678 page = pgv_to_page(kaddr);
4679 err = vm_insert_page(vma, start, page);
4680 if (unlikely(err))
4681 goto out;
4682 start += PAGE_SIZE;
4683 kaddr += PAGE_SIZE;
4684 }
4685 }
4686 }
4687
4688 atomic_long_inc(&po->mapped);
4689 vma->vm_ops = &packet_mmap_ops;
4690 err = 0;
4691
4692 out:
4693 mutex_unlock(&po->pg_vec_lock);
4694 return err;
4695 }
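/*
 * Editor's sketch (userspace, not part of the kernel build): both rings
 * are mapped by a single mmap() whose length must equal the sum of the
 * configured ring sizes (vm_pgoff must be zero); the RX ring, if
 * configured, comes first in the mapping.
 *
 *	size_t len = rx_req.tp_block_size * rx_req.tp_block_nr +
 *		     tx_req.tp_block_size * tx_req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */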
4696
4697 static const struct proto_ops packet_ops_spkt = {
4698 .family = PF_PACKET,
4699 .owner = THIS_MODULE,
4700 .release = packet_release,
4701 .bind = packet_bind_spkt,
4702 .connect = sock_no_connect,
4703 .socketpair = sock_no_socketpair,
4704 .accept = sock_no_accept,
4705 .getname = packet_getname_spkt,
4706 .poll = datagram_poll,
4707 .ioctl = packet_ioctl,
4708 .gettstamp = sock_gettstamp,
4709 .listen = sock_no_listen,
4710 .shutdown = sock_no_shutdown,
4711 .sendmsg = packet_sendmsg_spkt,
4712 .recvmsg = packet_recvmsg,
4713 .mmap = sock_no_mmap,
4714 };
4715
4716 static const struct proto_ops packet_ops = {
4717 .family = PF_PACKET,
4718 .owner = THIS_MODULE,
4719 .release = packet_release,
4720 .bind = packet_bind,
4721 .connect = sock_no_connect,
4722 .socketpair = sock_no_socketpair,
4723 .accept = sock_no_accept,
4724 .getname = packet_getname,
4725 .poll = packet_poll,
4726 .ioctl = packet_ioctl,
4727 .gettstamp = sock_gettstamp,
4728 .listen = sock_no_listen,
4729 .shutdown = sock_no_shutdown,
4730 .setsockopt = packet_setsockopt,
4731 .getsockopt = packet_getsockopt,
4732 .sendmsg = packet_sendmsg,
4733 .recvmsg = packet_recvmsg,
4734 .mmap = packet_mmap,
4735 };
4736
4737 static const struct net_proto_family packet_family_ops = {
4738 .family = PF_PACKET,
4739 .create = packet_create,
4740 .owner = THIS_MODULE,
4741 };
4742
4743 static struct notifier_block packet_netdev_notifier = {
4744 .notifier_call = packet_notifier,
4745 };
4746
4747 #ifdef CONFIG_PROC_FS
4748
4749 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4750 __acquires(RCU)
4751 {
4752 struct net *net = seq_file_net(seq);
4753
4754 rcu_read_lock();
4755 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4756 }
4757
4758 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4759 {
4760 struct net *net = seq_file_net(seq);
4761 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4762 }
4763
4764 static void packet_seq_stop(struct seq_file *seq, void *v)
4765 __releases(RCU)
4766 {
4767 rcu_read_unlock();
4768 }
4769
4770 static int packet_seq_show(struct seq_file *seq, void *v)
4771 {
4772 if (v == SEQ_START_TOKEN)
4773 seq_printf(seq,
4774 "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
4775 IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4776 else {
4777 struct sock *s = sk_entry(v);
4778 const struct packet_sock *po = pkt_sk(s);
4779
4780 seq_printf(seq,
4781 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4782 s,
4783 refcount_read(&s->sk_refcnt),
4784 s->sk_type,
4785 ntohs(READ_ONCE(po->num)),
4786 READ_ONCE(po->ifindex),
4787 packet_sock_flag(po, PACKET_SOCK_RUNNING),
4788 atomic_read(&s->sk_rmem_alloc),
4789 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4790 sock_i_ino(s));
4791 }
4792
4793 return 0;
4794 }
4795
4796 static const struct seq_operations packet_seq_ops = {
4797 .start = packet_seq_start,
4798 .next = packet_seq_next,
4799 .stop = packet_seq_stop,
4800 .show = packet_seq_show,
4801 };
4802 #endif
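/*
 * Each /proc/net/packet line shows, per the format string above: socket
 * pointer, refcount, socket type, protocol (host byte order), bound
 * ifindex, running flag, receive-queue bytes, owning uid and inode.
 */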
4803
4804 static int __net_init packet_net_init(struct net *net)
4805 {
4806 mutex_init(&net->packet.sklist_lock);
4807 INIT_HLIST_HEAD(&net->packet.sklist);
4808
4809 #ifdef CONFIG_PROC_FS
4810 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4811 sizeof(struct seq_net_private)))
4812 return -ENOMEM;
4813 #endif /* CONFIG_PROC_FS */
4814
4815 return 0;
4816 }
4817
4818 static void __net_exit packet_net_exit(struct net *net)
4819 {
4820 remove_proc_entry("packet", net->proc_net);
4821 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4822 }
4823
4824 static struct pernet_operations packet_net_ops = {
4825 .init = packet_net_init,
4826 .exit = packet_net_exit,
4827 };
4828
4829
4830 static void __exit packet_exit(void)
4831 {
4832 sock_unregister(PF_PACKET);
4833 proto_unregister(&packet_proto);
4834 unregister_netdevice_notifier(&packet_netdev_notifier);
4835 unregister_pernet_subsys(&packet_net_ops);
4836 }
4837
4838 static int __init packet_init(void)
4839 {
4840 int rc;
4841
4842 rc = register_pernet_subsys(&packet_net_ops);
4843 if (rc)
4844 goto out;
4845 rc = register_netdevice_notifier(&packet_netdev_notifier);
4846 if (rc)
4847 goto out_pernet;
4848 rc = proto_register(&packet_proto, 0);
4849 if (rc)
4850 goto out_notifier;
4851 rc = sock_register(&packet_family_ops);
4852 if (rc)
4853 goto out_proto;
4854
4855 return 0;
4856
4857 out_proto:
4858 proto_unregister(&packet_proto);
4859 out_notifier:
4860 unregister_netdevice_notifier(&packet_netdev_notifier);
4861 out_pernet:
4862 unregister_pernet_subsys(&packet_net_ops);
4863 out:
4864 return rc;
4865 }
4866
4867 module_init(packet_init);
4868 module_exit(packet_exit);
4869 MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
4870 MODULE_LICENSE("GPL");
4871 MODULE_ALIAS_NETPROTO(PF_PACKET);
4872