Lines Matching +full:tie +full:- +full:off

1 // SPDX-License-Identifier: GPL-2.0-only
130 cmd->base.speed = SPEED_10000; in veth_get_link_ksettings()
131 cmd->base.duplex = DUPLEX_FULL; in veth_get_link_ksettings()
132 cmd->base.port = PORT_TP; in veth_get_link_ksettings()
133 cmd->base.autoneg = AUTONEG_DISABLE; in veth_get_link_ksettings()
139 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in veth_get_drvinfo()
140 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in veth_get_drvinfo()
152 for (i = 0; i < dev->real_num_rx_queues; i++) in veth_get_strings()
157 for (i = 0; i < dev->real_num_tx_queues; i++) in veth_get_strings()
172 VETH_RQ_STATS_LEN * dev->real_num_rx_queues + in veth_get_sset_count()
173 VETH_TQ_STATS_LEN * dev->real_num_tx_queues + in veth_get_sset_count()
176 return -EOPNOTSUPP; in veth_get_sset_count()
187 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_get_page_pool_stats()
188 if (!priv->rq[i].page_pool) in veth_get_page_pool_stats()
190 page_pool_get_stats(priv->rq[i].page_pool, &pp_stats); in veth_get_page_pool_stats()
200 struct net_device *peer = rtnl_dereference(priv->peer); in veth_get_ethtool_stats()
203 data[0] = peer ? peer->ifindex : 0; in veth_get_ethtool_stats()
205 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_get_ethtool_stats()
206 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats; in veth_get_ethtool_stats()
207 const void *stats_base = (void *)&rq_stats->vs; in veth_get_ethtool_stats()
212 start = u64_stats_fetch_begin(&rq_stats->syncp); in veth_get_ethtool_stats()
217 } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); in veth_get_ethtool_stats()
226 for (i = 0; i < peer->real_num_rx_queues; i++) { in veth_get_ethtool_stats()
227 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats; in veth_get_ethtool_stats()
228 const void *base = (void *)&rq_stats->vs; in veth_get_ethtool_stats()
232 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN; in veth_get_ethtool_stats()
234 start = u64_stats_fetch_begin(&rq_stats->syncp); in veth_get_ethtool_stats()
239 } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); in veth_get_ethtool_stats()
241 pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN; in veth_get_ethtool_stats()
250 channels->tx_count = dev->real_num_tx_queues; in veth_get_channels()
251 channels->rx_count = dev->real_num_rx_queues; in veth_get_channels()
252 channels->max_tx = dev->num_tx_queues; in veth_get_channels()
253 channels->max_rx = dev->num_rx_queues; in veth_get_channels()
300 if (!READ_ONCE(rq->rx_notify_masked) && in __veth_xdp_flush()
301 napi_schedule_prep(&rq->xdp_napi)) { in __veth_xdp_flush()
302 WRITE_ONCE(rq->rx_notify_masked, true); in __veth_xdp_flush()
303 __napi_schedule(&rq->xdp_napi); in __veth_xdp_flush()
309 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) { in veth_xdp_rx()
329 * device has TSO off.
332 * - the sock_wfree destructor is used by UDP, ICMP and XDP sockets -
339 return !(dev->features & NETIF_F_ALL_TSO) || in veth_skb_is_eligible_for_gro()
340 (skb->destructor == sock_wfree && in veth_skb_is_eligible_for_gro()
341 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)); in veth_skb_is_eligible_for_gro()
350 int length = skb->len; in veth_xmit()
355 rcv = rcu_dereference(priv->peer); in veth_xmit()
363 if (rxq < rcv->real_num_rx_queues) { in veth_xmit()
364 rq = &rcv_priv->rq[rxq]; in veth_xmit()
370 use_napi = rcu_access_pointer(rq->napi) && in veth_xmit()
382 atomic64_inc(&priv->dropped); in veth_xmit()
396 result->peer_tq_xdp_xmit_err = 0; in veth_stats_rx()
397 result->xdp_packets = 0; in veth_stats_rx()
398 result->xdp_tx_err = 0; in veth_stats_rx()
399 result->xdp_bytes = 0; in veth_stats_rx()
400 result->rx_drops = 0; in veth_stats_rx()
401 for (i = 0; i < dev->num_rx_queues; i++) { in veth_stats_rx()
403 struct veth_rq_stats *stats = &priv->rq[i].stats; in veth_stats_rx()
407 start = u64_stats_fetch_begin(&stats->syncp); in veth_stats_rx()
408 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; in veth_stats_rx()
409 xdp_tx_err = stats->vs.xdp_tx_err; in veth_stats_rx()
410 packets = stats->vs.xdp_packets; in veth_stats_rx()
411 bytes = stats->vs.xdp_bytes; in veth_stats_rx()
412 drops = stats->vs.rx_drops; in veth_stats_rx()
413 } while (u64_stats_fetch_retry(&stats->syncp, start)); in veth_stats_rx()
414 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; in veth_stats_rx()
415 result->xdp_tx_err += xdp_tx_err; in veth_stats_rx()
416 result->xdp_packets += packets; in veth_stats_rx()
417 result->xdp_bytes += bytes; in veth_stats_rx()
418 result->rx_drops += drops; in veth_stats_rx()
429 tot->tx_dropped = atomic64_read(&priv->dropped); in veth_get_stats64()
430 dev_fetch_sw_netstats(tot, dev->tstats); in veth_get_stats64()
433 tot->tx_dropped += rx.xdp_tx_err; in veth_get_stats64()
434 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; in veth_get_stats64()
435 tot->rx_bytes += rx.xdp_bytes; in veth_get_stats64()
436 tot->rx_packets += rx.xdp_packets; in veth_get_stats64()
439 peer = rcu_dereference(priv->peer); in veth_get_stats64()
443 dev_fetch_sw_netstats(&tot_peer, peer->tstats); in veth_get_stats64()
444 tot->rx_bytes += tot_peer.tx_bytes; in veth_get_stats64()
445 tot->rx_packets += tot_peer.tx_packets; in veth_get_stats64()
448 tot->tx_dropped += rx.peer_tq_xdp_xmit_err; in veth_get_stats64()
449 tot->rx_dropped += rx.xdp_tx_err; in veth_get_stats64()
450 tot->tx_bytes += rx.xdp_bytes; in veth_get_stats64()
451 tot->tx_packets += rx.xdp_packets; in veth_get_stats64()
463 return smp_processor_id() % dev->real_num_rx_queues; in veth_select_rxq()
471 return rcu_dereference(priv->peer); in veth_peer_dev()
479 int i, ret = -ENXIO, nxmit = 0; in veth_xdp_xmit()
485 return -EINVAL; in veth_xdp_xmit()
488 rcv = rcu_dereference(priv->peer); in veth_xdp_xmit()
493 rq = &rcv_priv->rq[veth_select_rxq(rcv)]; in veth_xdp_xmit()
497 if (!rcu_access_pointer(rq->napi)) in veth_xdp_xmit()
500 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN; in veth_xdp_xmit()
502 spin_lock(&rq->xdp_ring.producer_lock); in veth_xdp_xmit()
508 __ptr_ring_produce(&rq->xdp_ring, ptr))) in veth_xdp_xmit()
512 spin_unlock(&rq->xdp_ring.producer_lock); in veth_xdp_xmit()
519 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_xmit()
520 rq->stats.vs.peer_tq_xdp_xmit += nxmit; in veth_xdp_xmit()
521 rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit; in veth_xdp_xmit()
522 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_xmit()
540 atomic64_add(n, &priv->dropped); in veth_ndo_xdp_xmit()
550 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false); in veth_xdp_flush_bq()
556 for (i = sent; unlikely(i < bq->count); i++) in veth_xdp_flush_bq()
557 xdp_return_frame(bq->q[i]); in veth_xdp_flush_bq()
559 drops = bq->count - sent; in veth_xdp_flush_bq()
560 trace_xdp_bulk_tx(rq->dev, sent, drops, err); in veth_xdp_flush_bq()
562 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_flush_bq()
563 rq->stats.vs.xdp_tx += sent; in veth_xdp_flush_bq()
564 rq->stats.vs.xdp_tx_err += drops; in veth_xdp_flush_bq()
565 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_flush_bq()
567 bq->count = 0; in veth_xdp_flush_bq()
572 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev); in veth_xdp_flush()
578 rcv = rcu_dereference(priv->peer); in veth_xdp_flush()
583 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)]; in veth_xdp_flush()
585 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog))) in veth_xdp_flush()
599 return -EOVERFLOW; in veth_xdp_tx()
601 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE)) in veth_xdp_tx()
604 bq->q[bq->count++] = frame; in veth_xdp_tx()
618 xdp_prog = rcu_dereference(rq->xdp_prog); in veth_xdp_rcv_one()
625 xdp->rxq = &rq->xdp_rxq; in veth_xdp_rcv_one()
637 xdp->rxq->mem = frame->mem; in veth_xdp_rcv_one()
639 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
641 stats->rx_drops++; in veth_xdp_rcv_one()
644 stats->xdp_tx++; in veth_xdp_rcv_one()
649 xdp->rxq->mem = frame->mem; in veth_xdp_rcv_one()
650 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { in veth_xdp_rcv_one()
652 stats->rx_drops++; in veth_xdp_rcv_one()
655 stats->xdp_redirect++; in veth_xdp_rcv_one()
659 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
662 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_one()
665 stats->xdp_drops++; in veth_xdp_rcv_one()
691 stats->rx_drops += n_xdpf; in veth_xdp_rcv_bulk_skb()
700 rq->dev); in veth_xdp_rcv_bulk_skb()
703 stats->rx_drops++; in veth_xdp_rcv_bulk_skb()
706 napi_gro_receive(&rq->xdp_napi, skb); in veth_xdp_rcv_bulk_skb()
715 get_page(virt_to_page(xdp->data)); in veth_xdp_get()
719 for (i = 0; i < sinfo->nr_frags; i++) in veth_xdp_get()
720 __skb_frag_ref(&sinfo->frags[i]); in veth_xdp_get()
731 skb_shinfo(skb)->nr_frags || in veth_convert_skb_to_xdp_buff()
733 if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM)) in veth_convert_skb_to_xdp_buff()
740 frame_sz = skb_end_pointer(skb) - skb->head; in veth_convert_skb_to_xdp_buff()
742 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in veth_convert_skb_to_xdp_buff()
743 xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), in veth_convert_skb_to_xdp_buff()
747 skb_shinfo(skb)->xdp_frags_size = skb->data_len; in veth_convert_skb_to_xdp_buff()
759 return -ENOMEM; in veth_convert_skb_to_xdp_buff()
772 int off; in veth_xdp_rcv_skb() local
777 xdp_prog = rcu_dereference(rq->xdp_prog); in veth_xdp_rcv_skb()
783 __skb_push(skb, skb->data - skb_mac_header(skb)); in veth_xdp_rcv_skb()
788 orig_data = xdp->data; in veth_xdp_rcv_skb()
789 orig_data_end = xdp->data_end; in veth_xdp_rcv_skb()
799 xdp->rxq->mem = rq->xdp_mem; in veth_xdp_rcv_skb()
801 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
802 stats->rx_drops++; in veth_xdp_rcv_skb()
805 stats->xdp_tx++; in veth_xdp_rcv_skb()
811 xdp->rxq->mem = rq->xdp_mem; in veth_xdp_rcv_skb()
812 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) { in veth_xdp_rcv_skb()
813 stats->rx_drops++; in veth_xdp_rcv_skb()
816 stats->xdp_redirect++; in veth_xdp_rcv_skb()
820 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
823 trace_xdp_exception(rq->dev, xdp_prog, act); in veth_xdp_rcv_skb()
826 stats->xdp_drops++; in veth_xdp_rcv_skb()
832 off = orig_data - xdp->data; in veth_xdp_rcv_skb()
833 if (off > 0) in veth_xdp_rcv_skb()
834 __skb_push(skb, off); in veth_xdp_rcv_skb()
835 else if (off < 0) in veth_xdp_rcv_skb()
836 __skb_pull(skb, -off); in veth_xdp_rcv_skb()
841 off = xdp->data_end - orig_data_end; in veth_xdp_rcv_skb()
842 if (off != 0) in veth_xdp_rcv_skb()
843 __skb_put(skb, off); /* positive on grow, negative on shrink */ in veth_xdp_rcv_skb()
849 skb->data_len = skb_shinfo(skb)->xdp_frags_size; in veth_xdp_rcv_skb()
851 skb->data_len = 0; in veth_xdp_rcv_skb()
853 skb->protocol = eth_type_trans(skb, rq->dev); in veth_xdp_rcv_skb()
855 metalen = xdp->data - xdp->data_meta; in veth_xdp_rcv_skb()
861 stats->rx_drops++; in veth_xdp_rcv_skb()
881 void *ptr = __ptr_ring_consume(&rq->xdp_ring); in veth_xdp_rcv()
890 stats->xdp_bytes += xdp_get_frame_len(frame); in veth_xdp_rcv()
905 stats->xdp_bytes += skb->len; in veth_xdp_rcv()
911 napi_gro_receive(&rq->xdp_napi, skb); in veth_xdp_rcv()
920 u64_stats_update_begin(&rq->stats.syncp); in veth_xdp_rcv()
921 rq->stats.vs.xdp_redirect += stats->xdp_redirect; in veth_xdp_rcv()
922 rq->stats.vs.xdp_bytes += stats->xdp_bytes; in veth_xdp_rcv()
923 rq->stats.vs.xdp_drops += stats->xdp_drops; in veth_xdp_rcv()
924 rq->stats.vs.rx_drops += stats->rx_drops; in veth_xdp_rcv()
925 rq->stats.vs.xdp_packets += done; in veth_xdp_rcv()
926 u64_stats_update_end(&rq->stats.syncp); in veth_xdp_rcv()
949 smp_store_mb(rq->rx_notify_masked, false); in veth_poll()
950 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { in veth_poll()
951 if (napi_schedule_prep(&rq->xdp_napi)) { in veth_poll()
952 WRITE_ONCE(rq->rx_notify_masked, true); in veth_poll()
953 __napi_schedule(&rq->xdp_napi); in veth_poll()
971 .dev = &rq->dev->dev, in veth_create_page_pool()
974 rq->page_pool = page_pool_create(&pp_params); in veth_create_page_pool()
975 if (IS_ERR(rq->page_pool)) { in veth_create_page_pool()
976 int err = PTR_ERR(rq->page_pool); in veth_create_page_pool()
978 rq->page_pool = NULL; in veth_create_page_pool()
991 err = veth_create_page_pool(&priv->rq[i]); in __veth_napi_enable_range()
997 struct veth_rq *rq = &priv->rq[i]; in __veth_napi_enable_range()
999 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); in __veth_napi_enable_range()
1005 struct veth_rq *rq = &priv->rq[i]; in __veth_napi_enable_range()
1007 napi_enable(&rq->xdp_napi); in __veth_napi_enable_range()
1008 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); in __veth_napi_enable_range()
1014 for (i--; i >= start; i--) in __veth_napi_enable_range()
1015 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); in __veth_napi_enable_range()
1018 for (i--; i >= start; i--) { in __veth_napi_enable_range()
1019 page_pool_destroy(priv->rq[i].page_pool); in __veth_napi_enable_range()
1020 priv->rq[i].page_pool = NULL; in __veth_napi_enable_range()
1028 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); in __veth_napi_enable()
1037 struct veth_rq *rq = &priv->rq[i]; in veth_napi_del_range()
1039 rcu_assign_pointer(priv->rq[i].napi, NULL); in veth_napi_del_range()
1040 napi_disable(&rq->xdp_napi); in veth_napi_del_range()
1041 __netif_napi_del(&rq->xdp_napi); in veth_napi_del_range()
1046 struct veth_rq *rq = &priv->rq[i]; in veth_napi_del_range()
1048 rq->rx_notify_masked = false; in veth_napi_del_range()
1049 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); in veth_napi_del_range()
1053 page_pool_destroy(priv->rq[i].page_pool); in veth_napi_del_range()
1054 priv->rq[i].page_pool = NULL; in veth_napi_del_range()
1060 veth_napi_del_range(dev, 0, dev->real_num_rx_queues); in veth_napi_del()
1065 return !!(dev->wanted_features & NETIF_F_GRO); in veth_gro_requested()
1075 struct veth_rq *rq = &priv->rq[i]; in veth_enable_xdp_range()
1078 netif_napi_add(dev, &rq->xdp_napi, veth_poll); in veth_enable_xdp_range()
1079 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); in veth_enable_xdp_range()
1083 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, in veth_enable_xdp_range()
1090 rq->xdp_mem = rq->xdp_rxq.mem; in veth_enable_xdp_range()
1095 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); in veth_enable_xdp_range()
1097 for (i--; i >= start; i--) { in veth_enable_xdp_range()
1098 struct veth_rq *rq = &priv->rq[i]; in veth_enable_xdp_range()
1100 xdp_rxq_info_unreg(&rq->xdp_rxq); in veth_enable_xdp_range()
1102 netif_napi_del(&rq->xdp_napi); in veth_enable_xdp_range()
1115 struct veth_rq *rq = &priv->rq[i]; in veth_disable_xdp_range()
1117 rq->xdp_rxq.mem = rq->xdp_mem; in veth_disable_xdp_range()
1118 xdp_rxq_info_unreg(&rq->xdp_rxq); in veth_disable_xdp_range()
1121 netif_napi_del(&rq->xdp_napi); in veth_disable_xdp_range()
1127 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); in veth_enable_xdp()
1131 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { in veth_enable_xdp()
1132 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); in veth_enable_xdp()
1139 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); in veth_enable_xdp()
1145 for (i = 0; i < dev->real_num_rx_queues; i++) { in veth_enable_xdp()
1146 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog); in veth_enable_xdp()
1147 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi); in veth_enable_xdp()
1158 for (i = 0; i < dev->real_num_rx_queues; i++) in veth_disable_xdp()
1159 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); in veth_disable_xdp()
1164 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); in veth_disable_xdp()
1173 struct veth_rq *rq = &priv->rq[i]; in veth_napi_enable_range()
1175 netif_napi_add(dev, &rq->xdp_napi, veth_poll); in veth_napi_enable_range()
1181 struct veth_rq *rq = &priv->rq[i]; in veth_napi_enable_range()
1183 netif_napi_del(&rq->xdp_napi); in veth_napi_enable_range()
1192 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); in veth_napi_enable()
1202 if (priv->_xdp_prog) { in veth_disable_range_safe()
1218 if (priv->_xdp_prog) { in veth_enable_range_safe()
1243 peer = rtnl_dereference(priv->peer); in veth_set_xdp_features()
1244 if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { in veth_set_xdp_features()
1250 if (priv_peer->_xdp_prog || veth_gro_requested(peer)) in veth_set_xdp_features()
1269 if (!ch->rx_count || !ch->tx_count) in veth_set_channels()
1270 return -EINVAL; in veth_set_channels()
1273 peer = rtnl_dereference(priv->peer); in veth_set_channels()
1275 if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) in veth_set_channels()
1276 return -EINVAL; in veth_set_channels()
1278 if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) in veth_set_channels()
1279 return -EINVAL; in veth_set_channels()
1281 old_rx_count = dev->real_num_rx_queues; in veth_set_channels()
1282 new_rx_count = ch->rx_count; in veth_set_channels()
1284 /* turn device off */ in veth_set_channels()
1295 err = netif_set_real_num_rx_queues(dev, ch->rx_count); in veth_set_channels()
1299 err = netif_set_real_num_tx_queues(dev, ch->tx_count); in veth_set_channels()
1309 pr_warn("Can't restore rx queues config %d -> %d %d", in veth_set_channels()
1335 old_rx_count = ch->rx_count; in veth_set_channels()
1342 struct net_device *peer = rtnl_dereference(priv->peer); in veth_open()
1346 return -ENOTCONN; in veth_open()
1348 if (priv->_xdp_prog) { in veth_open()
1358 if (peer->flags & IFF_UP) { in veth_open()
1371 struct net_device *peer = rtnl_dereference(priv->peer); in veth_close()
1377 if (priv->_xdp_prog) in veth_close()
1395 priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq), in veth_alloc_queues()
1397 if (!priv->rq) in veth_alloc_queues()
1398 return -ENOMEM; in veth_alloc_queues()
1400 for (i = 0; i < dev->num_rx_queues; i++) { in veth_alloc_queues()
1401 priv->rq[i].dev = dev; in veth_alloc_queues()
1402 u64_stats_init(&priv->rq[i].stats.syncp); in veth_alloc_queues()
1412 kvfree(priv->rq); in veth_free_queues()
1448 peer = rcu_dereference(priv->peer); in veth_get_iflink()
1449 iflink = peer ? READ_ONCE(peer->ifindex) : 0; in veth_get_iflink()
1461 peer = rtnl_dereference(priv->peer); in veth_fix_features()
1465 if (peer_priv->_xdp_prog) in veth_fix_features()
1475 netdev_features_t changed = features ^ dev->features; in veth_set_features()
1480 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog) in veth_set_features()
1483 peer = rtnl_dereference(priv->peer); in veth_set_features()
1508 peer = rcu_dereference(priv->peer); in veth_set_rx_headroom()
1513 priv->requested_headroom = new_hr; in veth_set_rx_headroom()
1514 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom); in veth_set_rx_headroom()
1515 dev->needed_headroom = new_hr; in veth_set_rx_headroom()
1516 peer->needed_headroom = new_hr; in veth_set_rx_headroom()
1531 old_prog = priv->_xdp_prog; in veth_xdp_set()
1532 priv->_xdp_prog = prog; in veth_xdp_set()
1533 peer = rtnl_dereference(priv->peer); in veth_xdp_set()
1538 err = -ENOTCONN; in veth_xdp_set()
1542 max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - in veth_xdp_set()
1543 peer->hard_header_len; in veth_xdp_set()
1547 if (prog->aux->xdp_has_frags) in veth_xdp_set()
1550 if (peer->mtu > max_mtu) { in veth_xdp_set()
1552 err = -ERANGE; in veth_xdp_set()
1556 if (dev->real_num_rx_queues < peer->real_num_tx_queues) { in veth_xdp_set()
1558 err = -ENOSPC; in veth_xdp_set()
1562 if (dev->flags & IFF_UP) { in veth_xdp_set()
1571 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; in veth_xdp_set()
1572 peer->max_mtu = max_mtu; in veth_xdp_set()
1583 if (dev->flags & IFF_UP) in veth_xdp_set()
1587 peer->hw_features |= NETIF_F_GSO_SOFTWARE; in veth_xdp_set()
1588 peer->max_mtu = ETH_MAX_MTU; in veth_xdp_set()
1599 priv->_xdp_prog = old_prog; in veth_xdp_set()
1606 switch (xdp->command) { in veth_xdp()
1608 return veth_xdp_set(dev, xdp->prog, xdp->extack); in veth_xdp()
1610 return -EINVAL; in veth_xdp()
1618 if (!_ctx->skb) in veth_xdp_rx_timestamp()
1619 return -ENODATA; in veth_xdp_rx_timestamp()
1621 *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp; in veth_xdp_rx_timestamp()
1629 struct sk_buff *skb = _ctx->skb; in veth_xdp_rx_hash()
1632 return -ENODATA; in veth_xdp_rx_hash()
1635 *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE; in veth_xdp_rx_hash()
1644 const struct sk_buff *skb = _ctx->skb; in veth_xdp_rx_vlan_tag()
1648 return -ENODATA; in veth_xdp_rx_vlan_tag()
1654 *vlan_proto = skb->vlan_proto; in veth_xdp_rx_vlan_tag()
1695 dev->priv_flags &= ~IFF_TX_SKB_SHARING; in veth_setup()
1696 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in veth_setup()
1697 dev->priv_flags |= IFF_NO_QUEUE; in veth_setup()
1698 dev->priv_flags |= IFF_PHONY_HEADROOM; in veth_setup()
1699 dev->priv_flags |= IFF_DISABLE_NETPOLL; in veth_setup()
1700 dev->lltx = true; in veth_setup()
1702 dev->netdev_ops = &veth_netdev_ops; in veth_setup()
1703 dev->xdp_metadata_ops = &veth_xdp_metadata_ops; in veth_setup()
1704 dev->ethtool_ops = &veth_ethtool_ops; in veth_setup()
1705 dev->features |= VETH_FEATURES; in veth_setup()
1706 dev->vlan_features = dev->features & in veth_setup()
1711 dev->needs_free_netdev = true; in veth_setup()
1712 dev->priv_destructor = veth_dev_free; in veth_setup()
1713 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; in veth_setup()
1714 dev->max_mtu = ETH_MAX_MTU; in veth_setup()
1716 dev->hw_features = VETH_FEATURES; in veth_setup()
1717 dev->hw_enc_features = VETH_FEATURES; in veth_setup()
1718 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; in veth_setup()
1731 return -EINVAL; in veth_validate()
1733 return -EADDRNOTAVAIL; in veth_validate()
1737 return -EINVAL; in veth_validate()
1746 dev->features &= ~NETIF_F_GRO; in veth_disable_gro()
1747 dev->wanted_features &= ~NETIF_F_GRO; in veth_disable_gro()
1755 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { in veth_init_queues()
1760 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { in veth_init_queues()
1825 if (ifmp && (dev->ifindex != 0)) in veth_newlink()
1826 peer->ifindex = ifmp->ifi_index; in veth_newlink()
1850 * should be re-allocated in veth_newlink()
1857 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); in veth_newlink()
1859 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); in veth_newlink()
1868 * tie the devices together in veth_newlink()
1872 rcu_assign_pointer(priv->peer, peer); in veth_newlink()
1878 rcu_assign_pointer(priv->peer, dev); in veth_newlink()
1909 peer = rtnl_dereference(priv->peer); in veth_dellink()
1915 RCU_INIT_POINTER(priv->peer, NULL); in veth_dellink()
1920 RCU_INIT_POINTER(priv->peer, NULL); in veth_dellink()
1932 struct net_device *peer = rtnl_dereference(priv->peer); in veth_get_link_net()