Lines Matching +full:num +full:- +full:txq
1 // SPDX-License-Identifier: GPL-2.0-or-later
53 * at once, the weight is chosen so that the EWMA will be insensitive to short-
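
Note on the comment above: it belongs to the receive packet-size EWMA that sizes mergeable RX buffers. A minimal sketch of the update rule, assuming the usual DECLARE_EWMA(name, precision, weight_rcp) semantics from <linux/average.h> (the concrete precision/weight parameters are an assumption, not part of this match list):

/* Each sample only moves the running average by 1/weight_rcp of the error,
 * so a short burst of unusual packet sizes barely disturbs the estimate:
 *
 *     avg_new = avg_old + (sample - avg_old) / weight_rcp;
 *
 * e.g. with weight_rcp == 64, a single 64 KiB GSO frame shifts a 1500 B
 * average by only about 1 KiB.
 */
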
115 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
116 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
161 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
164 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
167 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
519 rss->indirection_table = NULL; in rss_indirection_table_alloc()
523 rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL); in rss_indirection_table_alloc()
524 if (!rss->indirection_table) in rss_indirection_table_alloc()
525 return -ENOMEM; in rss_indirection_table_alloc()
532 kfree(rss->indirection_table); in rss_indirection_table_free()
565 static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq, in __free_old_xmit() argument
571 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in __free_old_xmit()
578 stats->packets++; in __free_old_xmit()
579 stats->bytes += skb->len; in __free_old_xmit()
581 stats->napi_packets++; in __free_old_xmit()
582 stats->napi_bytes += skb->len; in __free_old_xmit()
588 stats->packets++; in __free_old_xmit()
589 stats->bytes += xdp_get_frame_len(frame); in __free_old_xmit()
593 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); in __free_old_xmit()
601 return (vq->index - 1) / 2; in vq2txq()
604 static int txq2vq(int txq) in txq2vq() argument
606 return txq * 2 + 1; in txq2vq()
611 return vq->index / 2; in vq2rxq()
621 if (qid == vi->max_queue_pairs * 2) in vq_type()
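
A short worked example of the virtqueue numbering implied by vq2txq()/txq2vq()/vq2rxq() and the control-queue check above (the queue-pair numbers are illustrative):

/* Queue pair i owns rx vq 2*i and tx vq 2*i + 1; the control vq, when
 * present, sits at index max_queue_pairs * 2.
 *
 *   txq2vq(3)              == 3 * 2 + 1   == 7
 *   vq2txq(vq->index == 7) == (7 - 1) / 2 == 3
 *   vq2rxq(vq->index == 6) == 6 / 2       == 3
 */
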
633 return (struct virtio_net_common_hdr *)skb->cb; in skb_vnet_common_hdr()
644 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
645 for (end = page; end->private; end = (struct page *)end->private); in give_pages()
646 end->private = (unsigned long)rq->pages; in give_pages()
647 rq->pages = page; in give_pages()
652 struct page *p = rq->pages; in get_a_page()
655 rq->pages = (struct page *)p->private; in get_a_page()
657 p->private = 0; in get_a_page()
666 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
668 else if (vi->big_packets) in virtnet_rq_free_buf()
676 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
677 vi->refill_enabled = true; in enable_delayed_refill()
678 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
683 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
684 vi->refill_enabled = false; in disable_delayed_refill()
685 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
691 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
698 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
731 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
732 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
737 if (napi->weight) in skb_xmit_done()
741 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
758 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); in mergeable_ctx_to_truesize()
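
The mask above strips the truesize out of the mergeable-buffer context value. A hedged sketch of the matching packing helper (mergeable_len_to_ctx is named elsewhere in this list, but its body is not shown; the exact bit layout here is an assumption):

/* Sketch: truesize in the low MRG_CTX_HEADER_SHIFT bits, headroom above it. */
static void *mergeable_len_to_ctx_sketch(unsigned int truesize,
					 unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}
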
794 hdr_len = vi->hdr_len; in page_to_skb()
795 if (vi->mergeable_rx_bufs) in page_to_skb()
800 buf = p - headroom; in page_to_skb()
801 len -= hdr_len; in page_to_skb()
804 tailroom = truesize - headroom - hdr_padded_len - len; in page_to_skb()
809 skb = virtnet_build_skb(buf, truesize, p - buf, len); in page_to_skb()
813 page = (struct page *)page->private; in page_to_skb()
820 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
824 /* Copy all frame if it fits skb->head, otherwise in page_to_skb()
833 len -= copy; in page_to_skb()
836 if (vi->mergeable_rx_bufs) { in page_to_skb()
851 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); in page_to_skb()
857 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); in page_to_skb()
858 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, in page_to_skb()
860 len -= frag_size; in page_to_skb()
861 page = (struct page *)page->private; in page_to_skb()
888 --dma->ref; in virtnet_rq_unmap()
890 if (dma->need_sync && len) { in virtnet_rq_unmap()
891 offset = buf - (head + sizeof(*dma)); in virtnet_rq_unmap()
893 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, in virtnet_rq_unmap()
898 if (dma->ref) in virtnet_rq_unmap()
901 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, in virtnet_rq_unmap()
910 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); in virtnet_rq_get_buf()
911 if (buf && rq->do_dma) in virtnet_rq_get_buf()
924 if (!rq->do_dma) { in virtnet_rq_init_one_sg()
925 sg_init_one(rq->sg, buf, len); in virtnet_rq_init_one_sg()
929 head = page_address(rq->alloc_frag.page); in virtnet_rq_init_one_sg()
931 offset = buf - head; in virtnet_rq_init_one_sg()
935 addr = dma->addr - sizeof(*dma) + offset; in virtnet_rq_init_one_sg()
937 sg_init_table(rq->sg, 1); in virtnet_rq_init_one_sg()
938 rq->sg[0].dma_address = addr; in virtnet_rq_init_one_sg()
939 rq->sg[0].length = len; in virtnet_rq_init_one_sg()
944 struct page_frag *alloc_frag = &rq->alloc_frag; in virtnet_rq_alloc()
952 head = page_address(alloc_frag->page); in virtnet_rq_alloc()
954 if (rq->do_dma) { in virtnet_rq_alloc()
958 if (!alloc_frag->offset) { in virtnet_rq_alloc()
959 if (rq->last_dma) { in virtnet_rq_alloc()
964 virtnet_rq_unmap(rq, rq->last_dma, 0); in virtnet_rq_alloc()
965 rq->last_dma = NULL; in virtnet_rq_alloc()
968 dma->len = alloc_frag->size - sizeof(*dma); in virtnet_rq_alloc()
970 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, in virtnet_rq_alloc()
971 dma->len, DMA_FROM_DEVICE, 0); in virtnet_rq_alloc()
972 if (virtqueue_dma_mapping_error(rq->vq, addr)) in virtnet_rq_alloc()
975 dma->addr = addr; in virtnet_rq_alloc()
976 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); in virtnet_rq_alloc()
982 get_page(alloc_frag->page); in virtnet_rq_alloc()
983 dma->ref = 1; in virtnet_rq_alloc()
984 alloc_frag->offset = sizeof(*dma); in virtnet_rq_alloc()
986 rq->last_dma = dma; in virtnet_rq_alloc()
989 ++dma->ref; in virtnet_rq_alloc()
992 buf = head + alloc_frag->offset; in virtnet_rq_alloc()
994 get_page(alloc_frag->page); in virtnet_rq_alloc()
995 alloc_frag->offset += size; in virtnet_rq_alloc()
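
The accesses in virtnet_rq_alloc() and virtnet_rq_unmap() above imply a small DMA bookkeeping header at the start of each alloc_frag page. A sketch of that layout, with field types inferred rather than quoted:

/* The header sits at offset 0 of the page, the payload that follows it
 * (dma + 1) is mapped once, and every receive buffer carved from the page
 * holds one ->ref until virtnet_rq_unmap() drops it.
 */
struct virtnet_rq_dma_sketch {
	dma_addr_t addr;	/* mapping of the payload after this header */
	u32 ref;		/* buffers still outstanding from this page */
	u16 len;		/* mapped length: alloc_frag->size - sizeof(header) */
	u16 need_sync;		/* CPU sync needed before the buffer is read */
};
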
1002 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf()
1006 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1008 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1013 if (rq->do_dma) in virtnet_rq_unmap_free_buf()
1019 static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq, in free_old_xmit() argument
1024 __free_old_xmit(sq, txq, in_napi, &stats); in free_old_xmit()
1032 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit()
1033 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); in free_old_xmit()
1034 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); in free_old_xmit()
1035 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit()
1040 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1042 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1052 bool use_napi = sq->napi.weight; in check_sq_full_and_disable()
1055 qnum = sq - vi->sq; in check_sq_full_and_disable()
1063 * the stack to do a non-trivial amount of useless work. in check_sq_full_and_disable()
1067 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in check_sq_full_and_disable()
1068 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); in check_sq_full_and_disable() local
1070 netif_tx_stop_queue(txq); in check_sq_full_and_disable()
1071 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1072 u64_stats_inc(&sq->stats.stop); in check_sq_full_and_disable()
1073 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1075 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in check_sq_full_and_disable()
1076 virtqueue_napi_schedule(&sq->napi, sq->vq); in check_sq_full_and_disable()
1077 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in check_sq_full_and_disable()
1079 free_old_xmit(sq, txq, false); in check_sq_full_and_disable()
1080 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in check_sq_full_and_disable()
1082 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1083 u64_stats_inc(&sq->stats.wake); in check_sq_full_and_disable()
1084 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1085 virtqueue_disable_cb(sq->vq); in check_sq_full_and_disable()
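
A condensed view of the stop/wake hysteresis in check_sq_full_and_disable() above, for orientation only (2 + MAX_SKB_FRAGS presumably covers one slot for the virtio-net header, one for the linear part of an skb and MAX_SKB_FRAGS page fragments):

/* if fewer than 2 + MAX_SKB_FRAGS descriptors are free:
 *     stop the subqueue and bump sq->stats.stop;
 *     re-arm the completion callback; if that races with completions,
 *     either kick tx NAPI (napi mode) or reap the ring inline and,
 *     once 2 + MAX_SKB_FRAGS slots are free again, wake the subqueue
 *     and bump sq->stats.wake.
 */
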
1093 sg->dma_address = addr; in sg_fill_dma()
1094 sg->length = len; in sg_fill_dma()
1105 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; in buf_to_xdp()
1109 vi->dev->name, len, bufsize); in buf_to_xdp()
1110 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1124 unsigned int metasize = xdp->data - xdp->data_meta; in xsk_construct_skb()
1128 size = xdp->data_end - xdp->data_hard_start; in xsk_construct_skb()
1129 skb = napi_alloc_skb(&rq->napi, size); in xsk_construct_skb()
1135 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); in xsk_construct_skb()
1137 size = xdp->data_end - xdp->data_meta; in xsk_construct_skb()
1138 memcpy(__skb_put(skb, size), xdp->data_meta, size); in xsk_construct_skb()
1160 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_small()
1176 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_small()
1189 while (num_buf-- > 1) { in xsk_drop_follow_bufs()
1190 xdp = virtqueue_get_buf(rq->vq, &len); in xsk_drop_follow_bufs()
1193 dev->name, num_buf); in xsk_drop_follow_bufs()
1197 u64_stats_add(&stats->bytes, len); in xsk_drop_follow_bufs()
1217 while (--num_buf) { in xsk_append_merge_buffer()
1218 buf = virtqueue_get_buf(rq->vq, &len); in xsk_append_merge_buffer()
1221 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1222 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1223 hdr->num_buffers)); in xsk_append_merge_buffer()
1224 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1225 return -EINVAL; in xsk_append_merge_buffer()
1228 u64_stats_add(&stats->bytes, len); in xsk_append_merge_buffer()
1240 memcpy(buf, xdp->data - vi->hdr_len, len); in xsk_append_merge_buffer()
1259 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1260 return -EINVAL; in xsk_append_merge_buffer()
1273 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1274 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1278 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_merge()
1310 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_merge()
1319 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1324 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1326 u64_stats_add(&stats->bytes, len); in virtnet_receive_xsk_buf()
1333 pr_debug("%s: short packet %i\n", dev->name, len); in virtnet_receive_xsk_buf()
1339 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1341 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1357 int num; in virtnet_add_recvbuf_xsk() local
1359 xsk_buffs = rq->xsk_buffs; in virtnet_add_recvbuf_xsk()
1361 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); in virtnet_add_recvbuf_xsk()
1362 if (!num) in virtnet_add_recvbuf_xsk()
1363 return -ENOMEM; in virtnet_add_recvbuf_xsk()
1365 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1367 for (i = 0; i < num; ++i) { in virtnet_add_recvbuf_xsk()
1369 * We assume XDP_PACKET_HEADROOM is larger than hdr->len. in virtnet_add_recvbuf_xsk()
1372 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1374 sg_init_table(rq->sg, 1); in virtnet_add_recvbuf_xsk()
1375 sg_fill_dma(rq->sg, addr, len); in virtnet_add_recvbuf_xsk()
1377 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp); in virtnet_add_recvbuf_xsk()
1382 return num; in virtnet_add_recvbuf_xsk()
1385 for (; i < num; ++i) in virtnet_add_recvbuf_xsk()
1397 return -ENETDOWN; in virtnet_xsk_wakeup()
1399 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1400 return -EINVAL; in virtnet_xsk_wakeup()
1402 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1404 if (napi_if_scheduled_mark_missed(&sq->napi)) in virtnet_xsk_wakeup()
1408 virtqueue_napi_schedule(&sq->napi, sq->vq); in virtnet_xsk_wakeup()
1423 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1424 return -EOVERFLOW; in __virtnet_xdp_xmit_one()
1428 nr_frags = shinfo->nr_frags; in __virtnet_xdp_xmit_one()
1434 * xdp_return_frame(), which will involve xdpf->data and in __virtnet_xdp_xmit_one()
1435 * xdpf->headroom. Therefore, we need to update the value of in __virtnet_xdp_xmit_one()
1438 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1439 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1441 hdr = xdpf->data; in __virtnet_xdp_xmit_one()
1442 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1443 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1445 sg_init_table(sq->sg, nr_frags + 1); in __virtnet_xdp_xmit_one()
1446 sg_set_buf(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
1448 skb_frag_t *frag = &shinfo->frags[i]; in __virtnet_xdp_xmit_one()
1450 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), in __virtnet_xdp_xmit_one()
1454 err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1, in __virtnet_xdp_xmit_one()
1457 return -ENOSPC; /* Caller handle free/refcnt */ in __virtnet_xdp_xmit_one()
1462 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1467 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
1472 struct netdev_queue *txq; \
1476 if (v->curr_queue_pairs > nr_cpu_ids) { \
1477 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1479 txq = netdev_get_tx_queue(v->dev, qp); \
1480 __netif_tx_acquire(txq); \
1482 qp = cpu % v->curr_queue_pairs; \
1483 txq = netdev_get_tx_queue(v->dev, qp); \
1484 __netif_tx_lock(txq, cpu); \
1486 v->sq + qp; \
1490 struct netdev_queue *txq; \
1493 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1494 if (v->curr_queue_pairs > nr_cpu_ids) \
1495 __netif_tx_release(txq); \
1497 __netif_tx_unlock(txq); \
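
The two macro bodies above clearly form a get/put pair bracketing XDP transmit. A hedged usage sketch; the get/put names are assumed, not quoted from this list:

	struct send_queue *sq;

	sq = virtnet_xdp_get_sq(vi);	/* pick an sq; take the txq lock only
					 * when queues are shared across CPUs */
	/* ... queue XDP frames on sq and kick the virtqueue ... */
	virtnet_xdp_put_sq(vi, sq);	/* release or unlock the matching txq */
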
1505 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1516 xdp_prog = rcu_access_pointer(rq->xdp_prog); in virtnet_xdp_xmit()
1518 return -ENXIO; in virtnet_xdp_xmit()
1523 ret = -EINVAL; in virtnet_xdp_xmit()
1528 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1540 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1544 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
1548 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
1549 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xdp_xmit()
1550 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xdp_xmit()
1551 u64_stats_add(&sq->stats.xdp_tx, n); in virtnet_xdp_xmit()
1552 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); in virtnet_xdp_xmit()
1553 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xdp_xmit()
1554 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
1568 for (i = 0; i < shinfo->nr_frags; i++) { in put_xdp_frags()
1569 xdp_page = skb_frag_page(&shinfo->frags[i]); in put_xdp_frags()
1585 u64_stats_inc(&stats->xdp_packets); in virtnet_xdp_handler()
1592 u64_stats_inc(&stats->xdp_tx); in virtnet_xdp_handler()
1610 u64_stats_inc(&stats->xdp_redirects); in virtnet_xdp_handler()
1631 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1641 * with large buffers with sufficient headroom - so it should affect
1668 while (--*num_buf) { in xdp_linearize_page()
1678 off = buf - page_address(p); in xdp_linearize_page()
1695 *len = page_off - XDP_PACKET_HEADROOM; in xdp_linearize_page()
1713 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1722 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1738 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1748 if (unlikely(hdr->hdr.gso_type)) in receive_small_xdp()
1752 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in receive_small_xdp()
1759 int offset = buf - page_address(page) + header_offset; in receive_small_xdp()
1760 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1765 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1779 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small_xdp()
1780 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1788 len = xdp.data_end - xdp.data; in receive_small_xdp()
1789 metasize = xdp.data - xdp.data_meta; in receive_small_xdp()
1800 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); in receive_small_xdp()
1810 u64_stats_inc(&stats->xdp_drops); in receive_small_xdp()
1812 u64_stats_inc(&stats->drops); in receive_small_xdp()
1830 /* We passed the address of virtnet header to virtio-core, in receive_small()
1833 buf -= VIRTNET_RX_PAD + xdp_headroom; in receive_small()
1835 len -= vi->hdr_len; in receive_small()
1836 u64_stats_add(&stats->bytes, len); in receive_small()
1840 dev->name, len, GOOD_PACKET_LEN); in receive_small()
1845 if (unlikely(vi->xdp_enabled)) { in receive_small()
1849 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_small()
1865 u64_stats_inc(&stats->drops); in receive_small()
1881 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
1888 u64_stats_inc(&stats->drops); in receive_big()
1901 while (num_buf-- > 1) { in mergeable_buf_free()
1905 dev->name, num_buf); in mergeable_buf_free()
1909 u64_stats_add(&stats->bytes, len); in mergeable_buf_free()
1917 * virtio-net there are 2 points that do not match its requirements:
1920 * like eth_type_trans() (which virtio-net does in receive_buf()).
1933 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { in build_skb_from_xdp_buff()
1939 nr_frags = sinfo->nr_frags; in build_skb_from_xdp_buff()
1941 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); in build_skb_from_xdp_buff()
1945 headroom = xdp->data - xdp->data_hard_start; in build_skb_from_xdp_buff()
1946 data_len = xdp->data_end - xdp->data; in build_skb_from_xdp_buff()
1950 metasize = xdp->data - xdp->data_meta; in build_skb_from_xdp_buff()
1957 sinfo->xdp_frags_size, in build_skb_from_xdp_buff()
1986 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in virtnet_build_xdp_buff_mrg()
1987 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, in virtnet_build_xdp_buff_mrg()
1988 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
1994 /* If we want to build multi-buffer xdp, we need in virtnet_build_xdp_buff_mrg()
2002 shinfo->nr_frags = 0; in virtnet_build_xdp_buff_mrg()
2003 shinfo->xdp_frags_size = 0; in virtnet_build_xdp_buff_mrg()
2007 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2009 while (--*num_buf > 0) { in virtnet_build_xdp_buff_mrg()
2013 dev->name, *num_buf, in virtnet_build_xdp_buff_mrg()
2014 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2019 u64_stats_add(&stats->bytes, len); in virtnet_build_xdp_buff_mrg()
2021 offset = buf - page_address(page); in virtnet_build_xdp_buff_mrg()
2030 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { in virtnet_build_xdp_buff_mrg()
2033 dev->name, len, (unsigned long)(truesize - room)); in virtnet_build_xdp_buff_mrg()
2038 frag = &shinfo->frags[shinfo->nr_frags++]; in virtnet_build_xdp_buff_mrg()
2043 shinfo->xdp_frags_size += len; in virtnet_build_xdp_buff_mrg()
2051 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2071 * in-flight packets from before XDP was enabled reach in mergeable_xdp_get_buf()
2074 if (unlikely(hdr->hdr.gso_type)) in mergeable_xdp_get_buf()
2078 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in mergeable_xdp_get_buf()
2089 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { in mergeable_xdp_get_buf()
2101 if (!xdp_prog->aux->xdp_has_frags) { in mergeable_xdp_get_buf()
2143 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2145 int offset = buf - page_address(page); in receive_mergeable_xdp()
2187 u64_stats_inc(&stats->xdp_drops); in receive_mergeable_xdp()
2188 u64_stats_inc(&stats->drops); in receive_mergeable_xdp()
2200 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; in virtnet_skb_append_frag()
2208 skb_shinfo(curr_skb)->frag_list = nskb; in virtnet_skb_append_frag()
2210 curr_skb->next = nskb; in virtnet_skb_append_frag()
2212 head_skb->truesize += nskb->truesize; in virtnet_skb_append_frag()
2217 head_skb->data_len += len; in virtnet_skb_append_frag()
2218 head_skb->len += len; in virtnet_skb_append_frag()
2219 head_skb->truesize += truesize; in virtnet_skb_append_frag()
2222 offset = buf - page_address(page); in virtnet_skb_append_frag()
2225 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, in virtnet_skb_append_frag()
2245 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2247 int offset = buf - page_address(page); in receive_mergeable()
2255 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2257 if (unlikely(len > truesize - room)) { in receive_mergeable()
2259 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2264 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2268 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_mergeable()
2283 while (--num_buf) { in receive_mergeable()
2287 dev->name, num_buf, in receive_mergeable()
2288 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2289 hdr->num_buffers)); in receive_mergeable()
2294 u64_stats_add(&stats->bytes, len); in receive_mergeable()
2301 if (unlikely(len > truesize - room)) { in receive_mergeable()
2303 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2314 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
2322 u64_stats_inc(&stats->drops); in receive_mergeable()
2335 switch (__le16_to_cpu(hdr_hash->hash_report)) { in virtio_skb_set_hash()
2353 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); in virtio_skb_set_hash()
2360 struct net_device *dev = vi->dev; in virtnet_receive_done()
2363 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2364 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); in virtnet_receive_done()
2367 skb->ip_summed = CHECKSUM_UNNECESSARY; in virtnet_receive_done()
2369 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, in virtnet_receive_done()
2370 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2372 dev->name, hdr->hdr.gso_type, in virtnet_receive_done()
2373 hdr->hdr.gso_size); in virtnet_receive_done()
2377 skb_record_rx_queue(skb, vq2rxq(rq->vq)); in virtnet_receive_done()
2378 skb->protocol = eth_type_trans(skb, dev); in virtnet_receive_done()
2380 ntohs(skb->protocol), skb->len, skb->pkt_type); in virtnet_receive_done()
2382 napi_gro_receive(&rq->napi, skb); in virtnet_receive_done()
2395 struct net_device *dev = vi->dev; in receive_buf()
2399 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2400 pr_debug("%s: short packet %i\n", dev->name, len); in receive_buf()
2413 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2415 if (vi->mergeable_rx_bufs) in receive_buf()
2418 else if (vi->big_packets) in receive_buf()
2431 * not need to use mergeable_len_to_ctx here - it is enough
2440 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2448 return -ENOMEM; in add_recvbuf_small()
2452 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2454 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_small()
2456 if (rq->do_dma) in add_recvbuf_small()
2471 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2473 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2474 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2479 return -ENOMEM; in add_recvbuf_big()
2481 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
2484 first->private = (unsigned long)list; in add_recvbuf_big()
2491 return -ENOMEM; in add_recvbuf_big()
2495 /* rq->sg[0], rq->sg[1] share the same page */ in add_recvbuf_big()
2496 /* a separated rq->sg[0] for header - required in case !any_header_sg */ in add_recvbuf_big()
2497 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2499 /* rq->sg[1] for data packet, from offset */ in add_recvbuf_big()
2501 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
2504 first->private = (unsigned long)list; in add_recvbuf_big()
2505 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2517 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len()
2518 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2522 return PAGE_SIZE - room; in get_mergeable_buf_len()
2525 rq->min_buf_len, PAGE_SIZE - hdr_len); in get_mergeable_buf_len()
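
A hedged reconstruction of the sizing policy around the two code paths above (the EWMA read and the final alignment step are assumptions, not quoted):

/* if (room)
 *         return PAGE_SIZE - room;
 * len = hdr_len + clamp(ewma_pkt_len_read(avg_pkt_len),
 *                       rq->min_buf_len, PAGE_SIZE - hdr_len);
 * return ALIGN(len, L1_CACHE_BYTES);
 */
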
2533 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
2546 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); in add_recvbuf_mergeable()
2550 return -ENOMEM; in add_recvbuf_mergeable()
2553 hole = alloc_frag->size - alloc_frag->offset; in add_recvbuf_mergeable()
2563 alloc_frag->offset += hole; in add_recvbuf_mergeable()
2569 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_mergeable()
2571 if (rq->do_dma) in add_recvbuf_mergeable()
2591 if (rq->xsk_pool) { in try_fill_recv()
2592 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2597 if (vi->mergeable_rx_bufs) in try_fill_recv()
2599 else if (vi->big_packets) in try_fill_recv()
2606 } while (rq->vq->num_free); in try_fill_recv()
2609 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { in try_fill_recv()
2612 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); in try_fill_recv()
2613 u64_stats_inc(&rq->stats.kicks); in try_fill_recv()
2614 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); in try_fill_recv()
2617 return err != -ENOMEM; in try_fill_recv()
2622 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done()
2623 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2625 rq->calls++; in skb_recv_done()
2626 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
2646 if (!napi->weight) in virtnet_napi_tx_enable()
2652 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2653 napi->weight = 0; in virtnet_napi_tx_enable()
2662 if (napi->weight) in virtnet_napi_tx_disable()
2673 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2674 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2676 napi_disable(&rq->napi); in refill_work()
2678 virtnet_napi_enable(rq->vq, &rq->napi); in refill_work()
2684 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2699 buf = virtqueue_get_buf(rq->vq, &len); in virtnet_receive_xsk_bufs()
2720 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
2741 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
2745 if (rq->xsk_pool) in virtnet_receive()
2750 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { in virtnet_receive()
2752 spin_lock(&vi->refill_lock); in virtnet_receive()
2753 if (vi->refill_enabled) in virtnet_receive()
2754 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2755 spin_unlock(&vi->refill_lock); in virtnet_receive()
2760 u64_stats_update_begin(&rq->stats.syncp); in virtnet_receive()
2765 item = (u64_stats_t *)((u8 *)&rq->stats + offset); in virtnet_receive()
2770 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); in virtnet_receive()
2771 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); in virtnet_receive()
2773 u64_stats_update_end(&rq->stats.syncp); in virtnet_receive()
2780 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx()
2781 unsigned int index = vq2rxq(rq->vq); in virtnet_poll_cleantx()
2782 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2783 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx() local
2785 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
2788 if (__netif_tx_trylock(txq)) { in virtnet_poll_cleantx()
2789 if (sq->reset) { in virtnet_poll_cleantx()
2790 __netif_tx_unlock(txq); in virtnet_poll_cleantx()
2795 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
2796 free_old_xmit(sq, txq, !!budget); in virtnet_poll_cleantx()
2797 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
2799 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_cleantx()
2800 if (netif_tx_queue_stopped(txq)) { in virtnet_poll_cleantx()
2801 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_cleantx()
2802 u64_stats_inc(&sq->stats.wake); in virtnet_poll_cleantx()
2803 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_cleantx()
2805 netif_tx_wake_queue(txq); in virtnet_poll_cleantx()
2808 __netif_tx_unlock(txq); in virtnet_poll_cleantx()
2816 if (!rq->packets_in_napi) in virtnet_rx_dim_update()
2822 dim_update_sample(rq->calls, in virtnet_rx_dim_update()
2823 u64_stats_read(&rq->stats.packets), in virtnet_rx_dim_update()
2824 u64_stats_read(&rq->stats.bytes), in virtnet_rx_dim_update()
2827 net_dim(&rq->dim, cur_sample); in virtnet_rx_dim_update()
2828 rq->packets_in_napi = 0; in virtnet_rx_dim_update()
2835 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
2844 rq->packets_in_napi += received; in virtnet_poll()
2851 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
2856 if (napi_complete && rq->dim_enabled) in virtnet_poll()
2862 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
2863 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
2864 u64_stats_inc(&sq->stats.kicks); in virtnet_poll()
2865 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
2875 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); in virtnet_disable_queue_pair()
2876 napi_disable(&vi->rq[qp_index].napi); in virtnet_disable_queue_pair()
2877 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
2882 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
2885 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
2886 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
2890 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
2895 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index)); in virtnet_enable_queue_pair()
2896 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); in virtnet_enable_queue_pair()
2897 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); in virtnet_enable_queue_pair()
2902 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
2908 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
2918 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
2921 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
2924 vi->speed = speed; in virtnet_update_settings()
2926 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
2929 vi->duplex = duplex; in virtnet_update_settings()
2939 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
2940 if (i < vi->curr_queue_pairs) in virtnet_open()
2942 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
2943 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
2950 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
2951 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
2952 netif_carrier_on(vi->dev); in virtnet_open()
2953 virtio_config_driver_enable(vi->vdev); in virtnet_open()
2955 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
2963 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
2965 for (i--; i >= 0; i--) { in virtnet_open()
2967 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
2976 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
2977 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
2978 struct netdev_queue *txq; in virtnet_poll_tx() local
2988 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
2989 __netif_tx_lock(txq, raw_smp_processor_id()); in virtnet_poll_tx()
2990 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
2991 free_old_xmit(sq, txq, !!budget); in virtnet_poll_tx()
2993 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_tx()
2994 if (netif_tx_queue_stopped(txq)) { in virtnet_poll_tx()
2995 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_tx()
2996 u64_stats_inc(&sq->stats.wake); in virtnet_poll_tx()
2997 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_tx()
2999 netif_tx_wake_queue(txq); in virtnet_poll_tx()
3002 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
3007 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3009 __netif_tx_unlock(txq); in virtnet_poll_tx()
3012 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
3014 __netif_tx_lock(txq, raw_smp_processor_id()); in virtnet_poll_tx()
3015 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3016 __netif_tx_unlock(txq); in virtnet_poll_tx()
3028 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; in xmit_skb()
3029 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
3031 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3034 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3036 can_push = vi->any_header_sg && in xmit_skb()
3037 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && in xmit_skb()
3042 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); in xmit_skb()
3044 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; in xmit_skb()
3046 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, in xmit_skb()
3047 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
3049 return -EPROTO; in xmit_skb()
3051 if (vi->mergeable_rx_bufs) in xmit_skb()
3052 hdr->num_buffers = 0; in xmit_skb()
3054 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
3057 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
3063 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
3064 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
3069 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, in xmit_skb()
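
The two descriptor layouts implied by the can_push branch in xmit_skb() above; the entry counts follow directly from the sg_init_table() call:

/* can_push:   header prepended inside the skb headroom, so the whole frame
 *             is covered by skb_to_sgvec() starting at sq->sg
 *             -> 1 + nr_frags entries
 * otherwise:  out-of-band header in sq->sg[0], skb data mapped from
 *             sq->sg + 1
 *             -> 2 + nr_frags entries
 */
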
3077 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3079 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); in start_xmit() local
3081 bool use_napi = sq->napi.weight; in start_xmit()
3087 virtqueue_disable_cb(sq->vq); in start_xmit()
3089 free_old_xmit(sq, txq, false); in start_xmit()
3092 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in start_xmit()
3104 dev_warn(&dev->dev, in start_xmit()
3105 "Unexpected TXQ (%d) queue failure: %d\n", in start_xmit()
3120 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : in start_xmit()
3121 !xmit_more || netif_xmit_stopped(txq); in start_xmit()
3123 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
3124 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
3125 u64_stats_inc(&sq->stats.kicks); in start_xmit()
3126 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
3135 bool running = netif_running(vi->dev); in virtnet_rx_pause()
3138 napi_disable(&rq->napi); in virtnet_rx_pause()
3139 virtnet_cancel_dim(vi, &rq->dim); in virtnet_rx_pause()
3145 bool running = netif_running(vi->dev); in virtnet_rx_resume()
3148 schedule_delayed_work(&vi->refill, 0); in virtnet_rx_resume()
3151 virtnet_napi_enable(rq->vq, &rq->napi); in virtnet_rx_resume()
3159 qindex = rq - vi->rq; in virtnet_rx_resize()
3163 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); in virtnet_rx_resize()
3165 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3173 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3174 struct netdev_queue *txq; in virtnet_tx_pause() local
3177 qindex = sq - vi->sq; in virtnet_tx_pause()
3180 virtnet_napi_tx_disable(&sq->napi); in virtnet_tx_pause()
3182 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3187 __netif_tx_lock_bh(txq); in virtnet_tx_pause()
3190 sq->reset = true; in virtnet_tx_pause()
3193 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3195 __netif_tx_unlock_bh(txq); in virtnet_tx_pause()
3200 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3201 struct netdev_queue *txq; in virtnet_tx_resume() local
3204 qindex = sq - vi->sq; in virtnet_tx_resume()
3206 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3208 __netif_tx_lock_bh(txq); in virtnet_tx_resume()
3209 sq->reset = false; in virtnet_tx_resume()
3210 netif_tx_wake_queue(txq); in virtnet_tx_resume()
3211 __netif_tx_unlock_bh(txq); in virtnet_tx_resume()
3214 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); in virtnet_tx_resume()
3222 qindex = sq - vi->sq; in virtnet_tx_resize()
3226 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); in virtnet_tx_resize()
3228 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3250 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3252 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3253 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3254 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3255 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3257 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3264 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3271 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3273 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3275 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3279 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3285 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3286 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3292 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3293 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
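
A hedged usage sketch for the control-queue helper above. The no-reply wrapper name and the MAC constants are assumptions here (they are not part of this match list), sketching what a caller such as virtnet_set_mac_address() is expected to do:

	struct scatterlist sg;

	sg_init_one(&sg, addr->sa_data, dev->addr_len);
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg))
		dev_warn(&vdev->dev, "MAC set command rejected\n");
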
3306 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3311 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3312 return -EOPNOTSUPP; in virtnet_set_mac_address()
3316 return -ENOMEM; in virtnet_set_mac_address()
3323 sg_init_one(&sg, addr->sa_data, dev->addr_len); in virtnet_set_mac_address()
3326 dev_warn(&vdev->dev, in virtnet_set_mac_address()
3328 ret = -EINVAL; in virtnet_set_mac_address()
3336 for (i = 0; i < dev->addr_len; i++) in virtnet_set_mac_address()
3339 i, addr->sa_data[i]); in virtnet_set_mac_address()
3357 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3359 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3360 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3363 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_stats()
3364 tpackets = u64_stats_read(&sq->stats.packets); in virtnet_stats()
3365 tbytes = u64_stats_read(&sq->stats.bytes); in virtnet_stats()
3366 terrors = u64_stats_read(&sq->stats.tx_timeouts); in virtnet_stats()
3367 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_stats()
3370 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_stats()
3371 rpackets = u64_stats_read(&rq->stats.packets); in virtnet_stats()
3372 rbytes = u64_stats_read(&rq->stats.bytes); in virtnet_stats()
3373 rdrops = u64_stats_read(&rq->stats.drops); in virtnet_stats()
3374 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_stats()
3376 tot->rx_packets += rpackets; in virtnet_stats()
3377 tot->tx_packets += tpackets; in virtnet_stats()
3378 tot->rx_bytes += rbytes; in virtnet_stats()
3379 tot->tx_bytes += tbytes; in virtnet_stats()
3380 tot->rx_dropped += rdrops; in virtnet_stats()
3381 tot->tx_errors += terrors; in virtnet_stats()
3384 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); in virtnet_stats()
3385 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); in virtnet_stats()
3386 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); in virtnet_stats()
3387 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); in virtnet_stats()
3394 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3404 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3406 vi->rss.indirection_table[i] = indir_val; in virtnet_rss_update_by_qpairs()
3408 vi->rss.max_tx_vq = queue_pairs; in virtnet_rss_update_by_qpairs()
3415 struct net_device *dev = vi->dev; in virtnet_set_queues()
3418 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3428 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3429 memcpy(&old_rss, &vi->rss, sizeof(old_rss)); in virtnet_set_queues()
3430 if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) { in virtnet_set_queues()
3431 vi->rss.indirection_table = old_rss.indirection_table; in virtnet_set_queues()
3432 return -ENOMEM; in virtnet_set_queues()
3439 rss_indirection_table_free(&vi->rss); in virtnet_set_queues()
3440 memcpy(&vi->rss, &old_rss, sizeof(old_rss)); in virtnet_set_queues()
3442 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", in virtnet_set_queues()
3444 return -EINVAL; in virtnet_set_queues()
3452 return -ENOMEM; in virtnet_set_queues()
3454 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3459 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", in virtnet_set_queues()
3461 return -EINVAL; in virtnet_set_queues()
3464 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3466 if (dev->flags & IFF_UP) in virtnet_set_queues()
3467 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3479 /* Make sure refill_work doesn't re-enable napi! */ in virtnet_close()
3480 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3484 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3488 cancel_work_sync(&vi->config_work); in virtnet_close()
3490 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3492 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3505 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3515 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3520 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); in virtnet_rx_mode_work()
3526 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); in virtnet_rx_mode_work()
3531 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", in virtnet_rx_mode_work()
3534 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); in virtnet_rx_mode_work()
3539 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", in virtnet_rx_mode_work()
3546 /* MAC filter - use one buffer for both lists */ in virtnet_rx_mode_work()
3548 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); in virtnet_rx_mode_work()
3559 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3562 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3565 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3568 mac_data = (void *)&mac_data->macs[uc_count][0]; in virtnet_rx_mode_work()
3570 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3573 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3578 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3582 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); in virtnet_rx_mode_work()
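
Layout of the single buffer the "MAC filter" comment above refers to: two variable-length virtio_net_ctrl_mac tables packed back to back, unicast first, multicast immediately after it (the offsets follow the two sg_set_buf() calls shown):

/*   [ entries = uc_count | macs[uc_count][ETH_ALEN] ]
 *   [ entries = mc_count | macs[mc_count][ETH_ALEN] ]
 *
 * allocation size = 2 * sizeof(entries) + (uc_count + mc_count) * ETH_ALEN
 */
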
3593 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3594 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3606 return -ENOMEM; in virtnet_vlan_rx_add_vid()
3608 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3613 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); in virtnet_vlan_rx_add_vid()
3626 return -ENOMEM; in virtnet_vlan_rx_kill_vid()
3628 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3633 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); in virtnet_vlan_rx_kill_vid()
3641 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3642 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3643 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3644 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
3647 vi->affinity_hint_set = false; in virtnet_clean_affinity()
3666 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
3667 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
3668 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
3672 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
3680 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
3681 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
3682 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
3686 vi->affinity_hint_set = true; in virtnet_set_affinity()
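
A worked example of the CPU-spreading arithmetic in virtnet_set_affinity() above (CPU and queue counts are illustrative; the per-queue group size is inferred, not quoted):

/* 8 online CPUs, 3 queue pairs:
 *     stride     = max(8 / 3, 1) = 2
 *     stragglers = 8 % 3         = 2
 * so, presumably, the first two queue pairs get stride + 1 = 3 CPUs each,
 * the last one gets 2, and the same mask is handed to
 * __netif_set_xps_queue() for that queue.
 */
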
3721 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3725 &vi->node_dead); in virtnet_cpu_notif_add()
3728 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3734 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
3736 &vi->node_dead); in virtnet_cpu_notif_remove()
3747 return -ENOMEM; in virtnet_send_ctrl_coal_vq_cmd()
3749 coal_vq->vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
3750 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
3751 coal_vq->coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
3757 return -EINVAL; in virtnet_send_ctrl_coal_vq_cmd()
3768 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
3769 return -EOPNOTSUPP; in virtnet_send_rx_ctrl_coal_vq_cmd()
3776 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
3777 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
3788 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
3789 return -EOPNOTSUPP; in virtnet_send_tx_ctrl_coal_vq_cmd()
3796 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
3797 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
3809 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
3810 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
3811 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
3812 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
3826 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in virtnet_set_ringparam()
3827 return -EINVAL; in virtnet_set_ringparam()
3829 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
3830 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
3832 if (ring->rx_pending == rx_pending && in virtnet_set_ringparam()
3833 ring->tx_pending == tx_pending) in virtnet_set_ringparam()
3836 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
3837 return -EINVAL; in virtnet_set_ringparam()
3839 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
3840 return -EINVAL; in virtnet_set_ringparam()
3842 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
3843 rq = vi->rq + i; in virtnet_set_ringparam()
3844 sq = vi->sq + i; in virtnet_set_ringparam()
3846 if (ring->tx_pending != tx_pending) { in virtnet_set_ringparam()
3847 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
3851 /* Upon disabling and re-enabling a transmit virtqueue, the device must in virtnet_set_ringparam()
3857 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
3858 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
3863 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
3867 if (ring->rx_pending != rx_pending) { in virtnet_set_ringparam()
3868 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
3873 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
3875 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
3876 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
3877 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
3878 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
3888 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
3896 sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); in virtnet_commit_rss_command()
3898 if (vi->has_rss) { in virtnet_commit_rss_command()
3899 sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size; in virtnet_commit_rss_command()
3900 sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); in virtnet_commit_rss_command()
3902 sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t)); in virtnet_commit_rss_command()
3906 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); in virtnet_commit_rss_command()
3907 sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); in virtnet_commit_rss_command()
3909 sg_buf_size = vi->rss_key_size; in virtnet_commit_rss_command()
3910 sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); in virtnet_commit_rss_command()
3913 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
3920 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); in virtnet_commit_rss_command()
3927 vi->rss.hash_types = vi->rss_hash_types_supported; in virtnet_init_default_rss()
3928 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
3929 vi->rss.indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
3930 ? vi->rss_indir_table_size - 1 : 0; in virtnet_init_default_rss()
3931 vi->rss.unclassified_queue = 0; in virtnet_init_default_rss()
3933 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
3935 vi->rss.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
3937 netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); in virtnet_init_default_rss()
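
A worked example of the defaults produced here together with virtnet_rss_update_by_qpairs() (sizes are illustrative; the round-robin fill of the indirection table is inferred from the loop fragment above, not quoted):

/* rss_indir_table_size = 128, curr_queue_pairs = 4:
 *     indirection_table_mask = 128 - 1 = 127
 *     indirection_table[i]   = i % 4        (0, 1, 2, 3, 0, 1, ...)
 *     max_tx_vq              = 4
 * and the hash key is rss_key_size random bytes from netdev_rss_key_fill().
 */
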
3942 info->data = 0; in virtnet_get_hashflow()
3943 switch (info->flow_type) { in virtnet_get_hashflow()
3945 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
3946 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
3948 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
3949 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3953 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
3954 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
3956 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
3957 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3961 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
3962 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
3964 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
3965 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3969 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
3970 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
3972 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
3973 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3977 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
3978 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3982 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
3983 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
3987 info->data = 0; in virtnet_get_hashflow()
3994 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
3995 bool is_disable = info->data & RXH_DISCARD; in virtnet_set_hashflow()
3996 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); in virtnet_set_hashflow()
3999 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) in virtnet_set_hashflow()
4002 switch (info->flow_type) { in virtnet_set_hashflow()
4043 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4046 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4047 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4048 vi->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4049 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4060 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4062 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in virtnet_get_drvinfo()
4063 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); in virtnet_get_drvinfo()
4064 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); in virtnet_get_drvinfo()
4073 u16 queue_pairs = channels->combined_count; in virtnet_set_channels()
4079 if (channels->rx_count || channels->tx_count || channels->other_count) in virtnet_set_channels()
4080 return -EINVAL; in virtnet_set_channels()
4082 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4083 return -EINVAL; in virtnet_set_channels()
4089 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4090 return -EINVAL; in virtnet_set_channels()
4108 int num, int qid, const struct virtnet_stat_desc *desc) in virtnet_stats_sprintf() argument
4113 for (i = 0; i < num; ++i) in virtnet_stats_sprintf()
4116 for (i = 0; i < num; ++i) in virtnet_stats_sprintf()
4121 /* qid == -1: for rx/tx queue total field */
4127 u32 num; in virtnet_get_stats_string() local
4132 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4134 num = ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_get_stats_string()
4136 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); in virtnet_get_stats_string()
4145 num = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_get_stats_string()
4147 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4152 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4154 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_get_stats_string()
4156 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4159 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4161 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_get_stats_string()
4163 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4166 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4168 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_get_stats_string()
4170 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4179 num = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_get_stats_string()
4181 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4186 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4188 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_get_stats_string()
4190 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4193 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4195 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_get_stats_string()
4197 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4200 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4202 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_get_stats_string()
4204 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4212 /* The stats are written to qstats or ethtool -S */
4234 ctx->data = data; in virtnet_stats_ctx_init()
4235 ctx->to_qstat = to_qstat; in virtnet_stats_ctx_init()
4238 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_stats_ctx_init()
4239 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_stats_ctx_init()
4243 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4244 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4245 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_stats_ctx_init()
4246 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4249 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4250 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4251 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_stats_ctx_init()
4252 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4255 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4256 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; in virtnet_stats_ctx_init()
4257 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_stats_ctx_init()
4258 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); in virtnet_stats_ctx_init()
4261 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4262 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4263 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_stats_ctx_init()
4264 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4269 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4270 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4271 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_stats_ctx_init()
4272 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4275 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4276 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; in virtnet_stats_ctx_init()
4277 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_stats_ctx_init()
4278 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); in virtnet_stats_ctx_init()
4281 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4282 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4283 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_stats_ctx_init()
4284 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4287 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4288 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4289 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_stats_ctx_init()
4290 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4296 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_stats_ctx_init()
4297 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_stats_ctx_init()
4299 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4302 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; in virtnet_stats_ctx_init()
4303 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_stats_ctx_init()
4304 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); in virtnet_stats_ctx_init()
4309 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4310 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4311 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_stats_ctx_init()
4312 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4315 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4316 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4317 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_stats_ctx_init()
4318 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4321 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4322 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4323 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_stats_ctx_init()
4324 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4329 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4330 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4331 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_stats_ctx_init()
4332 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4335 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4336 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4337 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_stats_ctx_init()
4338 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4341 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4342 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4343 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_stats_ctx_init()
4344 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4348 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4350 * @num: number of fields per queue
4354 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num) in stats_sum_queue() argument
4356 u32 step = num; in stats_sum_queue()
4360 for (i = 0; i < num; ++i) { in stats_sum_queue()
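Only the head of stats_sum_queue() is matched above. As a rough illustration of the stride-based accumulation its parameters imply, here is a hedged sketch (the editor's, not the driver's body): each of the q_num per-queue blocks holds num consecutive u64 fields, and field i of every block is folded into sum[i].

#include <linux/types.h>

/* Illustrative sketch only: sum field i across q_num blocks of num fields. */
static void example_sum_queue(u64 *sum, u32 num, const u64 *q_value, u32 q_num)
{
	u32 step = num;	/* distance between the same field of two queues */
	u32 i, j;

	for (i = 0; i < num; ++i)
		for (j = 0; j < q_num; ++j)
			sum[i] += q_value[i + j * step];
}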
4375 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_total_fields()
4376 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_total_fields()
4377 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_total_fields()
4379 first_rx_q = ctx->data + num_rx + num_tx + num_cq; in virtnet_fill_total_fields()
4380 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4382 data = ctx->data; in virtnet_fill_total_fields()
4384 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4386 data = ctx->data + num_rx; in virtnet_fill_total_fields()
4388 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
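From the offset arithmetic in virtnet_fill_total_fields() above, the flat u64 buffer appears to be laid out with the totals first and the per-queue blocks after them. A short sketch of that layout and of locating the first per-queue rx block, derived from the visible first_rx_q/first_tx_q lines (naming is the editor's):

/*
 * Apparent layout of ctx->data (inferred from first_rx_q/first_tx_q above):
 *
 *   [rx totals: num_rx][tx totals: num_tx][cvq: num_cq]
 *   [rx queue 0..N-1: N * num_rx][tx queue 0..N-1: N * num_tx]
 */
static u64 *example_first_rx_queue(u64 *data, u32 num_rx, u32 num_tx, u32 num_cq)
{
	return data + num_rx + num_tx + num_cq;	/* start of per-queue rx */
}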
4400 int i, num; in virtnet_fill_stats_qstat() local
4403 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats_qstat()
4408 num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_fill_stats_qstat()
4411 num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_fill_stats_qstat()
4414 for (i = 0; i < num; ++i) { in virtnet_fill_stats_qstat()
4415 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4417 ctx->data[offset] = u64_stats_read(v_stat); in virtnet_fill_stats_qstat()
4424 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_fill_stats_qstat()
4431 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_fill_stats_qstat()
4438 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_fill_stats_qstat()
4445 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_fill_stats_qstat()
4452 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_fill_stats_qstat()
4459 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_fill_stats_qstat()
4466 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_fill_stats_qstat()
4473 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_fill_stats_qstat()
4481 for (i = 0; i < num; ++i) { in virtnet_fill_stats_qstat()
4482 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4484 ctx->data[offset] = le64_to_cpu(*v); in virtnet_fill_stats_qstat()
4488 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4507 int i, num; in virtnet_fill_stats() local
4509 if (ctx->to_qstat) in virtnet_fill_stats()
4512 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_stats()
4513 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_stats()
4514 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_stats()
4517 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats()
4523 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4525 num = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_fill_stats()
4531 offset += num; in virtnet_fill_stats()
4536 num = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_fill_stats()
4542 offset += num; in virtnet_fill_stats()
4547 num = ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_fill_stats()
4551 offset += num; in virtnet_fill_stats()
4556 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_fill_stats()
4560 offset += num; in virtnet_fill_stats()
4565 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_fill_stats()
4569 offset += num; in virtnet_fill_stats()
4574 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_fill_stats()
4578 offset += num; in virtnet_fill_stats()
4583 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_fill_stats()
4587 offset += num; in virtnet_fill_stats()
4592 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_fill_stats()
4596 offset += num; in virtnet_fill_stats()
4601 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_fill_stats()
4605 offset += num; in virtnet_fill_stats()
4611 for (i = 0; i < num; ++i) { in virtnet_fill_stats()
4613 ctx->data[offset + i] = le64_to_cpu(*v); in virtnet_fill_stats()
4619 for (i = 0; i < num; ++i) { in virtnet_fill_stats()
4621 ctx->data[offset + i] = u64_stats_read(v_stat); in virtnet_fill_stats()
4646 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { in __virtnet_get_hw_stats()
4648 qid = le16_to_cpu(hdr->vq_index); in __virtnet_get_hw_stats()
4649 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
4661 u64 bitmap = ctx->bitmap[qtype]; in virtnet_make_stat_req()
4666 req->stats[*idx].vq_index = cpu_to_le16(qid); in virtnet_make_stat_req()
4667 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); in virtnet_make_stat_req()
4671 /* qid == -1: get the stats of all vqs.
4683 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
4686 if (qid == -1) { in virtnet_get_hw_stats()
4687 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
4700 if (ctx->bitmap[qtype]) { in virtnet_get_hw_stats()
4702 res_size += ctx->size[qtype]; in virtnet_get_hw_stats()
4706 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { in virtnet_get_hw_stats()
4707 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; in virtnet_get_hw_stats()
4713 return -ENOMEM; in virtnet_get_hw_stats()
4718 return -ENOMEM; in virtnet_get_hw_stats()
4726 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
4745 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
4746 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
4750 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4753 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4772 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
4774 return -EOPNOTSUPP; in virtnet_get_sset_count()
4787 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
4788 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
4790 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
4791 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
4792 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
4794 stats_base = (const u8 *)&rq->stats; in virtnet_get_ethtool_stats()
4796 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_get_ethtool_stats()
4798 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_get_ethtool_stats()
4800 stats_base = (const u8 *)&sq->stats; in virtnet_get_ethtool_stats()
4802 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_get_ethtool_stats()
4804 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
4815 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
4816 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
4817 channels->max_other = 0; in virtnet_get_channels()
4818 channels->rx_count = 0; in virtnet_get_channels()
4819 channels->tx_count = 0; in virtnet_get_channels()
4820 channels->other_count = 0; in virtnet_get_channels()
4829 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
4837 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
4838 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
4839 cmd->base.port = PORT_OTHER; in virtnet_get_link_ksettings()
4853 return -ENOMEM; in virtnet_send_tx_notf_coal_cmds()
4855 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
4856 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
4862 return -EINVAL; in virtnet_send_tx_notf_coal_cmds()
4864 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
4865 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
4866 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
4867 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
4868 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
4878 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_cmds()
4882 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
4883 return -EOPNOTSUPP; in virtnet_send_rx_notf_coal_cmds()
4885 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
4886 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
4887 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
4889 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
4890 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
4891 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
4892 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4893 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
4894 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4901 return -ENOMEM; in virtnet_send_rx_notf_coal_cmds()
4903 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
4904 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
4905 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
4906 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4907 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
4908 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4912 /* Since the per-queue coalescing params can be set, in virtnet_send_rx_notf_coal_cmds()
4916 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
4917 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
4923 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
4925 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
4926 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
4927 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
4928 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4929 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
4930 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
4931 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
4957 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_vq_cmds()
4962 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
4963 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
4964 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
4965 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
4967 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || in virtnet_send_rx_notf_coal_vq_cmds()
4968 ec->rx_max_coalesced_frames != max_packets)) { in virtnet_send_rx_notf_coal_vq_cmds()
4969 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
4970 return -EINVAL; in virtnet_send_rx_notf_coal_vq_cmds()
4974 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
4975 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
4980 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
4986 ec->rx_coalesce_usecs, in virtnet_send_rx_notf_coal_vq_cmds()
4987 ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_vq_cmds()
4988 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5003 ec->tx_coalesce_usecs, in virtnet_send_notf_coal_vq_cmds()
5004 ec->tx_max_coalesced_frames); in virtnet_send_notf_coal_vq_cmds()
5016 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work()
5017 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5021 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5023 mutex_lock(&rq->dim_lock); in virtnet_rx_dim_work()
5024 if (!rq->dim_enabled) in virtnet_rx_dim_work()
5028 if (update_moder.usec != rq->intr_coal.max_usecs || in virtnet_rx_dim_work()
5029 update_moder.pkts != rq->intr_coal.max_packets) { in virtnet_rx_dim_work()
5035 dev->name, qnum); in virtnet_rx_dim_work()
5038 dim->state = DIM_START_MEASURE; in virtnet_rx_dim_work()
5039 mutex_unlock(&rq->dim_lock); in virtnet_rx_dim_work()
5047 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) in virtnet_coal_params_supported()
5048 return -EOPNOTSUPP; in virtnet_coal_params_supported()
5050 if (ec->tx_max_coalesced_frames > 1 || in virtnet_coal_params_supported()
5051 ec->rx_max_coalesced_frames != 1) in virtnet_coal_params_supported()
5052 return -EINVAL; in virtnet_coal_params_supported()
5062 return -EBUSY; in virtnet_should_update_vq_weight()
5079 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_coalesce()
5080 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5081 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_coalesce()
5082 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5088 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5095 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5104 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5105 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5118 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5119 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5120 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5121 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5122 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5123 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5125 ec->rx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5127 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5128 ec->tx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5142 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5143 return -EINVAL; in virtnet_set_per_queue_coalesce()
5146 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_per_queue_coalesce()
5147 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_per_queue_coalesce()
5148 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5153 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5162 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5173 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5174 return -EINVAL; in virtnet_get_per_queue_coalesce()
5176 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5177 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5178 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5179 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5180 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5181 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5182 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5183 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5185 ec->rx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5187 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5188 ec->tx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5198 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5199 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5204 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; in virtnet_get_rxfh_key_size()
5209 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; in virtnet_get_rxfh_indir_size()
5218 if (rxfh->indir) { in virtnet_get_rxfh()
5219 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5220 rxfh->indir[i] = vi->rss.indirection_table[i]; in virtnet_get_rxfh()
5223 if (rxfh->key) in virtnet_get_rxfh()
5224 memcpy(rxfh->key, vi->rss.key, vi->rss_key_size); in virtnet_get_rxfh()
5226 rxfh->hfunc = ETH_RSS_HASH_TOP; in virtnet_get_rxfh()
5239 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in virtnet_set_rxfh()
5240 rxfh->hfunc != ETH_RSS_HASH_TOP) in virtnet_set_rxfh()
5241 return -EOPNOTSUPP; in virtnet_set_rxfh()
5243 if (rxfh->indir) { in virtnet_set_rxfh()
5244 if (!vi->has_rss) in virtnet_set_rxfh()
5245 return -EOPNOTSUPP; in virtnet_set_rxfh()
5247 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5248 vi->rss.indirection_table[i] = rxfh->indir[i]; in virtnet_set_rxfh()
5252 if (rxfh->key) { in virtnet_set_rxfh()
5257 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5258 return -EOPNOTSUPP; in virtnet_set_rxfh()
5260 memcpy(vi->rss.key, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
5275 switch (info->cmd) { in virtnet_get_rxnfc()
5277 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
5283 rc = -EOPNOTSUPP; in virtnet_get_rxnfc()
5294 switch (info->cmd) { in virtnet_set_rxnfc()
5297 rc = -EINVAL; in virtnet_set_rxnfc()
5301 rc = -EOPNOTSUPP; in virtnet_set_rxnfc()
5338 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5344 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5351 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5357 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5366 /* The queue stats of virtio-net will not be reset. So here we in virtnet_get_base_stats()
5369 rx->bytes = 0; in virtnet_get_base_stats()
5370 rx->packets = 0; in virtnet_get_base_stats()
5372 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5373 rx->hw_drops = 0; in virtnet_get_base_stats()
5374 rx->hw_drop_overruns = 0; in virtnet_get_base_stats()
5377 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5378 rx->csum_unnecessary = 0; in virtnet_get_base_stats()
5379 rx->csum_none = 0; in virtnet_get_base_stats()
5380 rx->csum_bad = 0; in virtnet_get_base_stats()
5383 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5384 rx->hw_gro_packets = 0; in virtnet_get_base_stats()
5385 rx->hw_gro_bytes = 0; in virtnet_get_base_stats()
5386 rx->hw_gro_wire_packets = 0; in virtnet_get_base_stats()
5387 rx->hw_gro_wire_bytes = 0; in virtnet_get_base_stats()
5390 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5391 rx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5393 tx->bytes = 0; in virtnet_get_base_stats()
5394 tx->packets = 0; in virtnet_get_base_stats()
5395 tx->stop = 0; in virtnet_get_base_stats()
5396 tx->wake = 0; in virtnet_get_base_stats()
5398 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5399 tx->hw_drops = 0; in virtnet_get_base_stats()
5400 tx->hw_drop_errors = 0; in virtnet_get_base_stats()
5403 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5404 tx->csum_none = 0; in virtnet_get_base_stats()
5405 tx->needs_csum = 0; in virtnet_get_base_stats()
5408 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5409 tx->hw_gso_packets = 0; in virtnet_get_base_stats()
5410 tx->hw_gso_bytes = 0; in virtnet_get_base_stats()
5411 tx->hw_gso_wire_packets = 0; in virtnet_get_base_stats()
5412 tx->hw_gso_wire_bytes = 0; in virtnet_get_base_stats()
5415 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5416 tx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5427 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down()
5430 flush_work(&vi->config_work); in virtnet_freeze_down()
5432 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5434 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5435 netif_device_detach(vi->dev); in virtnet_freeze_down()
5436 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5437 if (netif_running(vi->dev)) in virtnet_freeze_down()
5438 virtnet_close(vi->dev); in virtnet_freeze_down()
5445 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up()
5457 if (netif_running(vi->dev)) { in virtnet_restore_up()
5458 err = virtnet_open(vi->dev); in virtnet_restore_up()
5463 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5464 netif_device_attach(vi->dev); in virtnet_restore_up()
5465 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5476 return -ENOMEM; in virtnet_set_guest_offloads()
5478 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5484 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5485 return -EINVAL; in virtnet_set_guest_offloads()
5495 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5503 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5505 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5516 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5519 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5523 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, in virtnet_rq_bind_xsk_pool()
5528 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5533 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf); in virtnet_rq_bind_xsk_pool()
5535 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5540 rq->xsk_pool = pool; in virtnet_rq_bind_xsk_pool()
5548 xdp_rxq_info_unreg(&rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5562 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5563 return -EINVAL; in virtnet_xsk_pool_enable()
5568 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5569 return -ENOENT; in virtnet_xsk_pool_enable()
5571 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5572 return -EINVAL; in virtnet_xsk_pool_enable()
5574 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5575 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5577 /* xsk assumes that tx and rx must have the same dma device. The af-xdp in virtnet_xsk_pool_enable()
5581 * But vq->dma_dev allows every vq to have its own dma dev. So I in virtnet_xsk_pool_enable()
5584 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) in virtnet_xsk_pool_enable()
5585 return -EINVAL; in virtnet_xsk_pool_enable()
5587 dma_dev = virtqueue_dma_dev(rq->vq); in virtnet_xsk_pool_enable()
5589 return -EINVAL; in virtnet_xsk_pool_enable()
5591 size = virtqueue_get_vring_size(rq->vq); in virtnet_xsk_pool_enable()
5593 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); in virtnet_xsk_pool_enable()
5594 if (!rq->xsk_buffs) in virtnet_xsk_pool_enable()
5595 return -ENOMEM; in virtnet_xsk_pool_enable()
5620 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
5621 return -EINVAL; in virtnet_xsk_pool_disable()
5623 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
5625 pool = rq->xsk_pool; in virtnet_xsk_pool_disable()
5631 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_disable()
5638 if (xdp->xsk.pool) in virtnet_xsk_pool_setup()
5639 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, in virtnet_xsk_pool_setup()
5640 xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5642 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5650 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; in virtnet_xdp_set()
5656 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
5657 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
5658 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
5659 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
5660 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
5661 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
5662 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
5663 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
5665 return -EOPNOTSUPP; in virtnet_xdp_set()
5668 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
5670 return -EINVAL; in virtnet_xdp_set()
5673 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { in virtnet_xdp_set()
5675 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); in virtnet_xdp_set()
5676 return -EINVAL; in virtnet_xdp_set()
5679 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
5684 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
5686 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
5690 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
5695 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
5699 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5700 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
5701 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
5706 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5707 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5718 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
5721 vi->xdp_enabled = true; in virtnet_xdp_set()
5722 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5723 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5731 vi->xdp_enabled = false; in virtnet_xdp_set()
5734 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5738 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5739 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5740 &vi->sq[i].napi); in virtnet_xdp_set()
5749 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
5750 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
5754 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5755 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5756 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5757 &vi->sq[i].napi); in virtnet_xdp_set()
5761 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
5767 switch (xdp->command) { in virtnet_xdp()
5769 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); in virtnet_xdp()
5773 return -EINVAL; in virtnet_xdp()
5783 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
5784 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
5788 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
5800 if ((dev->features ^ features) & NETIF_F_GRO_HW) { in virtnet_set_features()
5801 if (vi->xdp_enabled) in virtnet_set_features()
5802 return -EBUSY; in virtnet_set_features()
5805 offloads = vi->guest_offloads_capable; in virtnet_set_features()
5807 offloads = vi->guest_offloads_capable & in virtnet_set_features()
5813 vi->guest_offloads = offloads; in virtnet_set_features()
5816 if ((dev->features ^ features) & NETIF_F_RXHASH) { in virtnet_set_features()
5818 vi->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_features()
5820 vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; in virtnet_set_features()
5823 return -EINVAL; in virtnet_set_features()
5832 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout()
5833 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); in virtnet_tx_timeout() local
5835 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
5836 u64_stats_inc(&sq->stats.tx_timeouts); in virtnet_tx_timeout()
5837 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
5840 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
5841 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); in virtnet_tx_timeout()
5851 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
5858 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
5859 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
5866 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
5870 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
5899 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
5904 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
5911 if (vi->status == v) in virtnet_config_changed_work()
5914 vi->status = v; in virtnet_config_changed_work()
5916 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
5918 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
5919 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
5921 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
5922 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
5928 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed()
5930 schedule_work(&vi->config_work); in virtnet_config_changed()
5937 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
5938 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
5939 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
5943 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
5947 kfree(vi->rq); in virtnet_free_queues()
5948 kfree(vi->sq); in virtnet_free_queues()
5949 kfree(vi->ctrl); in virtnet_free_queues()
5957 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
5958 while (vi->rq[i].pages) in _free_receive_bufs()
5959 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
5961 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
5962 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
5978 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
5979 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
5980 if (vi->rq[i].do_dma && vi->rq[i].last_dma) in free_receive_page_frags()
5981 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
5982 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
5999 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6000 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6006 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6007 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6017 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6021 vdev->config->del_vqs(vdev); in virtnet_del_vqs()
6032 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6034 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6038 return max(max(min_buf_len, hdr_len) - hdr_len, in mergeable_min_buf_len()
6046 int ret = -ENOMEM; in virtnet_find_vqs()
6052 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by in virtnet_find_vqs()
6055 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6056 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6065 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6074 if (vi->has_cvq) { in virtnet_find_vqs()
6075 vqs_info[total_vqs - 1].name = "control"; in virtnet_find_vqs()
6079 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6082 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6083 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6084 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6085 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6090 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6094 if (vi->has_cvq) { in virtnet_find_vqs()
6095 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6096 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6097 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6100 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6101 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6102 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6103 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
6123 if (vi->has_cvq) { in virtnet_alloc_queues()
6124 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6125 if (!vi->ctrl) in virtnet_alloc_queues()
6128 vi->ctrl = NULL; in virtnet_alloc_queues()
6130 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6131 if (!vi->sq) in virtnet_alloc_queues()
6133 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6134 if (!vi->rq) in virtnet_alloc_queues()
6137 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6138 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6139 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6140 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6142 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6146 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6147 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6148 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6150 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6151 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6152 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6158 kfree(vi->sq); in virtnet_alloc_queues()
6160 kfree(vi->ctrl); in virtnet_alloc_queues()
6162 return -ENOMEM; in virtnet_alloc_queues()
6194 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show()
6200 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6201 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6203 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
6228 dev_err(&vdev->dev, "device advertises feature %s but not %s", in virtnet_fail_on_feature()
6268 if (!vdev->config->get) { in virtnet_validate()
6269 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtnet_validate()
6271 return -EINVAL; in virtnet_validate()
6275 return -EINVAL; in virtnet_validate()
6287 …dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, dis… in virtnet_validate()
6296 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6297 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6298 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6299 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6300 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6301 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6313 vi->big_packets = true; in virtnet_set_big_packets()
6314 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
6341 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) in virtnet_xdp_rx_hash()
6342 return -ENODATA; in virtnet_xdp_rx_hash()
6344 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6345 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
6346 hash_report = __le16_to_cpu(hdr_hash->hash_report); in virtnet_xdp_rx_hash()
6352 *hash = __le32_to_cpu(hdr_hash->hash_value); in virtnet_xdp_rx_hash()
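virtnet_xdp_rx_hash() above exposes the device RSS hash to XDP metadata consumers. A hedged example of the consuming side, assuming a recent kernel that provides the bpf_xdp_metadata_rx_hash kfunc and a libbpf toolchain with vmlinux.h (program and section names are the editor's):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int read_rx_hash(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type type;
	__u32 hash;

	/* Returns 0 on success; fails e.g. when NETIF_F_RXHASH is off. */
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &type))
		bpf_printk("rx hash %u type %d", hash, type);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";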
6362 int i, err = -ENOMEM; in virtnet_probe()
6383 return -ENOMEM; in virtnet_probe()
6386 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | in virtnet_probe()
6388 dev->netdev_ops = &virtnet_netdev; in virtnet_probe()
6389 dev->stat_ops = &virtnet_stat_ops; in virtnet_probe()
6390 dev->features = NETIF_F_HIGHDMA; in virtnet_probe()
6392 dev->ethtool_ops = &virtnet_ethtool_ops; in virtnet_probe()
6393 SET_NETDEV_DEV(dev, &vdev->dev); in virtnet_probe()
6398 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6400 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6403 dev->hw_features |= NETIF_F_TSO in virtnet_probe()
6408 dev->hw_features |= NETIF_F_TSO; in virtnet_probe()
6410 dev->hw_features |= NETIF_F_TSO6; in virtnet_probe()
6412 dev->hw_features |= NETIF_F_TSO_ECN; in virtnet_probe()
6414 dev->hw_features |= NETIF_F_GSO_UDP_L4; in virtnet_probe()
6416 dev->features |= NETIF_F_GSO_ROBUST; in virtnet_probe()
6419 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; in virtnet_probe()
6430 dev->features |= NETIF_F_RXCSUM; in virtnet_probe()
6434 dev->features |= NETIF_F_GRO_HW; in virtnet_probe()
6436 dev->hw_features |= NETIF_F_GRO_HW; in virtnet_probe()
6438 dev->vlan_features = dev->features; in virtnet_probe()
6439 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; in virtnet_probe()
6441 /* MTU range: 68 - 65535 */ in virtnet_probe()
6442 dev->min_mtu = MIN_MTU; in virtnet_probe()
6443 dev->max_mtu = MAX_MTU; in virtnet_probe()
6455 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", in virtnet_probe()
6456 dev->dev_addr); in virtnet_probe()
6459 /* Set up our device-specific information */ in virtnet_probe()
6461 vi->dev = dev; in virtnet_probe()
6462 vi->vdev = vdev; in virtnet_probe()
6463 vdev->priv = vi; in virtnet_probe()
6465 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6466 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6467 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6470 vi->mergeable_rx_bufs = true; in virtnet_probe()
6471 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; in virtnet_probe()
6475 vi->has_rss_hash_report = true; in virtnet_probe()
6478 vi->has_rss = true; in virtnet_probe()
6480 vi->rss_indir_table_size = in virtnet_probe()
6484 err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size); in virtnet_probe()
6488 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6489 vi->rss_key_size = in virtnet_probe()
6491 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6492 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", in virtnet_probe()
6493 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6494 err = -EINVAL; in virtnet_probe()
6498 vi->rss_hash_types_supported = in virtnet_probe()
6500 vi->rss_hash_types_supported &= in virtnet_probe()
6505 dev->hw_features |= NETIF_F_RXHASH; in virtnet_probe()
6506 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; in virtnet_probe()
6509 if (vi->has_rss_hash_report) in virtnet_probe()
6510 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6513 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6515 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6519 vi->any_header_sg = true; in virtnet_probe()
6522 vi->has_cvq = true; in virtnet_probe()
6524 mutex_init(&vi->cvq_lock); in virtnet_probe()
6530 if (mtu < dev->min_mtu) { in virtnet_probe()
6534 dev_err(&vdev->dev, in virtnet_probe()
6536 mtu, dev->min_mtu); in virtnet_probe()
6537 err = -EINVAL; in virtnet_probe()
6541 dev->mtu = mtu; in virtnet_probe()
6542 dev->max_mtu = mtu; in virtnet_probe()
6547 if (vi->any_header_sg) in virtnet_probe()
6548 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6552 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6554 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6555 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6562 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6563 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6564 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6565 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
6570 if (vi->sq[0].napi.weight) in virtnet_probe()
6571 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
6573 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
6576 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
6578 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
6579 if (vi->sq[i].napi.weight) in virtnet_probe()
6580 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
6588 if (vi->mergeable_rx_bufs) in virtnet_probe()
6589 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; in virtnet_probe()
6591 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6592 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6597 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
6598 if (IS_ERR(vi->failover)) { in virtnet_probe()
6599 err = PTR_ERR(vi->failover); in virtnet_probe()
6604 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
6620 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
6624 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6626 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); in virtnet_probe()
6627 dev->hw_features &= ~NETIF_F_RXHASH; in virtnet_probe()
6628 vi->has_rss_hash_report = false; in virtnet_probe()
6629 vi->has_rss = false; in virtnet_probe()
6633 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
6640 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
6643 sg_init_one(&sg, dev->dev_addr, dev->addr_len); in virtnet_probe()
6648 err = -EINVAL; in virtnet_probe()
6653 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
6661 err = -ENOMEM; in virtnet_probe()
6672 err = -EINVAL; in virtnet_probe()
6676 v = stats_cap->supported_stats_types[0]; in virtnet_probe()
6677 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
6683 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
6684 virtnet_config_changed_work(&vi->config_work); in virtnet_probe()
6686 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
6692 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
6693 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
6694 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
6705 dev->name, max_queue_pairs); in virtnet_probe()
6712 net_failover_destroy(vi->failover); in virtnet_probe()
6715 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
6725 virtio_reset_device(vi->vdev); in remove_vq_common()
6739 struct virtnet_info *vi = vdev->priv; in virtnet_remove()
6744 flush_work(&vi->config_work); in virtnet_remove()
6746 flush_work(&vi->rx_mode_work); in virtnet_remove()
6750 unregister_netdev(vi->dev); in virtnet_remove()
6752 net_failover_destroy(vi->failover); in virtnet_remove()
6756 rss_indirection_table_free(&vi->rss); in virtnet_remove()
6758 free_netdev(vi->dev); in virtnet_remove()
6763 struct virtnet_info *vi = vdev->priv; in virtnet_freeze()
6774 struct virtnet_info *vi = vdev->priv; in virtnet_restore()
6780 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()