Lines Matching +full:rx +full:- +full:shared
4 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 * Maintained by: pv-drivers@vmware.com
77 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_enable_all_intrs()
80 !adapter->queuesExtEnabled) { in vmxnet3_enable_all_intrs()
81 adapter->shared->devRead.intrConf.intrCtrl &= in vmxnet3_enable_all_intrs()
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &= in vmxnet3_enable_all_intrs()
96 !adapter->queuesExtEnabled) { in vmxnet3_disable_all_intrs()
97 adapter->shared->devRead.intrConf.intrCtrl |= in vmxnet3_disable_all_intrs()
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |= in vmxnet3_disable_all_intrs()
103 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_disable_all_intrs()
118 return tq->stopped; in vmxnet3_tq_stopped()
125 tq->stopped = false; in vmxnet3_tq_start()
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
133 tq->stopped = false; in vmxnet3_tq_wake()
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
141 tq->stopped = true; in vmxnet3_tq_stop()
142 tq->num_stop++; in vmxnet3_tq_stop()
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
161 if (tq->tsPktCount == 1) { in vmxnet3_apply_timestamp()
163 tq->tsPktCount = rate; in vmxnet3_apply_timestamp()
166 tq->tsPktCount--; in vmxnet3_apply_timestamp()
197 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_check_link()
200 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_check_link()
202 adapter->link_speed = ret >> 16; in vmxnet3_check_link()
204 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", in vmxnet3_check_link()
205 adapter->link_speed); in vmxnet3_check_link()
206 netif_carrier_on(adapter->netdev); in vmxnet3_check_link()
209 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
210 vmxnet3_tq_start(&adapter->tx_queue[i], in vmxnet3_check_link()
214 netdev_info(adapter->netdev, "NIC Link is Down\n"); in vmxnet3_check_link()
215 netif_carrier_off(adapter->netdev); in vmxnet3_check_link()
218 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
219 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); in vmxnet3_check_link()
229 u32 events = le32_to_cpu(adapter->shared->ecr); in vmxnet3_process_events()
241 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_process_events()
244 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_process_events()
246 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_process_events()
247 if (adapter->tqd_start[i].status.stopped) in vmxnet3_process_events()
248 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
250 adapter->netdev->name, i, le32_to_cpu( in vmxnet3_process_events()
251 adapter->tqd_start[i].status.error)); in vmxnet3_process_events()
252 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_process_events()
253 if (adapter->rqd_start[i].status.stopped) in vmxnet3_process_events()
254 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
256 adapter->netdev->name, i, in vmxnet3_process_events()
257 adapter->rqd_start[i].status.error); in vmxnet3_process_events()
259 schedule_work(&adapter->work); in vmxnet3_process_events()
265 * The device expects the bitfields in shared structures to be written in
273 * In order to avoid touching bits in shared structure more than once, temporary
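The two comment fragments above state the rule for shared descriptors: the device expects their bitfields in little-endian layout, so on big-endian hosts each descriptor is converted through a local temporary instead of being patched field by field in the shared ring (vmxnet3_RxDescToCPU()/vmxnet3_TxDescToLe() in the fragments below). A minimal sketch of that pattern, using a hypothetical example_rx_desc layout and assuming <linux/types.h>/<asm/byteorder.h>; it is not the driver's implementation:

/* Hypothetical two-field descriptor; the real Vmxnet3_RxDesc has more
 * fields plus bitfields that need the same treatment.
 */
struct example_rx_desc {
	__le64 addr;
	__le32 ext1;
};

/* Read each shared field exactly once, convert to host order, and hand
 * back plain host-endian values.
 */
static void example_rxdesc_to_cpu(const struct example_rx_desc *src,
				  u64 *addr, u32 *ext1)
{
	*addr = le64_to_cpu(src->addr);
	*ext1 = le32_to_cpu(src->ext1);
}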
281 dstDesc->addr = le64_to_cpu(srcDesc->addr); in vmxnet3_RxDescToCPU()
283 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); in vmxnet3_RxDescToCPU()
294 for (i = 2; i > 0; i--) { in vmxnet3_TxDescToLe()
295 src--; in vmxnet3_TxDescToLe()
296 dst--; in vmxnet3_TxDescToLe()
320 u32 mask = ((1 << size) - 1) << pos; in get_bitfield32()
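(For example, ((1 << size) - 1) << pos with size = 3 and pos = 4 gives 0x7 << 4 = 0x70, i.e. a mask selecting bits 4..6 of the descriptor word.)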
354 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
355 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
356 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
357 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
368 u32 map_type = tbi->map_type; in vmxnet3_unmap_tx_buf()
371 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
374 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
379 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ in vmxnet3_unmap_tx_buf()
393 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
394 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
396 tbi = &tq->buf_info[eop_idx]; in vmxnet3_unmap_pkt()
397 BUG_ON(!tbi->skb); in vmxnet3_unmap_pkt()
398 map_type = tbi->map_type; in vmxnet3_unmap_pkt()
399 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
401 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
402 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
407 * that the tx routine incorrectly re-queues a pkt due to in vmxnet3_unmap_pkt()
410 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
415 xdp_return_frame_bulk(tbi->xdpf, bq); in vmxnet3_unmap_pkt()
417 dev_kfree_skb_any(tbi->skb); in vmxnet3_unmap_pkt()
420 tbi->skb = NULL; in vmxnet3_unmap_pkt()
437 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
438 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
439 /* Prevent any &gdesc->tcd field from being (speculatively) in vmxnet3_tq_tx_complete()
440 * read before (&gdesc->tcd)->gen is read. in vmxnet3_tq_tx_complete()
445 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
448 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
449 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
455 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
457 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
459 netif_carrier_ok(adapter->netdev))) { in vmxnet3_tq_tx_complete()
462 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
479 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
482 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
483 map_type = tbi->map_type; in vmxnet3_tq_cleanup()
485 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); in vmxnet3_tq_cleanup()
486 if (tbi->skb) { in vmxnet3_tq_cleanup()
488 xdp_return_frame_bulk(tbi->xdpf, &bq); in vmxnet3_tq_cleanup()
490 dev_kfree_skb_any(tbi->skb); in vmxnet3_tq_cleanup()
491 tbi->skb = NULL; in vmxnet3_tq_cleanup()
493 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
500 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_cleanup()
501 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
503 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
504 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
506 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
507 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
515 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
516 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
518 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
519 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
521 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
522 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
523 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
524 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
525 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
527 if (tq->ts_ring.base) { in vmxnet3_tq_destroy()
528 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
529 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_destroy()
530 tq->ts_ring.base, tq->ts_ring.basePA); in vmxnet3_tq_destroy()
531 tq->ts_ring.base = NULL; in vmxnet3_tq_destroy()
533 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
534 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
536 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
537 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
539 kfree(tq->buf_info); in vmxnet3_tq_destroy()
540 tq->buf_info = NULL; in vmxnet3_tq_destroy()
550 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_destroy_all()
551 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); in vmxnet3_tq_destroy_all()
562 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
564 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
565 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
567 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
568 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
570 if (tq->ts_ring.base) in vmxnet3_tq_init()
571 memset(tq->ts_ring.base, 0, in vmxnet3_tq_init()
572 tq->tx_ring.size * tq->tx_ts_desc_size); in vmxnet3_tq_init()
575 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
577 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
578 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
581 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
582 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
583 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
593 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
594 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
596 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
597 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
598 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
599 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
600 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); in vmxnet3_tq_create()
604 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
605 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
606 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
607 if (!tq->data_ring.base) { in vmxnet3_tq_create()
608 netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); in vmxnet3_tq_create()
612 if (tq->tx_ts_desc_size != 0) { in vmxnet3_tq_create()
613 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
614 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_create()
615 &tq->ts_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
616 if (!tq->ts_ring.base) { in vmxnet3_tq_create()
617 netdev_err(adapter->netdev, "failed to allocate tx ts ring\n"); in vmxnet3_tq_create()
618 tq->tx_ts_desc_size = 0; in vmxnet3_tq_create()
621 tq->ts_ring.base = NULL; in vmxnet3_tq_create()
624 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
625 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
626 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
627 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
628 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); in vmxnet3_tq_create()
632 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]), in vmxnet3_tq_create()
634 dev_to_node(&adapter->pdev->dev)); in vmxnet3_tq_create()
635 if (!tq->buf_info) in vmxnet3_tq_create()
642 return -ENOMEM; in vmxnet3_tq_create()
650 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_cleanup_all()
651 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); in vmxnet3_tq_cleanup_all()
655 * starting from ring->next2fill, allocate rx buffers for the given ring
656 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
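Those two fragments are the doc comment of vmxnet3_rq_alloc_rx_buf(): starting at ring->next2fill it allocates up to @num_to_alloc rx buffers, programs each rx descriptor, and stops early if allocation fails. An illustrative version of the fill loop, reusing names visible in the fragments below (ring, rbi_base, gd, val) but with hypothetical helpers example_alloc_and_map() and example_adv_next2fill(); the real function also tracks completion state and statistics:

	while (num_allocated < num_to_alloc) {
		rbi = rbi_base + ring->next2fill;	/* software buf info */
		gd  = ring->base + ring->next2fill;	/* shared descriptor */

		if (example_alloc_and_map(rbi))		/* hypothetical: skb/page + DMA map */
			break;				/* stop; caller refills later */

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32(val | rbi->len);
		dma_wmb();				/* descriptor fields before ownership */
		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);

		num_allocated++;
		example_adv_next2fill(ring);		/* hypothetical: wrap + gen flip */
	}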
665 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
666 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
673 rbi = rbi_base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
674 gd = ring->base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
675 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_alloc_rx_buf()
677 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_alloc_rx_buf()
678 void *data = vmxnet3_pp_get_buff(rq->page_pool, in vmxnet3_rq_alloc_rx_buf()
679 &rbi->dma_addr, in vmxnet3_rq_alloc_rx_buf()
682 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
685 rbi->page = virt_to_page(data); in vmxnet3_rq_alloc_rx_buf()
687 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { in vmxnet3_rq_alloc_rx_buf()
688 if (rbi->skb == NULL) { in vmxnet3_rq_alloc_rx_buf()
689 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
690 rbi->len, in vmxnet3_rq_alloc_rx_buf()
692 if (unlikely(rbi->skb == NULL)) { in vmxnet3_rq_alloc_rx_buf()
693 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
697 rbi->dma_addr = dma_map_single( in vmxnet3_rq_alloc_rx_buf()
698 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
699 rbi->skb->data, rbi->len, in vmxnet3_rq_alloc_rx_buf()
701 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
702 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
703 dev_kfree_skb_any(rbi->skb); in vmxnet3_rq_alloc_rx_buf()
704 rbi->skb = NULL; in vmxnet3_rq_alloc_rx_buf()
705 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
709 /* rx buffer skipped by the device */ in vmxnet3_rq_alloc_rx_buf()
713 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || in vmxnet3_rq_alloc_rx_buf()
714 rbi->len != PAGE_SIZE); in vmxnet3_rq_alloc_rx_buf()
716 if (rbi->page == NULL) { in vmxnet3_rq_alloc_rx_buf()
717 rbi->page = alloc_page(GFP_ATOMIC); in vmxnet3_rq_alloc_rx_buf()
718 if (unlikely(rbi->page == NULL)) { in vmxnet3_rq_alloc_rx_buf()
719 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
722 rbi->dma_addr = dma_map_page( in vmxnet3_rq_alloc_rx_buf()
723 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
724 rbi->page, 0, PAGE_SIZE, in vmxnet3_rq_alloc_rx_buf()
726 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
727 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
728 put_page(rbi->page); in vmxnet3_rq_alloc_rx_buf()
729 rbi->page = NULL; in vmxnet3_rq_alloc_rx_buf()
730 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
734 /* rx buffers skipped by the device */ in vmxnet3_rq_alloc_rx_buf()
739 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_alloc_rx_buf()
740 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) in vmxnet3_rq_alloc_rx_buf()
741 | val | rbi->len); in vmxnet3_rq_alloc_rx_buf()
746 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_alloc_rx_buf()
750 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); in vmxnet3_rq_alloc_rx_buf()
755 netdev_dbg(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
757 num_allocated, ring->next2fill, ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
760 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
770 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; in vmxnet3_append_frag()
772 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); in vmxnet3_append_frag()
774 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len); in vmxnet3_append_frag()
775 skb->data_len += rcd->len; in vmxnet3_append_frag()
776 skb->truesize += PAGE_SIZE; in vmxnet3_append_frag()
777 skb_shinfo(skb)->nr_frags++; in vmxnet3_append_frag()
792 BUG_ON(ctx->copy_size > skb_headlen(skb)); in vmxnet3_map_pkt()
795 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
797 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
798 gdesc = ctx->sop_txd; /* both loops below can be skipped */ in vmxnet3_map_pkt()
801 if (ctx->copy_size) { in vmxnet3_map_pkt()
802 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
803 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
804 tq->txdata_desc_size); in vmxnet3_map_pkt()
805 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); in vmxnet3_map_pkt()
806 ctx->sop_txd->dword[3] = 0; in vmxnet3_map_pkt()
808 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
809 tbi->map_type = VMXNET3_MAP_NONE; in vmxnet3_map_pkt()
811 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
813 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
814 le64_to_cpu(ctx->sop_txd->txd.addr), in vmxnet3_map_pkt()
815 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); in vmxnet3_map_pkt()
816 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
818 /* use the right gen for non-SOP desc */ in vmxnet3_map_pkt()
819 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
823 len = skb_headlen(skb) - ctx->copy_size; in vmxnet3_map_pkt()
824 buf_offset = ctx->copy_size; in vmxnet3_map_pkt()
836 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
837 tbi->map_type = VMXNET3_MAP_SINGLE; in vmxnet3_map_pkt()
838 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_map_pkt()
839 skb->data + buf_offset, buf_size, in vmxnet3_map_pkt()
841 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
842 return -EFAULT; in vmxnet3_map_pkt()
844 tbi->len = buf_size; in vmxnet3_map_pkt()
846 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
847 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
849 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
850 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
851 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
853 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
855 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
856 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
857 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
858 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
860 len -= buf_size; in vmxnet3_map_pkt()
864 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in vmxnet3_map_pkt()
865 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in vmxnet3_map_pkt()
871 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
879 tbi->map_type = VMXNET3_MAP_PAGE; in vmxnet3_map_pkt()
880 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, in vmxnet3_map_pkt()
883 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
884 return -EFAULT; in vmxnet3_map_pkt()
886 tbi->len = buf_size; in vmxnet3_map_pkt()
888 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
889 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
891 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
892 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
893 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
895 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
897 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
898 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
899 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
900 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
902 len -= buf_size; in vmxnet3_map_pkt()
907 ctx->eop_txd = gdesc; in vmxnet3_map_pkt()
910 tbi->skb = skb; in vmxnet3_map_pkt()
911 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
912 if (tq->tx_ts_desc_size != 0) { in vmxnet3_map_pkt()
913 ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base + in vmxnet3_map_pkt()
914 tbi->sop_idx * tq->tx_ts_desc_size); in vmxnet3_map_pkt()
915 ctx->ts_txd->ts.tsi = 0; in vmxnet3_map_pkt()
928 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_init_all()
929 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); in vmxnet3_tq_init_all()
940 * -1: error happens during parsing
946 * 2. ctx->copy_size is # of bytes copied
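These are fragments of the vmxnet3_parse_hdr() doc comment: the function returns -1 if parsing fails, and otherwise leaves in ctx->copy_size the number of header bytes that will be copied into the tx data ring. Condensed from the fragments that follow, the choice of copy_size looks roughly like this (encapsulation and the UDP/other-protocol cases are omitted; treat it as a sketch, not the full function):

	if (ctx->mss) {					/* TSO: copy L2..L4 headers */
		ctx->l4_offset   = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size   = ctx->l4_offset + ctx->l4_hdr_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {	/* csum offload */
		ctx->l4_offset = skb_checksum_start_offset(skb);
		/* ctx->l4_hdr_size is set per L4 protocol (TCP/UDP), elided here */
		ctx->copy_size = min(ctx->l4_offset + ctx->l4_hdr_size, skb->len);
	} else {					/* no offload */
		ctx->copy_size = min_t(unsigned int, tq->txdata_desc_size,
				       skb_headlen(skb));
		if (skb->len <= tq->txdata_desc_size)
			ctx->copy_size = skb->len;	/* small packet: copy it whole */
	}
	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;			/* copy nothing instead */
	}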
957 if (ctx->mss) { /* TSO */ in vmxnet3_parse_hdr()
958 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_parse_hdr()
959 ctx->l4_offset = skb_inner_transport_offset(skb); in vmxnet3_parse_hdr()
960 ctx->l4_hdr_size = inner_tcp_hdrlen(skb); in vmxnet3_parse_hdr()
961 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
963 ctx->l4_offset = skb_transport_offset(skb); in vmxnet3_parse_hdr()
964 ctx->l4_hdr_size = tcp_hdrlen(skb); in vmxnet3_parse_hdr()
965 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
968 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_parse_hdr()
971 * well as non-encap case in vmxnet3_parse_hdr()
973 ctx->l4_offset = skb_checksum_start_offset(skb); in vmxnet3_parse_hdr()
976 skb->encapsulation) { in vmxnet3_parse_hdr()
979 if (iph->version == 4) { in vmxnet3_parse_hdr()
980 protocol = iph->protocol; in vmxnet3_parse_hdr()
985 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
988 if (ctx->ipv4) { in vmxnet3_parse_hdr()
991 protocol = iph->protocol; in vmxnet3_parse_hdr()
992 } else if (ctx->ipv6) { in vmxnet3_parse_hdr()
996 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
1002 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) : in vmxnet3_parse_hdr()
1006 ctx->l4_hdr_size = sizeof(struct udphdr); in vmxnet3_parse_hdr()
1009 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
1013 ctx->copy_size = min(ctx->l4_offset + in vmxnet3_parse_hdr()
1014 ctx->l4_hdr_size, skb->len); in vmxnet3_parse_hdr()
1016 ctx->l4_offset = 0; in vmxnet3_parse_hdr()
1017 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
1019 ctx->copy_size = min_t(unsigned int, in vmxnet3_parse_hdr()
1020 tq->txdata_desc_size, in vmxnet3_parse_hdr()
1024 if (skb->len <= tq->txdata_desc_size) in vmxnet3_parse_hdr()
1025 ctx->copy_size = skb->len; in vmxnet3_parse_hdr()
1028 if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) in vmxnet3_parse_hdr()
1032 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
1033 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
1034 ctx->copy_size = 0; in vmxnet3_parse_hdr()
1040 return -1; in vmxnet3_parse_hdr()
1060 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
1061 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
1062 tq->txdata_desc_size); in vmxnet3_copy_hdr()
1064 memcpy(tdd->data, skb->data, ctx->copy_size); in vmxnet3_copy_hdr()
1065 netdev_dbg(adapter->netdev, in vmxnet3_copy_hdr()
1067 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
1078 if (iph->version == 4) { in vmxnet3_prepare_inner_tso()
1079 iph->check = 0; in vmxnet3_prepare_inner_tso()
1080 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1085 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1096 if (ctx->ipv4) { in vmxnet3_prepare_tso()
1099 iph->check = 0; in vmxnet3_prepare_tso()
1100 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_tso()
1102 } else if (ctx->ipv6) { in vmxnet3_prepare_tso()
1112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in txd_estimate()
1113 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in txd_estimate()
1127 * Side-effects:
1130 * 3. shared->txNumDeferred may be updated
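The "Side-effects" fragments belong to the vmxnet3_tq_xmit() doc comment; the third one, shared->txNumDeferred, is how the driver batches doorbell writes. Each transmitted packet adds its segment count to txNumDeferred, and the tx producer register is only written once the deferred count reaches the device-supplied txThreshold, as the fragments further down show. Condensed and slightly reordered for illustration (the BAR0 write macro name is the driver's usual VMXNET3_WRITE_BAR0_REG, assumed here):

	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);

	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;		/* reset the batch */
		VMXNET3_WRITE_BAR0_REG(adapter,		/* ring the doorbell once */
				       adapter->tx_prod_offset + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}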
1154 ctx.mss = skb_shinfo(skb)->gso_size; in vmxnet3_tq_xmit()
1159 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1162 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1169 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1172 tq->stats.linearized++; in vmxnet3_tq_xmit()
1177 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1181 if (skb->encapsulation) { in vmxnet3_tq_xmit()
1189 /* non-tso pkts must not use more than in vmxnet3_tq_xmit()
1193 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1196 tq->stats.linearized++; in vmxnet3_tq_xmit()
1210 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1214 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1216 skb->csum_offset > in vmxnet3_tq_xmit()
1218 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1224 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1228 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1230 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1231 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1232 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1234 " next2fill %u\n", adapter->netdev->name, in vmxnet3_tq_xmit()
1235 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1238 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1246 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1250 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); in vmxnet3_tq_xmit()
1255 gdesc->dword[2] = ctx.sop_txd->dword[2]; in vmxnet3_tq_xmit()
1256 gdesc->dword[3] = ctx.sop_txd->dword[3]; in vmxnet3_tq_xmit()
1260 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1262 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_tq_xmit()
1263 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1265 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1266 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1268 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1270 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1272 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in vmxnet3_tq_xmit()
1273 gdesc->txd.oco = 1; in vmxnet3_tq_xmit()
1275 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1276 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1277 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1279 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; in vmxnet3_tq_xmit()
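(That expression is a ceiling division of the TSO payload by the MSS: for example, assuming an skb of 9014 bytes with hlen = 54 and ctx.mss = 1460, num_pkts = (9014 - 54 + 1459) / 1460 = 7, and those 7 segments are what gets added to txNumDeferred below.)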
1281 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1283 skb->encapsulation) { in vmxnet3_tq_xmit()
1284 gdesc->txd.hlen = ctx.l4_offset + in vmxnet3_tq_xmit()
1287 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1288 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1289 skb->csum_offset; in vmxnet3_tq_xmit()
1290 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1292 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1293 gdesc->txd.msscof = 0; /* Reserved */ in vmxnet3_tq_xmit()
1296 gdesc->txd.hlen = ctx.l4_offset; in vmxnet3_tq_xmit()
1297 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1298 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1299 skb->csum_offset; in vmxnet3_tq_xmit()
1302 gdesc->txd.om = 0; in vmxnet3_tq_xmit()
1303 gdesc->txd.msscof = 0; in vmxnet3_tq_xmit()
1307 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1311 gdesc->txd.ti = 1; in vmxnet3_tq_xmit()
1312 gdesc->txd.tci = skb_vlan_tag_get(skb); in vmxnet3_tq_xmit()
1315 if (tq->tx_ts_desc_size != 0 && in vmxnet3_tq_xmit()
1316 adapter->latencyConf->sampleRate != 0) { in vmxnet3_tq_xmit()
1317 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) { in vmxnet3_tq_xmit()
1318 ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); in vmxnet3_tq_xmit()
1319 ctx.ts_txd->ts.tsi = 1; in vmxnet3_tq_xmit()
1323 /* Ensure that the write to (&gdesc->txd)->gen will be observed after in vmxnet3_tq_xmit()
1324 * all other writes to &gdesc->txd. in vmxnet3_tq_xmit()
1329 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ in vmxnet3_tq_xmit()
1339 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1341 (u32)(ctx.sop_txd - in vmxnet3_tq_xmit()
1342 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1343 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); in vmxnet3_tq_xmit()
1345 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1347 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1348 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1350 adapter->tx_prod_offset + tq->qid * 8, in vmxnet3_tq_xmit()
1351 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1357 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1359 tq->stats.drop_total++; in vmxnet3_tq_xmit()
1374 .dev = &adapter->pdev->dev, in vmxnet3_create_pp()
1386 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, in vmxnet3_create_pp()
1387 rq->napi.napi_id); in vmxnet3_create_pp()
1391 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp); in vmxnet3_create_pp()
1395 rq->page_pool = pp; in vmxnet3_create_pp()
1400 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_create_pp()
1417 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset; in vmxnet3_pp_get_buff()
1427 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); in vmxnet3_xmit_frame()
1429 &adapter->tx_queue[skb->queue_mapping], in vmxnet3_xmit_frame()
1439 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1440 if (gdesc->rcd.v4 && in vmxnet3_rx_csum()
1441 (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1443 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1444 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1446 skb->csum_level = 1; in vmxnet3_rx_csum()
1448 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1449 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1451 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1452 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1454 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1456 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1457 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1459 skb->csum_level = 1; in vmxnet3_rx_csum()
1461 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1462 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1464 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1465 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1468 if (gdesc->rcd.csum) { in vmxnet3_rx_csum()
1469 skb->csum = htons(gdesc->rcd.csum); in vmxnet3_rx_csum()
1470 skb->ip_summed = CHECKSUM_PARTIAL; in vmxnet3_rx_csum()
1485 rq->stats.drop_err++; in vmxnet3_rx_error()
1486 if (!rcd->fcs) in vmxnet3_rx_error()
1487 rq->stats.drop_fcs++; in vmxnet3_rx_error()
1489 rq->stats.drop_total++; in vmxnet3_rx_error()
1492 * We do not unmap and chain the rx buffer to the skb. in vmxnet3_rx_error()
1498 * ctx->skb may be NULL if this is the first and the only one in vmxnet3_rx_error()
1501 if (ctx->skb) in vmxnet3_rx_error()
1502 dev_kfree_skb_irq(ctx->skb); in vmxnet3_rx_error()
1504 ctx->skb = NULL; in vmxnet3_rx_error()
1521 BUG_ON(gdesc->rcd.tcp == 0); in vmxnet3_get_hdr_len()
1527 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || in vmxnet3_get_hdr_len()
1528 skb->protocol == cpu_to_be16(ETH_P_8021AD)) in vmxnet3_get_hdr_len()
1534 if (gdesc->rcd.v4) { in vmxnet3_get_hdr_len()
1535 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && in vmxnet3_get_hdr_len()
1536 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); in vmxnet3_get_hdr_len()
1538 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); in vmxnet3_get_hdr_len()
1539 hlen = hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1540 hdr.ptr += hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1541 } else if (gdesc->rcd.v6) { in vmxnet3_get_hdr_len()
1542 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && in vmxnet3_get_hdr_len()
1543 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); in vmxnet3_get_hdr_len()
1548 if (hdr.ipv6->nexthdr != IPPROTO_TCP) in vmxnet3_get_hdr_len()
1553 /* Non-IP pkt, don't estimate header length */ in vmxnet3_get_hdr_len()
1560 return (hlen + (hdr.tcp->doff << 2)); in vmxnet3_get_hdr_len()
1568 adapter->rx_prod_offset, adapter->rx_prod2_offset in vmxnet3_rq_rx_complete()
1574 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; in vmxnet3_rq_rx_complete()
1583 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, in vmxnet3_rq_rx_complete()
1585 while (rcd->gen == rq->comp_ring.gen) { in vmxnet3_rq_rx_complete()
1602 * rcd->gen is read. in vmxnet3_rq_rx_complete()
1606 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && in vmxnet3_rq_rx_complete()
1607 rcd->rqID != rq->dataRingQid); in vmxnet3_rq_rx_complete()
1608 idx = rcd->rxdIdx; in vmxnet3_rq_rx_complete()
1609 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1610 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1611 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, in vmxnet3_rq_rx_complete()
1613 rbi = rq->buf_info[ring_idx] + idx; in vmxnet3_rq_rx_complete()
1615 BUG_ON(rxd->addr != rbi->dma_addr || in vmxnet3_rq_rx_complete()
1616 rxd->len != rbi->len); in vmxnet3_rq_rx_complete()
1618 if (unlikely(rcd->eop && rcd->err)) { in vmxnet3_rq_rx_complete()
1623 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
1627 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) { in vmxnet3_rq_rx_complete()
1628 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1632 if (rbi->buf_type != VMXNET3_RX_BUF_XDP) in vmxnet3_rq_rx_complete()
1638 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1641 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1648 if (rcd->sop) { /* first buf of the pkt */ in vmxnet3_rq_rx_complete()
1652 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || in vmxnet3_rq_rx_complete()
1653 (rcd->rqID != rq->qid && in vmxnet3_rq_rx_complete()
1654 rcd->rqID != rq->dataRingQid)); in vmxnet3_rq_rx_complete()
1656 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB && in vmxnet3_rq_rx_complete()
1657 rbi->buf_type != VMXNET3_RX_BUF_XDP); in vmxnet3_rq_rx_complete()
1658 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); in vmxnet3_rq_rx_complete()
1660 if (unlikely(rcd->len == 0)) { in vmxnet3_rq_rx_complete()
1661 /* Pretend the rx buffer is skipped. */ in vmxnet3_rq_rx_complete()
1662 BUG_ON(!(rcd->sop && rcd->eop)); in vmxnet3_rq_rx_complete()
1663 netdev_dbg(adapter->netdev, in vmxnet3_rq_rx_complete()
1670 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1672 if (rq->rx_ts_desc_size != 0 && rcd->ext2) { in vmxnet3_rq_rx_complete()
1675 ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base + in vmxnet3_rq_rx_complete()
1676 idx * rq->rx_ts_desc_size); in vmxnet3_rq_rx_complete()
1677 ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); in vmxnet3_rq_rx_complete()
1678 ts_rxd->ts.tsi = 1; in vmxnet3_rq_rx_complete()
1682 VMXNET3_RX_DATA_RING(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1683 len = rxDataRingUsed ? rcd->len : rbi->len; in vmxnet3_rq_rx_complete()
1690 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1692 &rq->data_ring.base[sz], in vmxnet3_rq_rx_complete()
1693 rcd->len, in vmxnet3_rq_rx_complete()
1696 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1703 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_rx_complete()
1709 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1710 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1711 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1716 if (rxDataRingUsed && adapter->rxdataring_enabled) { in vmxnet3_rq_rx_complete()
1719 BUG_ON(rcd->len > rq->data_ring.desc_size); in vmxnet3_rq_rx_complete()
1721 ctx->skb = new_skb; in vmxnet3_rq_rx_complete()
1722 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1723 memcpy(new_skb->data, in vmxnet3_rq_rx_complete()
1724 &rq->data_ring.base[sz], rcd->len); in vmxnet3_rq_rx_complete()
1726 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1729 dma_map_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1730 new_skb->data, rbi->len, in vmxnet3_rq_rx_complete()
1732 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1739 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1740 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1741 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1746 dma_unmap_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1747 rbi->dma_addr, in vmxnet3_rq_rx_complete()
1748 rbi->len, in vmxnet3_rq_rx_complete()
1752 rbi->skb = new_skb; in vmxnet3_rq_rx_complete()
1753 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1754 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1755 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1758 skb_record_rx_queue(ctx->skb, rq->qid); in vmxnet3_rq_rx_complete()
1759 skb_put(ctx->skb, rcd->len); in vmxnet3_rq_rx_complete()
1762 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { in vmxnet3_rq_rx_complete()
1769 segCnt = rcdlro->segCnt; in vmxnet3_rq_rx_complete()
1771 mss = rcdlro->mss; in vmxnet3_rq_rx_complete()
1774 encap_lro = (le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rq_rx_complete()
1780 BUG_ON(ctx->skb == NULL && !skip_page_frags); in vmxnet3_rq_rx_complete()
1783 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); in vmxnet3_rq_rx_complete()
1784 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); in vmxnet3_rq_rx_complete()
1787 * following non-sop fragments. They will be reused. in vmxnet3_rq_rx_complete()
1792 if (rcd->len) { in vmxnet3_rq_rx_complete()
1797 * processing all the following non-sop frags. in vmxnet3_rq_rx_complete()
1800 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1801 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1802 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1806 new_dma_addr = dma_map_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1810 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1813 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1814 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1815 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1820 dma_unmap_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1821 rbi->dma_addr, rbi->len, in vmxnet3_rq_rx_complete()
1824 vmxnet3_append_frag(ctx->skb, rcd, rbi); in vmxnet3_rq_rx_complete()
1827 rbi->page = new_page; in vmxnet3_rq_rx_complete()
1828 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1829 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1830 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1836 skb = ctx->skb; in vmxnet3_rq_rx_complete()
1837 if (rcd->eop) { in vmxnet3_rq_rx_complete()
1838 u32 mtu = adapter->netdev->mtu; in vmxnet3_rq_rx_complete()
1839 skb->len += skb->data_len; in vmxnet3_rq_rx_complete()
1842 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && in vmxnet3_rq_rx_complete()
1843 (adapter->netdev->features & NETIF_F_RXHASH)) { in vmxnet3_rq_rx_complete()
1846 switch (rcd->rssType) { in vmxnet3_rq_rx_complete()
1862 le32_to_cpu(rcd->rssHash), in vmxnet3_rq_rx_complete()
1868 skb->protocol = eth_type_trans(skb, adapter->netdev); in vmxnet3_rq_rx_complete()
1869 if ((!rcd->tcp && !encap_lro) || in vmxnet3_rq_rx_complete()
1870 !(adapter->netdev->features & NETIF_F_LRO)) in vmxnet3_rq_rx_complete()
1874 skb_shinfo(skb)->gso_type = rcd->v4 ? in vmxnet3_rq_rx_complete()
1876 skb_shinfo(skb)->gso_size = mss; in vmxnet3_rq_rx_complete()
1877 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1878 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { in vmxnet3_rq_rx_complete()
1886 skb_shinfo(skb)->gso_type = in vmxnet3_rq_rx_complete()
1887 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; in vmxnet3_rq_rx_complete()
1889 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1890 skb_shinfo(skb)->gso_size = in vmxnet3_rq_rx_complete()
1891 DIV_ROUND_UP(skb->len - in vmxnet3_rq_rx_complete()
1894 skb_shinfo(skb)->gso_size = mtu - hlen; in vmxnet3_rq_rx_complete()
1898 if (unlikely(rcd->ts)) in vmxnet3_rq_rx_complete()
1899 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); in vmxnet3_rq_rx_complete()
1902 if ((adapter->netdev->features & NETIF_F_LRO) && in vmxnet3_rq_rx_complete()
1903 !rq->shared->updateRxProd) in vmxnet3_rq_rx_complete()
1906 napi_gro_receive(&rq->napi, skb); in vmxnet3_rq_rx_complete()
1908 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1914 /* device may have skipped some rx descs */ in vmxnet3_rq_rx_complete()
1915 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1916 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_rx_complete()
1919 fill_offset = (idx > ring->next2fill ? 0 : ring->size) + in vmxnet3_rq_rx_complete()
1920 idx - ring->next2fill - 1; in vmxnet3_rq_rx_complete()
1921 if (!ring->isOutOfOrder || fill_offset >= comp_offset) in vmxnet3_rq_rx_complete()
1922 ring->next2comp = idx; in vmxnet3_rq_rx_complete()
1925 /* Ensure that the writes to rxd->gen bits will be observed in vmxnet3_rq_rx_complete()
1931 rbi = rq->buf_info[ring_idx] + ring->next2fill; in vmxnet3_rq_rx_complete()
1932 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP))) in vmxnet3_rq_rx_complete()
1935 /* ring0 Type1 buffers can get skipped; re-fill them */ in vmxnet3_rq_rx_complete()
1936 if (rbi->buf_type != VMXNET3_RX_BUF_SKB) in vmxnet3_rq_rx_complete()
1939 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) { in vmxnet3_rq_rx_complete()
1941 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, in vmxnet3_rq_rx_complete()
1943 WARN_ON(!rxd->addr); in vmxnet3_rq_rx_complete()
1946 rxd->gen = ring->gen; in vmxnet3_rq_rx_complete()
1948 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_rx_complete()
1949 num_to_alloc--; in vmxnet3_rq_rx_complete()
1951 /* rx completion hasn't occurred */ in vmxnet3_rq_rx_complete()
1952 ring->isOutOfOrder = 1; in vmxnet3_rq_rx_complete()
1958 ring->isOutOfOrder = 0; in vmxnet3_rq_rx_complete()
1962 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) { in vmxnet3_rq_rx_complete()
1964 rxprod_reg[ring_idx] + rq->qid * 8, in vmxnet3_rq_rx_complete()
1965 ring->next2fill); in vmxnet3_rq_rx_complete()
1968 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); in vmxnet3_rq_rx_complete()
1970 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); in vmxnet3_rq_rx_complete()
1987 if (!rq->rx_ring[0].base) in vmxnet3_rq_cleanup()
1991 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { in vmxnet3_rq_cleanup()
1997 rbi = &rq->buf_info[ring_idx][i]; in vmxnet3_rq_cleanup()
1999 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); in vmxnet3_rq_cleanup()
2001 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
2002 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_cleanup()
2003 page_pool_recycle_direct(rq->page_pool, in vmxnet3_rq_cleanup()
2004 rbi->page); in vmxnet3_rq_cleanup()
2005 rbi->page = NULL; in vmxnet3_rq_cleanup()
2006 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
2007 rbi->skb) { in vmxnet3_rq_cleanup()
2008 dma_unmap_single(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2009 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
2010 dev_kfree_skb(rbi->skb); in vmxnet3_rq_cleanup()
2011 rbi->skb = NULL; in vmxnet3_rq_cleanup()
2012 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && in vmxnet3_rq_cleanup()
2013 rbi->page) { in vmxnet3_rq_cleanup()
2014 dma_unmap_page(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2015 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
2016 put_page(rbi->page); in vmxnet3_rq_cleanup()
2017 rbi->page = NULL; in vmxnet3_rq_cleanup()
2021 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
2022 rq->rx_ring[ring_idx].next2fill = in vmxnet3_rq_cleanup()
2023 rq->rx_ring[ring_idx].next2comp = 0; in vmxnet3_rq_cleanup()
2026 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
2027 rq->comp_ring.next2proc = 0; in vmxnet3_rq_cleanup()
2036 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_cleanup_all()
2037 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); in vmxnet3_rq_cleanup_all()
2038 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL); in vmxnet3_rq_cleanup_all()
2048 /* all rx buffers must have already been freed */ in vmxnet3_rq_destroy()
2050 if (rq->buf_info[i]) { in vmxnet3_rq_destroy()
2051 for (j = 0; j < rq->rx_ring[i].size; j++) in vmxnet3_rq_destroy()
2052 BUG_ON(rq->buf_info[i][j].page != NULL); in vmxnet3_rq_destroy()
2058 if (rq->rx_ring[i].base) { in vmxnet3_rq_destroy()
2059 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2060 rq->rx_ring[i].size in vmxnet3_rq_destroy()
2062 rq->rx_ring[i].base, in vmxnet3_rq_destroy()
2063 rq->rx_ring[i].basePA); in vmxnet3_rq_destroy()
2064 rq->rx_ring[i].base = NULL; in vmxnet3_rq_destroy()
2068 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) in vmxnet3_rq_destroy()
2069 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_destroy()
2070 page_pool_destroy(rq->page_pool); in vmxnet3_rq_destroy()
2071 rq->page_pool = NULL; in vmxnet3_rq_destroy()
2073 if (rq->data_ring.base) { in vmxnet3_rq_destroy()
2074 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2075 rq->rx_ring[0].size * rq->data_ring.desc_size, in vmxnet3_rq_destroy()
2076 rq->data_ring.base, rq->data_ring.basePA); in vmxnet3_rq_destroy()
2077 rq->data_ring.base = NULL; in vmxnet3_rq_destroy()
2080 if (rq->ts_ring.base) { in vmxnet3_rq_destroy()
2081 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2082 rq->rx_ring[0].size * rq->rx_ts_desc_size, in vmxnet3_rq_destroy()
2083 rq->ts_ring.base, rq->ts_ring.basePA); in vmxnet3_rq_destroy()
2084 rq->ts_ring.base = NULL; in vmxnet3_rq_destroy()
2087 if (rq->comp_ring.base) { in vmxnet3_rq_destroy()
2088 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
2090 rq->comp_ring.base, rq->comp_ring.basePA); in vmxnet3_rq_destroy()
2091 rq->comp_ring.base = NULL; in vmxnet3_rq_destroy()
2094 kfree(rq->buf_info[0]); in vmxnet3_rq_destroy()
2095 rq->buf_info[0] = NULL; in vmxnet3_rq_destroy()
2096 rq->buf_info[1] = NULL; in vmxnet3_rq_destroy()
2104 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_destroy_all_rxdataring()
2105 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_rq_destroy_all_rxdataring()
2107 if (rq->data_ring.base) { in vmxnet3_rq_destroy_all_rxdataring()
2108 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy_all_rxdataring()
2109 (rq->rx_ring[0].size * in vmxnet3_rq_destroy_all_rxdataring()
2110 rq->data_ring.desc_size), in vmxnet3_rq_destroy_all_rxdataring()
2111 rq->data_ring.base, in vmxnet3_rq_destroy_all_rxdataring()
2112 rq->data_ring.basePA); in vmxnet3_rq_destroy_all_rxdataring()
2113 rq->data_ring.base = NULL; in vmxnet3_rq_destroy_all_rxdataring()
2115 rq->data_ring.desc_size = 0; in vmxnet3_rq_destroy_all_rxdataring()
2126 for (i = 0; i < rq->rx_ring[0].size; i++) { in vmxnet3_rq_init()
2129 if (i % adapter->rx_buf_per_pkt == 0) { in vmxnet3_rq_init()
2130 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ? in vmxnet3_rq_init()
2133 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
2135 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2136 rq->buf_info[0][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2139 for (i = 0; i < rq->rx_ring[1].size; i++) { in vmxnet3_rq_init()
2140 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2141 rq->buf_info[1][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2146 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; in vmxnet3_rq_init()
2148 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * in vmxnet3_rq_init()
2150 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2151 rq->rx_ring[i].isOutOfOrder = 0; in vmxnet3_rq_init()
2155 rq->rx_ring[0].size + rq->rx_ring[1].size); in vmxnet3_rq_init()
2159 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, in vmxnet3_rq_init()
2161 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_init()
2162 page_pool_destroy(rq->page_pool); in vmxnet3_rq_init()
2163 rq->page_pool = NULL; in vmxnet3_rq_init()
2165 /* the 1st ring must have at least one rx buffer */ in vmxnet3_rq_init()
2166 return -ENOMEM; in vmxnet3_rq_init()
2168 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
2170 if (rq->ts_ring.base) in vmxnet3_rq_init()
2171 memset(rq->ts_ring.base, 0, in vmxnet3_rq_init()
2172 rq->rx_ring[0].size * rq->rx_ts_desc_size); in vmxnet3_rq_init()
2175 rq->comp_ring.next2proc = 0; in vmxnet3_rq_init()
2176 memset(rq->comp_ring.base, 0, rq->comp_ring.size * in vmxnet3_rq_init()
2178 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2181 rq->rx_ctx.skb = NULL; in vmxnet3_rq_init()
2193 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_init_all()
2194 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); in vmxnet3_rq_init_all()
2196 dev_err(&adapter->netdev->dev, "%s: failed to " in vmxnet3_rq_init_all()
2197 "initialize rx queue%i\n", in vmxnet3_rq_init_all()
2198 adapter->netdev->name, i); in vmxnet3_rq_init_all()
2216 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); in vmxnet3_rq_create()
2217 rq->rx_ring[i].base = dma_alloc_coherent( in vmxnet3_rq_create()
2218 &adapter->pdev->dev, sz, in vmxnet3_rq_create()
2219 &rq->rx_ring[i].basePA, in vmxnet3_rq_create()
2221 if (!rq->rx_ring[i].base) { in vmxnet3_rq_create()
2222 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2223 "failed to allocate rx ring %d\n", i); in vmxnet3_rq_create()
2228 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { in vmxnet3_rq_create()
2229 sz = rq->rx_ring[0].size * rq->data_ring.desc_size; in vmxnet3_rq_create()
2230 rq->data_ring.base = in vmxnet3_rq_create()
2231 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2232 &rq->data_ring.basePA, in vmxnet3_rq_create()
2234 if (!rq->data_ring.base) { in vmxnet3_rq_create()
2235 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2236 "rx data ring will be disabled\n"); in vmxnet3_rq_create()
2237 adapter->rxdataring_enabled = false; in vmxnet3_rq_create()
2240 rq->data_ring.base = NULL; in vmxnet3_rq_create()
2241 rq->data_ring.desc_size = 0; in vmxnet3_rq_create()
2244 if (rq->rx_ts_desc_size != 0) { in vmxnet3_rq_create()
2245 sz = rq->rx_ring[0].size * rq->rx_ts_desc_size; in vmxnet3_rq_create()
2246 rq->ts_ring.base = in vmxnet3_rq_create()
2247 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2248 &rq->ts_ring.basePA, in vmxnet3_rq_create()
2250 if (!rq->ts_ring.base) { in vmxnet3_rq_create()
2251 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2252 "rx ts ring will be disabled\n"); in vmxnet3_rq_create()
2253 rq->rx_ts_desc_size = 0; in vmxnet3_rq_create()
2256 rq->ts_ring.base = NULL; in vmxnet3_rq_create()
2259 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); in vmxnet3_rq_create()
2260 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2261 &rq->comp_ring.basePA, in vmxnet3_rq_create()
2263 if (!rq->comp_ring.base) { in vmxnet3_rq_create()
2264 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); in vmxnet3_rq_create()
2268 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size, in vmxnet3_rq_create()
2269 sizeof(rq->buf_info[0][0]), GFP_KERNEL, in vmxnet3_rq_create()
2270 dev_to_node(&adapter->pdev->dev)); in vmxnet3_rq_create()
2274 rq->buf_info[0] = bi; in vmxnet3_rq_create()
2275 rq->buf_info[1] = bi + rq->rx_ring[0].size; in vmxnet3_rq_create()
2281 return -ENOMEM; in vmxnet3_rq_create()
2290 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_rq_create_all()
2292 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_create_all()
2293 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); in vmxnet3_rq_create_all()
2295 dev_err(&adapter->netdev->dev, in vmxnet3_rq_create_all()
2296 "%s: failed to create rx queue%i\n", in vmxnet3_rq_create_all()
2297 adapter->netdev->name, i); in vmxnet3_rq_create_all()
2302 if (!adapter->rxdataring_enabled) in vmxnet3_rq_create_all()
2312 /* Multiple queue aware polling function for tx and rx */
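That comment heads vmxnet3_do_poll(), the queue-aware NAPI worker. Reconstructed from the fragments that follow (so a sketch, not a verbatim copy): pending device events are handled first, tx completions are drained on every tx queue without consuming budget, and rx completions are processed across all rx queues up to @budget, whose consumption is returned:

	static int example_do_poll(struct vmxnet3_adapter *adapter, int budget)
	{
		int rcd_done = 0, i;

		if (unlikely(adapter->shared->ecr))
			vmxnet3_process_events(adapter);
		for (i = 0; i < adapter->num_tx_queues; i++)
			vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
		for (i = 0; i < adapter->num_rx_queues; i++)
			rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
							   adapter, budget);
		return rcd_done;	/* rx descriptors processed */
	}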
2318 if (unlikely(adapter->shared->ecr)) in vmxnet3_do_poll()
2320 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_do_poll()
2321 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); in vmxnet3_do_poll()
2323 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_do_poll()
2324 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], in vmxnet3_do_poll()
2337 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); in vmxnet3_poll()
2341 vmxnet3_enable_all_intrs(rx_queue->adapter); in vmxnet3_poll()
2347 * NAPI polling function for MSI-X mode with multiple Rx queues
2348 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
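Those two fragments document vmxnet3_poll_rx_only(), the per-queue NAPI handler used in MSI-X mode. The usual shape, hedged as a reconstruction from the fragments below: when the vector is shared with a buddy tx queue (VMXNET3_INTR_BUDDYSHARE) that queue's completions are drained first, then rx is polled against the budget, and a partial poll completes NAPI and re-arms the queue's interrupt:

	static int example_poll_rx_only(struct napi_struct *napi, int budget)
	{
		struct vmxnet3_rx_queue *rq =
			container_of(napi, struct vmxnet3_rx_queue, napi);
		struct vmxnet3_adapter *adapter = rq->adapter;
		int rxd_done;

		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
			struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
			vmxnet3_tq_tx_complete(tq, adapter);
		}

		rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

		if (rxd_done < budget) {
			napi_complete_done(napi, rxd_done);
			vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
		}
		return rxd_done;
	}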
2356 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only()
2362 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_poll_rx_only()
2364 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
2372 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
2389 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
2391 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_tx()
2392 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2395 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_msix_tx()
2397 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_msix_tx()
2398 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx()
2404 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2411 * Handle completion interrupts on rx queues. Returns whether or not the
2419 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx()
2422 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_rx()
2423 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
2424 napi_schedule(&rq->napi); in vmxnet3_msix_rx()
2430 *----------------------------------------------------------------------------
2432 * vmxnet3_msix_event --
2439 *----------------------------------------------------------------------------
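That boxed comment heads vmxnet3_msix_event(), the MSI-X vector reserved for device events; the fragments right after it show its whole body. A hedged reconstruction: mask the event interrupt if the device uses active masking, process the event-cause register (ECR) when it is non-zero, then unmask:

	static irqreturn_t example_msix_event(int irq, void *data)
	{
		struct net_device *dev = data;
		struct vmxnet3_adapter *adapter = netdev_priv(dev);

		if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
			vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

		if (adapter->shared->ecr)
			vmxnet3_process_events(adapter);

		vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
		return IRQ_HANDLED;
	}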
2449 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_event()
2450 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2452 if (adapter->shared->ecr) in vmxnet3_msix_event()
2455 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2470 if (adapter->intr.type == VMXNET3_IT_INTX) { in vmxnet3_intr()
2479 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_intr()
2482 napi_schedule(&adapter->rx_queue[0].napi); in vmxnet3_intr()
2495 switch (adapter->intr.type) { in vmxnet3_netpoll()
2499 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_netpoll()
2500 vmxnet3_msix_rx(0, &adapter->rx_queue[i]); in vmxnet3_netpoll()
2506 vmxnet3_intr(0, adapter->netdev); in vmxnet3_netpoll()
2516 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_request_irqs()
2521 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2522 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_request_irqs()
2523 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_request_irqs()
2524 sprintf(adapter->tx_queue[i].name, "%s-tx-%d", in vmxnet3_request_irqs()
2525 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2527 intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2529 adapter->tx_queue[i].name, in vmxnet3_request_irqs()
2530 &adapter->tx_queue[i]); in vmxnet3_request_irqs()
2532 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2533 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2536 dev_err(&adapter->netdev->dev, in vmxnet3_request_irqs()
2539 adapter->tx_queue[i].name, err); in vmxnet3_request_irqs()
2545 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_request_irqs()
2546 for (; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2547 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2552 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2556 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2559 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2560 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2561 sprintf(adapter->rx_queue[i].name, "%s-rx-%d", in vmxnet3_request_irqs()
2562 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2564 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2565 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2566 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2568 adapter->rx_queue[i].name, in vmxnet3_request_irqs()
2569 &(adapter->rx_queue[i])); in vmxnet3_request_irqs()
2571 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2574 adapter->rx_queue[i].name, err); in vmxnet3_request_irqs()
2578 adapter->rx_queue[i].comp_ring.intr_idx = vector++; in vmxnet3_request_irqs()
2581 sprintf(intr->event_msi_vector_name, "%s-event-%d", in vmxnet3_request_irqs()
2582 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2583 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2585 intr->event_msi_vector_name, adapter->netdev); in vmxnet3_request_irqs()
2586 intr->event_intr_idx = vector; in vmxnet3_request_irqs()
2588 } else if (intr->type == VMXNET3_IT_MSI) { in vmxnet3_request_irqs()
2589 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2590 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, in vmxnet3_request_irqs()
2591 adapter->netdev->name, adapter->netdev); in vmxnet3_request_irqs()
2594 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2595 err = request_irq(adapter->pdev->irq, vmxnet3_intr, in vmxnet3_request_irqs()
2596 IRQF_SHARED, adapter->netdev->name, in vmxnet3_request_irqs()
2597 adapter->netdev); in vmxnet3_request_irqs()
2601 intr->num_intrs = vector + 1; in vmxnet3_request_irqs()
2603 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2605 intr->type, err); in vmxnet3_request_irqs()
2607 /* Number of rx queues will not change after this */ in vmxnet3_request_irqs()
2608 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2609 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs()
2610 rq->qid = i; in vmxnet3_request_irqs()
2611 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
2612 rq->dataRingQid = i + 2 * adapter->num_rx_queues; in vmxnet3_request_irqs()
2616 for (i = 0; i < intr->num_intrs; i++) in vmxnet3_request_irqs()
2617 intr->mod_levels[i] = UPT1_IML_ADAPTIVE; in vmxnet3_request_irqs()
2618 if (adapter->intr.type != VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2619 adapter->intr.event_intr_idx = 0; in vmxnet3_request_irqs()
2620 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2621 adapter->tx_queue[i].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2622 adapter->rx_queue[0].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2625 netdev_info(adapter->netdev, in vmxnet3_request_irqs()
2627 intr->type, intr->mask_mode, intr->num_intrs); in vmxnet3_request_irqs()
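By this point the number of rx queues is final, and each rx queue i is handed three hardware queue ids drawn from three back-to-back ranges: qid for the first rx ring, qid2 for the second ring, and dataRingQid for the rx data ring. A minimal userspace sketch of that mapping (the struct and function names below are illustrative, not the driver's):

#include <stdio.h>

/* Sketch: mirrors the qid/qid2/dataRingQid assignment done once the
 * number of rx queues is fixed. Pure arithmetic, not driver code. */
struct rxq_ids { unsigned qid, qid2, data_ring_qid; };

static struct rxq_ids map_rx_queue_ids(unsigned i, unsigned num_rx_queues)
{
    struct rxq_ids ids = {
        .qid           = i,                       /* ring 0 id */
        .qid2          = i + num_rx_queues,       /* ring 1 id */
        .data_ring_qid = i + 2 * num_rx_queues,   /* rx data ring id */
    };
    return ids;
}

int main(void)
{
    for (unsigned i = 0; i < 4; i++) {
        struct rxq_ids ids = map_rx_queue_ids(i, 4);
        printf("rxq %u -> qid %u qid2 %u dataRingQid %u\n",
               i, ids.qid, ids.qid2, ids.data_ring_qid);
    }
    return 0;
}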
2637 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_free_irqs()
2638 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0); in vmxnet3_free_irqs()
2640 switch (intr->type) { in vmxnet3_free_irqs()
2646 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_free_irqs()
2647 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_free_irqs()
2648 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2649 &(adapter->tx_queue[i])); in vmxnet3_free_irqs()
2650 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) in vmxnet3_free_irqs()
2655 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_free_irqs()
2656 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2657 &(adapter->rx_queue[i])); in vmxnet3_free_irqs()
2660 free_irq(intr->msix_entries[vector].vector, in vmxnet3_free_irqs()
2661 adapter->netdev); in vmxnet3_free_irqs()
2662 BUG_ON(vector >= intr->num_intrs); in vmxnet3_free_irqs()
2667 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2670 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2681 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_restore_vlan()
2687 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in vmxnet3_restore_vlan()
2697 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_add_vid()
2698 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_add_vid()
2702 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2705 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2708 set_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_add_vid()
2719 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_kill_vid()
2720 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_kill_vid()
2724 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2727 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2730 clear_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_kill_vid()
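The VLAN filter table touched by the add/kill paths above is, in effect, a 4096-bit bitmap shared with the device. A userspace sketch of the two operations follows; the 32-bit word/bit split is an assumption for illustration, not a statement about the device's exact vfTable layout. In the driver, both paths then push the update to the device under cmd_lock, which is why the spin_lock_irqsave()/spin_unlock_irqrestore() pair brackets the change.

#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID 4096

/* Sketch only: VLAN filter table as a bitmap of u32 words. */
static void vftable_set(uint32_t *vfTable, uint16_t vid)
{
    vfTable[vid >> 5] |= 1u << (vid & 31);
}

static void vftable_clear(uint32_t *vfTable, uint16_t vid)
{
    vfTable[vid >> 5] &= ~(1u << (vid & 31));
}

int main(void)
{
    uint32_t vfTable[VLAN_N_VID / 32] = { 0 };

    vftable_set(vfTable, 100);     /* vlan_rx_add_vid path */
    printf("word %u = 0x%08X\n", (unsigned)(100 >> 5), vfTable[100 >> 5]);
    vftable_clear(vfTable, 100);   /* vlan_rx_kill_vid path */
    return 0;
}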
2751 memcpy(buf + i++ * ETH_ALEN, ha->addr, in vmxnet3_copy_mc()
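vmxnet3_copy_mc() flattens the netdev's multicast address list into one contiguous buffer of 6-byte entries, which vmxnet3_set_mc() below then DMA-maps and hands to the device as mfTablePA. A userspace sketch of the packing, with illustrative names:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAC_LEN 6   /* ETH_ALEN */

/* Sketch: pack MAC addresses back to back into one flat buffer,
 * the layout used for the device's multicast filter table. */
static size_t pack_mc_table(uint8_t *buf, const uint8_t macs[][MAC_LEN], size_t count)
{
    for (size_t i = 0; i < count; i++)
        memcpy(buf + i * MAC_LEN, macs[i], MAC_LEN);
    return count * MAC_LEN;   /* table length in bytes */
}

int main(void)
{
    const uint8_t macs[2][MAC_LEN] = {
        { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
        { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
    };
    uint8_t table[2 * MAC_LEN];
    size_t len = pack_mc_table(table, macs, 2);

    printf("multicast table length: %zu bytes\n", len);
    return 0;
}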
2765 &adapter->shared->devRead.rxFilterConf; in vmxnet3_set_mc()
2771 if (netdev->flags & IFF_PROMISC) { in vmxnet3_set_mc()
2772 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_set_mc()
2780 if (netdev->flags & IFF_BROADCAST) in vmxnet3_set_mc()
2783 if (netdev->flags & IFF_ALLMULTI) in vmxnet3_set_mc()
2791 rxConf->mfTableLen = cpu_to_le16(sz); in vmxnet3_set_mc()
2793 &adapter->pdev->dev, in vmxnet3_set_mc()
2797 if (!dma_mapping_error(&adapter->pdev->dev, in vmxnet3_set_mc()
2801 rxConf->mfTablePA = cpu_to_le64( in vmxnet3_set_mc()
2813 rxConf->mfTableLen = 0; in vmxnet3_set_mc()
2814 rxConf->mfTablePA = 0; in vmxnet3_set_mc()
2817 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2818 if (new_mode != rxConf->rxMode) { in vmxnet3_set_mc()
2819 rxConf->rxMode = cpu_to_le32(new_mode); in vmxnet3_set_mc()
2828 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2831 dma_unmap_single(&adapter->pdev->dev, new_table_pa, in vmxnet3_set_mc()
2832 rxConf->mfTableLen, DMA_TO_DEVICE); in vmxnet3_set_mc()
2841 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_destroy_all()
2842 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); in vmxnet3_rq_destroy_all()
2853 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_setup_driver_shared() local
2854 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; in vmxnet3_setup_driver_shared()
2855 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt; in vmxnet3_setup_driver_shared()
2862 memset(shared, 0, sizeof(*shared)); in vmxnet3_setup_driver_shared()
2865 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); in vmxnet3_setup_driver_shared()
2866 devRead->misc.driverInfo.version = cpu_to_le32( in vmxnet3_setup_driver_shared()
2868 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? in vmxnet3_setup_driver_shared()
2870 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; in vmxnet3_setup_driver_shared()
2871 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( in vmxnet3_setup_driver_shared()
2872 *((u32 *)&devRead->misc.driverInfo.gos)); in vmxnet3_setup_driver_shared()
2873 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2874 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2876 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); in vmxnet3_setup_driver_shared()
2877 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); in vmxnet3_setup_driver_shared()
2880 if (adapter->netdev->features & NETIF_F_RXCSUM) in vmxnet3_setup_driver_shared()
2881 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; in vmxnet3_setup_driver_shared()
2883 if (adapter->netdev->features & NETIF_F_LRO) { in vmxnet3_setup_driver_shared()
2884 devRead->misc.uptFeatures |= UPT1_F_LRO; in vmxnet3_setup_driver_shared()
2885 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); in vmxnet3_setup_driver_shared()
2887 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in vmxnet3_setup_driver_shared()
2888 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; in vmxnet3_setup_driver_shared()
2890 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_setup_driver_shared()
2892 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD; in vmxnet3_setup_driver_shared()
2894 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); in vmxnet3_setup_driver_shared()
2895 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); in vmxnet3_setup_driver_shared()
2896 devRead->misc.queueDescLen = cpu_to_le32( in vmxnet3_setup_driver_shared()
2897 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + in vmxnet3_setup_driver_shared()
2898 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); in vmxnet3_setup_driver_shared()
2901 devRead->misc.numTxQueues = adapter->num_tx_queues; in vmxnet3_setup_driver_shared()
2902 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_setup_driver_shared()
2903 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared()
2904 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); in vmxnet3_setup_driver_shared()
2905 tqc = &adapter->tqd_start[i].conf; in vmxnet3_setup_driver_shared()
2906 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2907 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2908 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2909 tqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2910 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2911 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2912 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2913 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2914 tqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2915 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2917 tqtsc = &adapter->tqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2918 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA); in vmxnet3_setup_driver_shared()
2919 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size); in vmxnet3_setup_driver_shared()
2923 /* rx queue settings */ in vmxnet3_setup_driver_shared()
2924 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2925 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_setup_driver_shared()
2926 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared()
2927 rqc = &adapter->rqd_start[i].conf; in vmxnet3_setup_driver_shared()
2928 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); in vmxnet3_setup_driver_shared()
2929 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); in vmxnet3_setup_driver_shared()
2930 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2931 rqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2932 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); in vmxnet3_setup_driver_shared()
2933 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); in vmxnet3_setup_driver_shared()
2934 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); in vmxnet3_setup_driver_shared()
2935 rqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2936 rqc->intrIdx = rq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2938 rqc->rxDataRingBasePA = in vmxnet3_setup_driver_shared()
2939 cpu_to_le64(rq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2940 rqc->rxDataRingDescSize = in vmxnet3_setup_driver_shared()
2941 cpu_to_le16(rq->data_ring.desc_size); in vmxnet3_setup_driver_shared()
2944 rqtsc = &adapter->rqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2945 rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA); in vmxnet3_setup_driver_shared()
2946 rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size); in vmxnet3_setup_driver_shared()
2951 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); in vmxnet3_setup_driver_shared()
2953 if (adapter->rss) { in vmxnet3_setup_driver_shared()
2954 struct UPT1_RSSConf *rssConf = adapter->rss_conf; in vmxnet3_setup_driver_shared()
2956 devRead->misc.uptFeatures |= UPT1_F_RSS; in vmxnet3_setup_driver_shared()
2957 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2958 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | in vmxnet3_setup_driver_shared()
2962 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; in vmxnet3_setup_driver_shared()
2963 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; in vmxnet3_setup_driver_shared()
2964 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; in vmxnet3_setup_driver_shared()
2965 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); in vmxnet3_setup_driver_shared()
2967 for (i = 0; i < rssConf->indTableSize; i++) in vmxnet3_setup_driver_shared()
2968 rssConf->indTable[i] = ethtool_rxfh_indir_default( in vmxnet3_setup_driver_shared()
2969 i, adapter->num_rx_queues); in vmxnet3_setup_driver_shared()
2971 devRead->rssConfDesc.confVer = 1; in vmxnet3_setup_driver_shared()
2972 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); in vmxnet3_setup_driver_shared()
2973 devRead->rssConfDesc.confPA = in vmxnet3_setup_driver_shared()
2974 cpu_to_le64(adapter->rss_conf_pa); in vmxnet3_setup_driver_shared()
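The RSS block above programs a Toeplitz hash with a random key and fills the indirection table so that slot i points at rx queue i modulo the number of queues; ethtool_rxfh_indir_default() is effectively that modulo. A standalone sketch of the fill (the table size here is illustrative, not VMXNET3_RSS_IND_TABLE_SIZE):

#include <stdio.h>

#define IND_TABLE_SIZE 128   /* illustrative size only */

int main(void)
{
    unsigned num_rx_queues = 4;
    unsigned char ind_table[IND_TABLE_SIZE];

    /* Round-robin spread of indirection slots over the rx queues. */
    for (unsigned i = 0; i < IND_TABLE_SIZE; i++)
        ind_table[i] = i % num_rx_queues;

    printf("slot 0 -> rxq %u, slot 5 -> rxq %u\n",
           (unsigned)ind_table[0], (unsigned)ind_table[5]);
    return 0;
}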
2981 !adapter->queuesExtEnabled) { in vmxnet3_setup_driver_shared()
2982 devRead->intrConf.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2984 devRead->intrConf.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2985 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2986 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2988 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2989 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
2991 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2993 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2994 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2995 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2997 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2998 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
3001 /* rx filter settings */ in vmxnet3_setup_driver_shared()
3002 devRead->rxFilterConf.rxMode = 0; in vmxnet3_setup_driver_shared()
3004 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); in vmxnet3_setup_driver_shared()
3012 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_bufsize() local
3013 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_bufsize()
3019 cmdInfo->ringBufSize = adapter->ringBufSize; in vmxnet3_init_bufsize()
3020 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3023 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3029 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_coalesce() local
3030 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_coalesce()
3036 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3037 cmdInfo->varConf.confVer = 1; in vmxnet3_init_coalesce()
3038 cmdInfo->varConf.confLen = in vmxnet3_init_coalesce()
3039 cpu_to_le32(sizeof(*adapter->coal_conf)); in vmxnet3_init_coalesce()
3040 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); in vmxnet3_init_coalesce()
3042 if (adapter->default_coal_mode) { in vmxnet3_init_coalesce()
3050 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3056 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_rssfields() local
3057 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_rssfields()
3063 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
3065 if (adapter->default_rss_fields) { in vmxnet3_init_rssfields()
3068 adapter->rss_fields = in vmxnet3_init_rssfields()
3072 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 || in vmxnet3_init_rssfields()
3073 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) && in vmxnet3_init_rssfields()
3074 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3076 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS; in vmxnet3_init_rssfields()
3078 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS); in vmxnet3_init_rssfields()
3081 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) && in vmxnet3_init_rssfields()
3082 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3084 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4; in vmxnet3_init_rssfields()
3086 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4); in vmxnet3_init_rssfields()
3089 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) && in vmxnet3_init_rssfields()
3090 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3092 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6; in vmxnet3_init_rssfields()
3094 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6); in vmxnet3_init_rssfields()
3097 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_init_rssfields()
3099 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
3101 cmdInfo->setRssFields = adapter->rss_fields; in vmxnet3_init_rssfields()
3109 adapter->rss_fields = in vmxnet3_init_rssfields()
3113 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
3123 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," in vmxnet3_activate_dev()
3124 " ring sizes %u %u %u\n", adapter->netdev->name, in vmxnet3_activate_dev()
3125 adapter->skb_buf_size, adapter->rx_buf_per_pkt, in vmxnet3_activate_dev()
3126 adapter->tx_queue[0].tx_ring.size, in vmxnet3_activate_dev()
3127 adapter->rx_queue[0].rx_ring[0].size, in vmxnet3_activate_dev()
3128 adapter->rx_queue[0].rx_ring[1].size); in vmxnet3_activate_dev()
3133 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3134 "Failed to init rx queue error %d\n", err); in vmxnet3_activate_dev()
3140 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3148 adapter->shared_pa)); in vmxnet3_activate_dev()
3150 adapter->shared_pa)); in vmxnet3_activate_dev()
3151 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3155 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3158 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3160 err = -EINVAL; in vmxnet3_activate_dev()
3168 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_activate_dev()
3170 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN, in vmxnet3_activate_dev()
3171 adapter->rx_queue[i].rx_ring[0].next2fill); in vmxnet3_activate_dev()
3172 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset + in vmxnet3_activate_dev()
3174 adapter->rx_queue[i].rx_ring[1].next2fill); in vmxnet3_activate_dev()
3177 /* Apply the rx filter settings last. */ in vmxnet3_activate_dev()
3178 vmxnet3_set_mc(adapter->netdev); in vmxnet3_activate_dev()
3185 netif_tx_wake_all_queues(adapter->netdev); in vmxnet3_activate_dev()
3186 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_activate_dev()
3187 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_activate_dev()
3189 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_activate_dev()
3208 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3210 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3219 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) in vmxnet3_quiesce_dev()
3223 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3226 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3229 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_quiesce_dev()
3230 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_quiesce_dev()
3231 netif_tx_disable(adapter->netdev); in vmxnet3_quiesce_dev()
3232 adapter->link_speed = 0; in vmxnet3_quiesce_dev()
3233 netif_carrier_off(adapter->netdev); in vmxnet3_quiesce_dev()
3261 dev_addr_set(netdev, addr->sa_data); in vmxnet3_set_mac_addr()
3262 vmxnet3_write_mac_addr(adapter, addr->sa_data); in vmxnet3_set_mac_addr()
3275 struct pci_dev *pdev = adapter->pdev; in vmxnet3_alloc_pci_resources()
3279 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); in vmxnet3_alloc_pci_resources()
3283 err = pci_request_selected_regions(pdev, (1 << 2) - 1, in vmxnet3_alloc_pci_resources()
3286 dev_err(&pdev->dev, in vmxnet3_alloc_pci_resources()
3295 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3296 if (!adapter->hw_addr0) { in vmxnet3_alloc_pci_resources()
3297 dev_err(&pdev->dev, "Failed to map bar0\n"); in vmxnet3_alloc_pci_resources()
3298 err = -EIO; in vmxnet3_alloc_pci_resources()
3304 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3305 if (!adapter->hw_addr1) { in vmxnet3_alloc_pci_resources()
3306 dev_err(&pdev->dev, "Failed to map bar1\n"); in vmxnet3_alloc_pci_resources()
3307 err = -EIO; in vmxnet3_alloc_pci_resources()
3313 iounmap(adapter->hw_addr0); in vmxnet3_alloc_pci_resources()
3315 pci_release_selected_regions(pdev, (1 << 2) - 1); in vmxnet3_alloc_pci_resources()
3325 BUG_ON(!adapter->pdev); in vmxnet3_free_pci_resources()
3327 iounmap(adapter->hw_addr0); in vmxnet3_free_pci_resources()
3328 iounmap(adapter->hw_addr1); in vmxnet3_free_pci_resources()
3329 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); in vmxnet3_free_pci_resources()
3330 pci_disable_device(adapter->pdev); in vmxnet3_free_pci_resources()
3340 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - in vmxnet3_adjust_rx_ring_size()
3342 adapter->skb_buf_size = adapter->netdev->mtu + in vmxnet3_adjust_rx_ring_size()
3344 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) in vmxnet3_adjust_rx_ring_size()
3345 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3347 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3349 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3350 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + in vmxnet3_adjust_rx_ring_size()
3352 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; in vmxnet3_adjust_rx_ring_size()
3355 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE, in vmxnet3_adjust_rx_ring_size()
3357 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3358 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size); in vmxnet3_adjust_rx_ring_size()
3359 adapter->ringBufSize.ring1BufSizeType1 = 0; in vmxnet3_adjust_rx_ring_size()
3360 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE); in vmxnet3_adjust_rx_ring_size()
3367 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; in vmxnet3_adjust_rx_ring_size()
3368 ring0_size = adapter->rx_queue[0].rx_ring[0].size; in vmxnet3_adjust_rx_ring_size()
3369 ring0_size = (ring0_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3372 ring1_size = adapter->rx_queue[0].rx_ring[1].size; in vmxnet3_adjust_rx_ring_size()
3373 ring1_size = (ring1_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3383 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_adjust_rx_ring_size()
3384 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
3386 rq->rx_ring[0].size = ring0_size; in vmxnet3_adjust_rx_ring_size()
3387 rq->rx_ring[1].size = ring1_size; in vmxnet3_adjust_rx_ring_size()
3388 rq->comp_ring.size = comp_size; in vmxnet3_adjust_rx_ring_size()
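vmxnet3_adjust_rx_ring_size() does two things the excerpt only hints at: it works out how many rx buffers one packet of the current MTU consumes, and it rounds each requested ring size up to a multiple of rx_buf_per_pkt times the ring alignment. A self-contained sketch of the arithmetic, with placeholder constants (the real values live in the driver headers, and the jumbo-frame remainder also includes header bytes that are omitted here):

#include <stdio.h>

#define MAX_SKB_BUF_SIZE 2048U   /* illustrative */
#define PAGE_SZ          4096U   /* illustrative */
#define RING_SIZE_ALIGN  32U     /* illustrative */

/* How many rx buffers a single packet of this MTU needs. */
static unsigned rx_bufs_per_pkt(unsigned mtu)
{
    if (mtu <= MAX_SKB_BUF_SIZE)
        return 1;                             /* whole frame fits in one skb buffer */
    unsigned rest = mtu - MAX_SKB_BUF_SIZE;   /* driver also adds header bytes here */
    return 1 + (rest + PAGE_SZ - 1) / PAGE_SZ;
}

/* Round the requested ring size up to a multiple of bufs_per_pkt * align. */
static unsigned align_ring(unsigned requested, unsigned bufs_per_pkt)
{
    unsigned sz = bufs_per_pkt * RING_SIZE_ALIGN;
    return (requested + sz - 1) / sz * sz;
}

int main(void)
{
    unsigned mtu = 9000, want = 1000;
    unsigned bpp = rx_bufs_per_pkt(mtu);

    printf("mtu %u -> %u buffers/pkt, ring0 %u -> %u\n",
           mtu, bpp, want, align_ring(want, bpp));
    return 0;
}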
3400 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_create_queues()
3401 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues()
3402 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
3403 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
3404 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
3405 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
3406 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
3407 tq->stopped = true; in vmxnet3_create_queues()
3408 tq->adapter = adapter; in vmxnet3_create_queues()
3409 tq->qid = i; in vmxnet3_create_queues()
3410 tq->tx_ts_desc_size = adapter->tx_ts_desc_size; in vmxnet3_create_queues()
3411 tq->tsPktCount = 1; in vmxnet3_create_queues()
3421 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; in vmxnet3_create_queues()
3422 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; in vmxnet3_create_queues()
3425 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_create_queues()
3426 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_create_queues()
3427 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues()
3428 /* qid and qid2 for rx queues will be assigned later, once the number in vmxnet3_create_queues()
3429 * of rx queues is finalized after interrupt allocation */ in vmxnet3_create_queues()
3430 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
3431 rq->adapter = adapter; in vmxnet3_create_queues()
3432 rq->data_ring.desc_size = rxdata_desc_size; in vmxnet3_create_queues()
3433 rq->rx_ts_desc_size = adapter->rx_ts_desc_size; in vmxnet3_create_queues()
3437 netdev_err(adapter->netdev, in vmxnet3_create_queues()
3438 "Could not allocate any rx queues. " in vmxnet3_create_queues()
3442 netdev_info(adapter->netdev, in vmxnet3_create_queues()
3443 "Number of rx queues changed " in vmxnet3_create_queues()
3445 adapter->num_rx_queues = i; in vmxnet3_create_queues()
3452 if (!adapter->rxdataring_enabled) in vmxnet3_create_queues()
3469 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_open()
3470 spin_lock_init(&adapter->tx_queue[i].tx_lock); in vmxnet3_open()
3477 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3481 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3487 adapter->txdata_desc_size = in vmxnet3_open()
3490 adapter->txdata_desc_size = txdata_desc_size; in vmxnet3_open()
3493 adapter->rxdata_desc_size = (ret >> 16) & 0xffff; in vmxnet3_open()
3495 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); in vmxnet3_open()
3504 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3508 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3519 adapter->tx_ts_desc_size = tx_ts_desc_size; in vmxnet3_open()
3520 adapter->rx_ts_desc_size = rx_ts_desc_size; in vmxnet3_open()
3522 adapter->tx_ts_desc_size = 0; in vmxnet3_open()
3523 adapter->rx_ts_desc_size = 0; in vmxnet3_open()
3527 adapter->tx_ring_size, in vmxnet3_open()
3528 adapter->rx_ring_size, in vmxnet3_open()
3529 adapter->rx_ring2_size, in vmxnet3_open()
3530 adapter->txdata_desc_size, in vmxnet3_open()
3531 adapter->rxdata_desc_size); in vmxnet3_open()
3558 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_close()
3566 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_close()
3582 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); in vmxnet3_force_close()
3585 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_force_close()
3586 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_force_close()
3591 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_force_close()
3592 dev_close(adapter->netdev); in vmxnet3_force_close()
3602 WRITE_ONCE(netdev->mtu, new_mtu); in vmxnet3_change_mtu()
3608 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_change_mtu()
3615 /* we need to re-create the rx queues based on the new MTU */ in vmxnet3_change_mtu()
3621 "failed to re-create rx queues, " in vmxnet3_change_mtu()
3629 "failed to re-activate, error %d. " in vmxnet3_change_mtu()
3636 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_change_mtu()
3647 struct net_device *netdev = adapter->netdev; in vmxnet3_declare_features()
3651 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3654 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3655 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3658 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3664 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_declare_features()
3667 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3674 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) { in vmxnet3_declare_features()
3675 netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); in vmxnet3_declare_features()
3676 netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); in vmxnet3_declare_features()
3679 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) { in vmxnet3_declare_features()
3680 netdev->hw_features &= ~(NETIF_F_LRO); in vmxnet3_declare_features()
3681 netdev->hw_enc_features &= ~(NETIF_F_LRO); in vmxnet3_declare_features()
3687 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3689 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3691 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3693 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3695 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3697 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO; in vmxnet3_declare_features()
3699 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3701 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO; in vmxnet3_declare_features()
3703 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3705 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3707 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3709 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3712 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_declare_features()
3713 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3715 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3716 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3718 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3719 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3720 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) && in vmxnet3_declare_features()
3721 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) { in vmxnet3_declare_features()
3722 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3723 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3725 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3726 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) { in vmxnet3_declare_features()
3727 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3728 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3732 netdev->vlan_features = netdev->hw_features & in vmxnet3_declare_features()
3735 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in vmxnet3_declare_features()
3766 int ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3767 adapter->intr.msix_entries, nvec, nvec); in vmxnet3_acquire_msix_vectors()
3769 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { in vmxnet3_acquire_msix_vectors()
3770 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3771 "Failed to enable %d MSI-X, trying %d\n", in vmxnet3_acquire_msix_vectors()
3774 ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3775 adapter->intr.msix_entries, in vmxnet3_acquire_msix_vectors()
3781 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3782 "Failed to enable MSI-X, error: %d\n", ret); in vmxnet3_acquire_msix_vectors()
3798 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3802 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3803 adapter->intr.type = cfg & 0x3; in vmxnet3_alloc_intr_resources()
3804 adapter->intr.mask_mode = (cfg >> 2) & 0x3; in vmxnet3_alloc_intr_resources()
3806 if (adapter->intr.type == VMXNET3_IT_AUTO) { in vmxnet3_alloc_intr_resources()
3807 adapter->intr.type = VMXNET3_IT_MSIX; in vmxnet3_alloc_intr_resources()
3811 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_alloc_intr_resources()
3814 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? in vmxnet3_alloc_intr_resources()
3815 1 : adapter->num_tx_queues; in vmxnet3_alloc_intr_resources()
3816 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? in vmxnet3_alloc_intr_resources()
3817 0 : adapter->num_rx_queues; in vmxnet3_alloc_intr_resources()
3823 adapter->intr.msix_entries[i].entry = i; in vmxnet3_alloc_intr_resources()
3830 * then limit the number of rx queues to 1 in vmxnet3_alloc_intr_resources()
3834 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE in vmxnet3_alloc_intr_resources()
3835 || adapter->num_rx_queues != 1) { in vmxnet3_alloc_intr_resources()
3836 adapter->share_intr = VMXNET3_INTR_TXSHARE; in vmxnet3_alloc_intr_resources()
3837 netdev_err(adapter->netdev, in vmxnet3_alloc_intr_resources()
3838 "Number of rx queues : 1\n"); in vmxnet3_alloc_intr_resources()
3839 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3843 adapter->intr.num_intrs = nvec_allocated; in vmxnet3_alloc_intr_resources()
3847 /* If we cannot allocate MSI-X vectors, use only one rx queue */ in vmxnet3_alloc_intr_resources()
3848 dev_info(&adapter->pdev->dev, in vmxnet3_alloc_intr_resources()
3849 "Failed to enable MSI-X, error %d. " in vmxnet3_alloc_intr_resources()
3850 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated); in vmxnet3_alloc_intr_resources()
3852 adapter->intr.type = VMXNET3_IT_MSI; in vmxnet3_alloc_intr_resources()
3855 if (adapter->intr.type == VMXNET3_IT_MSI) { in vmxnet3_alloc_intr_resources()
3856 if (!pci_enable_msi(adapter->pdev)) { in vmxnet3_alloc_intr_resources()
3857 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3858 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
3864 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3865 dev_info(&adapter->netdev->dev, in vmxnet3_alloc_intr_resources()
3866 "Using INTx interrupt, #Rx queues: 1.\n"); in vmxnet3_alloc_intr_resources()
3867 adapter->intr.type = VMXNET3_IT_INTX; in vmxnet3_alloc_intr_resources()
3869 /* INT-X related setting */ in vmxnet3_alloc_intr_resources()
3870 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
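The MSI-X vector budget requested above depends on the sharing mode: with TXSHARE all tx queues share one completion vector, with BUDDYSHARE each rx queue rides on its tx queue's vector, and one more vector goes to the event interrupt that vmxnet3_request_irqs() registers separately. A sketch of the count under those assumptions (the enum names mirror the driver's modes, the function is illustrative):

#include <stdio.h>

enum share_mode { DONTSHARE, TXSHARE, BUDDYSHARE };

/* Sketch: number of MSI-X vectors wanted for a given sharing mode. */
static int msix_vectors_wanted(enum share_mode mode, int ntx, int nrx)
{
    int nvec = (mode == TXSHARE) ? 1 : ntx;   /* tx completion vectors */
    nvec += (mode == BUDDYSHARE) ? 0 : nrx;   /* rx vectors unless paired with tx */
    return nvec + 1;                          /* event interrupt */
}

int main(void)
{
    printf("dontshare 4/4 -> %d, buddyshare 4/4 -> %d, txshare 4/1 -> %d\n",
           msix_vectors_wanted(DONTSHARE, 4, 4),
           msix_vectors_wanted(BUDDYSHARE, 4, 4),
           msix_vectors_wanted(TXSHARE, 4, 1));
    return 0;
}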
3877 if (adapter->intr.type == VMXNET3_IT_MSIX) in vmxnet3_free_intr_resources()
3878 pci_disable_msix(adapter->pdev); in vmxnet3_free_intr_resources()
3879 else if (adapter->intr.type == VMXNET3_IT_MSI) in vmxnet3_free_intr_resources()
3880 pci_disable_msi(adapter->pdev); in vmxnet3_free_intr_resources()
3882 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); in vmxnet3_free_intr_resources()
3890 adapter->tx_timeout_count++; in vmxnet3_tx_timeout()
3892 netdev_err(adapter->netdev, "tx hang\n"); in vmxnet3_tx_timeout()
3893 schedule_work(&adapter->work); in vmxnet3_tx_timeout()
3905 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_reset_work()
3910 if (netif_running(adapter->netdev)) { in vmxnet3_reset_work()
3911 netdev_notice(adapter->netdev, "resetting\n"); in vmxnet3_reset_work()
3916 netdev_info(adapter->netdev, "already closed\n"); in vmxnet3_reset_work()
3920 netif_wake_queue(adapter->netdev); in vmxnet3_reset_work()
3921 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_reset_work()
3980 return -ENOMEM; in vmxnet3_probe_device()
3984 adapter->netdev = netdev; in vmxnet3_probe_device()
3985 adapter->pdev = pdev; in vmxnet3_probe_device()
3987 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; in vmxnet3_probe_device()
3988 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; in vmxnet3_probe_device()
3989 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; in vmxnet3_probe_device()
3991 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in vmxnet3_probe_device()
3993 dev_err(&pdev->dev, "dma_set_mask failed\n"); in vmxnet3_probe_device()
3997 spin_lock_init(&adapter->cmd_lock); in vmxnet3_probe_device()
3998 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, in vmxnet3_probe_device()
4001 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { in vmxnet3_probe_device()
4002 dev_err(&pdev->dev, "Failed to map dma\n"); in vmxnet3_probe_device()
4003 err = -EFAULT; in vmxnet3_probe_device()
4006 adapter->shared = dma_alloc_coherent( in vmxnet3_probe_device()
4007 &adapter->pdev->dev, in vmxnet3_probe_device()
4009 &adapter->shared_pa, GFP_KERNEL); in vmxnet3_probe_device()
4010 if (!adapter->shared) { in vmxnet3_probe_device()
4011 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
4012 err = -ENOMEM; in vmxnet3_probe_device()
4021 for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) { in vmxnet3_probe_device()
4024 adapter->version = i + 1; in vmxnet3_probe_device()
4029 dev_err(&pdev->dev, in vmxnet3_probe_device()
4031 err = -EBUSY; in vmxnet3_probe_device()
4034 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); in vmxnet3_probe_device()
4040 dev_err(&pdev->dev, in vmxnet3_probe_device()
4042 err = -EBUSY; in vmxnet3_probe_device()
4047 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR); in vmxnet3_probe_device()
4048 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR); in vmxnet3_probe_device()
4049 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4050 adapter->dev_caps[0] = adapter->devcap_supported[0] & in vmxnet3_probe_device()
4053 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) && in vmxnet3_probe_device()
4054 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) && in vmxnet3_probe_device()
4055 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) { in vmxnet3_probe_device()
4056 adapter->dev_caps[0] |= adapter->devcap_supported[0] & in vmxnet3_probe_device()
4059 if (adapter->dev_caps[0]) in vmxnet3_probe_device()
4060 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_probe_device()
4062 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4064 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_probe_device()
4065 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4069 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4070 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD; in vmxnet3_probe_device()
4071 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD; in vmxnet3_probe_device()
4072 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2; in vmxnet3_probe_device()
4074 adapter->tx_prod_offset = VMXNET3_REG_TXPROD; in vmxnet3_probe_device()
4075 adapter->rx_prod_offset = VMXNET3_REG_RXPROD; in vmxnet3_probe_device()
4076 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2; in vmxnet3_probe_device()
4080 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4084 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4086 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff)); in vmxnet3_probe_device()
4087 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff)); in vmxnet3_probe_device()
4089 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4091 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4094 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES || in vmxnet3_probe_device()
4095 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) { in vmxnet3_probe_device()
4096 adapter->queuesExtEnabled = true; in vmxnet3_probe_device()
4098 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4101 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4104 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4106 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4109 dev_info(&pdev->dev, in vmxnet3_probe_device()
4110 "# of Tx queues : %d, # of Rx queues : %d\n", in vmxnet3_probe_device()
4111 adapter->num_tx_queues, adapter->num_rx_queues); in vmxnet3_probe_device()
4113 adapter->rx_buf_per_pkt = 1; in vmxnet3_probe_device()
4115 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_probe_device()
4116 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; in vmxnet3_probe_device()
4117 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, in vmxnet3_probe_device()
4118 &adapter->queue_desc_pa, in vmxnet3_probe_device()
4121 if (!adapter->tqd_start) { in vmxnet3_probe_device()
4122 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
4123 err = -ENOMEM; in vmxnet3_probe_device()
4126 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + in vmxnet3_probe_device()
4127 adapter->num_tx_queues); in vmxnet3_probe_device()
4129 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf; in vmxnet3_probe_device()
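The probe path allocates the tx and rx queue descriptors as one DMA-coherent block and then points rqd_start just past the last tx descriptor. A plain-malloc sketch of that carving (types and sizes are placeholders, and malloc() stands in for dma_alloc_coherent()):

#include <stdio.h>
#include <stdlib.h>

struct tx_qdesc { char pad[256]; };   /* placeholder for Vmxnet3_TxQueueDesc */
struct rx_qdesc { char pad[256]; };   /* placeholder for Vmxnet3_RxQueueDesc */

int main(void)
{
    unsigned ntx = 4, nrx = 4;
    size_t size = ntx * sizeof(struct tx_qdesc) + nrx * sizeof(struct rx_qdesc);

    /* One contiguous block holding tx descriptors followed by rx descriptors. */
    struct tx_qdesc *tqd_start = malloc(size);
    if (!tqd_start)
        return 1;
    struct rx_qdesc *rqd_start = (struct rx_qdesc *)(tqd_start + ntx);

    printf("block %zu bytes, rx descriptors start at offset %zu\n",
           size, (size_t)((char *)rqd_start - (char *)tqd_start));
    free(tqd_start);
    return 0;
}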
4131 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4133 &adapter->pm_conf_pa, in vmxnet3_probe_device()
4135 if (adapter->pm_conf == NULL) { in vmxnet3_probe_device()
4136 err = -ENOMEM; in vmxnet3_probe_device()
4142 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4144 &adapter->rss_conf_pa, in vmxnet3_probe_device()
4146 if (adapter->rss_conf == NULL) { in vmxnet3_probe_device()
4147 err = -ENOMEM; in vmxnet3_probe_device()
4153 adapter->coal_conf = in vmxnet3_probe_device()
4154 dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4157 &adapter->coal_conf_pa, in vmxnet3_probe_device()
4159 if (!adapter->coal_conf) { in vmxnet3_probe_device()
4160 err = -ENOMEM; in vmxnet3_probe_device()
4163 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; in vmxnet3_probe_device()
4164 adapter->default_coal_mode = true; in vmxnet3_probe_device()
4168 adapter->default_rss_fields = true; in vmxnet3_probe_device()
4169 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT; in vmxnet3_probe_device()
4172 SET_NETDEV_DEV(netdev, &pdev->dev); in vmxnet3_probe_device()
4174 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in vmxnet3_probe_device()
4177 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? in vmxnet3_probe_device()
4180 if (adapter->num_tx_queues == adapter->num_rx_queues) in vmxnet3_probe_device()
4181 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; in vmxnet3_probe_device()
4183 adapter->share_intr = VMXNET3_INTR_DONTSHARE; in vmxnet3_probe_device()
4188 if (adapter->num_rx_queues > 1 && in vmxnet3_probe_device()
4189 adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4190 adapter->rss = true; in vmxnet3_probe_device()
4191 netdev->hw_features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4192 netdev->features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4193 dev_dbg(&pdev->dev, "RSS is enabled.\n"); in vmxnet3_probe_device()
4195 adapter->rss = false; in vmxnet3_probe_device()
4202 netdev->netdev_ops = &vmxnet3_netdev_ops; in vmxnet3_probe_device()
4204 netdev->watchdog_timeo = 5 * HZ; in vmxnet3_probe_device()
4206 /* MTU range: 60 - 9190 */ in vmxnet3_probe_device()
4207 netdev->min_mtu = VMXNET3_MIN_MTU; in vmxnet3_probe_device()
4209 netdev->max_mtu = VMXNET3_V6_MAX_MTU; in vmxnet3_probe_device()
4211 netdev->max_mtu = VMXNET3_MAX_MTU; in vmxnet3_probe_device()
4213 INIT_WORK(&adapter->work, vmxnet3_reset_work); in vmxnet3_probe_device()
4214 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_probe_device()
4216 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4218 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_probe_device()
4219 netif_napi_add(adapter->netdev, in vmxnet3_probe_device()
4220 &adapter->rx_queue[i].napi, in vmxnet3_probe_device()
4224 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, in vmxnet3_probe_device()
4228 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); in vmxnet3_probe_device()
4229 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); in vmxnet3_probe_device()
4235 dev_err(&pdev->dev, "Failed to register adapter\n"); in vmxnet3_probe_device()
4244 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4246 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_probe_device()
4251 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_probe_device()
4252 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_probe_device()
4255 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_probe_device()
4256 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_probe_device()
4258 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_probe_device()
4259 adapter->queue_desc_pa); in vmxnet3_probe_device()
4263 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4265 adapter->shared, adapter->shared_pa); in vmxnet3_probe_device()
4267 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_probe_device()
4295 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4299 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4310 cancel_work_sync(&adapter->work); in vmxnet3_remove_device()
4317 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4319 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_remove_device()
4322 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_remove_device()
4323 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_remove_device()
4325 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_remove_device()
4326 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_remove_device()
4328 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_remove_device()
4330 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_remove_device()
4331 adapter->queue_desc_pa); in vmxnet3_remove_device()
4332 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4334 adapter->shared, adapter->shared_pa); in vmxnet3_remove_device()
4335 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_remove_device()
4349 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_shutdown_device()
4353 &adapter->state)) { in vmxnet3_shutdown_device()
4354 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4357 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4360 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4363 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4387 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_suspend()
4388 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_suspend()
4396 /* Create wake-up filters. */ in vmxnet3_suspend()
4397 pmConf = adapter->pm_conf; in vmxnet3_suspend()
4400 if (adapter->wol & WAKE_UCAST) { in vmxnet3_suspend()
4401 pmConf->filters[i].patternSize = ETH_ALEN; in vmxnet3_suspend()
4402 pmConf->filters[i].maskSize = 1; in vmxnet3_suspend()
4403 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); in vmxnet3_suspend()
4404 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ in vmxnet3_suspend()
4406 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4410 if (adapter->wol & WAKE_ARP) { in vmxnet3_suspend()
4419 ifa = rcu_dereference(in_dev->ifa_list); in vmxnet3_suspend()
4425 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ in vmxnet3_suspend()
4429 pmConf->filters[i].maskSize = in vmxnet3_suspend()
4430 (pmConf->filters[i].patternSize - 1) / 8 + 1; in vmxnet3_suspend()
4433 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; in vmxnet3_suspend()
4434 ehdr->h_proto = htons(ETH_P_ARP); in vmxnet3_suspend()
4437 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; in vmxnet3_suspend()
4438 ahdr->ar_op = htons(ARPOP_REQUEST); in vmxnet3_suspend()
4443 *(__be32 *)arpreq = ifa->ifa_address; in vmxnet3_suspend()
4448 pmConf->filters[i].mask[0] = 0x00; in vmxnet3_suspend()
4449 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ in vmxnet3_suspend()
4450 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ in vmxnet3_suspend()
4451 pmConf->filters[i].mask[3] = 0x00; in vmxnet3_suspend()
4452 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ in vmxnet3_suspend()
4453 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ in vmxnet3_suspend()
4455 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4460 if (adapter->wol & WAKE_MAGIC) in vmxnet3_suspend()
4461 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; in vmxnet3_suspend()
4463 pmConf->numFilters = i; in vmxnet3_suspend()
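Each wake-on-LAN filter built above is a byte pattern plus a per-byte significance mask: maskSize is (patternSize - 1) / 8 + 1, and bit j of the mask marks pattern byte j as one the device must match, which is why the unicast filter's mask[0] is 0x3F (the six bytes of the destination MAC). A sketch of building such a mask; the struct below is illustrative, not the Vmxnet3_PMConf layout:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: wake-up pattern with a byte-granularity match mask. */
struct wake_filter {
    uint8_t pattern[128];
    uint8_t mask[16];
    uint8_t pattern_size;
    uint8_t mask_size;
};

/* Mark pattern bytes [first, first+len) as significant. */
static void filter_mark_bytes(struct wake_filter *f, unsigned first, unsigned len)
{
    for (unsigned j = first; j < first + len; j++)
        f->mask[j / 8] |= 1u << (j % 8);
}

int main(void)
{
    struct wake_filter f = { .pattern_size = 6 };

    f.mask_size = (f.pattern_size - 1) / 8 + 1;   /* one mask byte per 8 pattern bytes */
    filter_mark_bytes(&f, 0, 6);                  /* match the 6-byte destination MAC */
    printf("maskSize %u mask[0] 0x%02X\n",
           (unsigned)f.mask_size, (unsigned)f.mask[0]);   /* 1, 0x3F */
    return 0;
}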
4465 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); in vmxnet3_suspend()
4466 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( in vmxnet3_suspend()
4468 adapter->shared->devRead.pmConfDesc.confPA = in vmxnet3_suspend()
4469 cpu_to_le64(adapter->pm_conf_pa); in vmxnet3_suspend()
4471 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4474 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4478 adapter->wol); in vmxnet3_suspend()
4515 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_resume()
4518 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_resume()
4526 "failed to re-activate on resume, error: %d", err); in vmxnet3_resume()
4558 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC, in vmxnet3_init_module()