Lines matching refs:adapter in the vmxnet3 driver (each entry: source line number, the matching fragment, and the containing function; "argument"/"local" mark how adapter enters that function)

50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) in vmxnet3_enable_intr() argument
58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); in vmxnet3_enable_intr()
63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) in vmxnet3_disable_intr() argument
65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); in vmxnet3_disable_intr()
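The two fragments above are the whole masking primitive: each interrupt vector owns an IMR slot at an 8-byte stride in BAR0, and writing 1 masks the vector while 0 unmasks it. A minimal sketch of what the macro call expands to, assuming VMXNET3_WRITE_BAR0_REG() is a writel() wrapper over the BAR0 mapping (adapter->hw_addr0, per the ioremap() entries near line 3295):

static inline void
vmxnet3_set_intr_mask(struct vmxnet3_adapter *adapter,
                      unsigned int intr_idx, u32 masked)
{
        /* masked = 1 disables the vector, 0 enables it */
        writel(masked, adapter->hw_addr0 + VMXNET3_REG_IMR + intr_idx * 8);
}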
73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) in vmxnet3_enable_all_intrs() argument
77 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_enable_all_intrs()
78 vmxnet3_enable_intr(adapter, i); in vmxnet3_enable_all_intrs()
79 if (!VMXNET3_VERSION_GE_6(adapter) || in vmxnet3_enable_all_intrs()
80 !adapter->queuesExtEnabled) { in vmxnet3_enable_all_intrs()
81 adapter->shared->devRead.intrConf.intrCtrl &= in vmxnet3_enable_all_intrs()
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &= in vmxnet3_enable_all_intrs()
91 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) in vmxnet3_disable_all_intrs() argument
95 if (!VMXNET3_VERSION_GE_6(adapter) || in vmxnet3_disable_all_intrs()
96 !adapter->queuesExtEnabled) { in vmxnet3_disable_all_intrs()
97 adapter->shared->devRead.intrConf.intrCtrl |= in vmxnet3_disable_all_intrs()
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |= in vmxnet3_disable_all_intrs()
103 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_disable_all_intrs()
104 vmxnet3_disable_intr(adapter, i); in vmxnet3_disable_all_intrs()
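Besides the per-vector IMR writes, enable_all/disable_all toggle a global interrupt-control flag in the shared area the device reads; the operand is truncated in the listing. Sketch of the enable direction, assuming the flag constant is named VMXNET3_IC_DISABLE_ALL; disable is the same statement with |= instead of &= ~:

if (!VMXNET3_VERSION_GE_6(adapter) || !adapter->queuesExtEnabled)
        adapter->shared->devRead.intrConf.intrCtrl &=
                cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
else
        adapter->shared->devReadExt.intrConfExt.intrCtrl &=
                cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);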
109 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) in vmxnet3_ack_events() argument
111 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); in vmxnet3_ack_events()
116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stopped() argument
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
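All four tq helpers recover the subqueue index the same way: tx_queue is a flat array embedded in the adapter, so (tq - adapter->tx_queue) is the queue ordinal handed to netif_{start,wake,stop}_subqueue(). A hypothetical helper spelling that out:

static inline unsigned int
vmxnet3_tq_index(const struct vmxnet3_tx_queue *tq,
                 const struct vmxnet3_adapter *adapter)
{
        return tq - adapter->tx_queue;  /* element offset, not bytes */
}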
191 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) in vmxnet3_check_link() argument
197 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_check_link()
198 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); in vmxnet3_check_link()
199 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_check_link()
200 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_check_link()
202 adapter->link_speed = ret >> 16; in vmxnet3_check_link()
204 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", in vmxnet3_check_link()
205 adapter->link_speed); in vmxnet3_check_link()
206 netif_carrier_on(adapter->netdev); in vmxnet3_check_link()
209 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
210 vmxnet3_tq_start(&adapter->tx_queue[i], in vmxnet3_check_link()
211 adapter); in vmxnet3_check_link()
214 netdev_info(adapter->netdev, "NIC Link is Down\n"); in vmxnet3_check_link()
215 netif_carrier_off(adapter->netdev); in vmxnet3_check_link()
218 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
219 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); in vmxnet3_check_link()
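The GET_LINK sequence above follows the driver's standard command idiom: write the command to the BAR1 CMD register under cmd_lock, then read the same register back for the result. Bits 31:16 carry the speed in Mbps; the link-up test itself is truncated from the listing, so the low-bit check in this sketch is an assumption consistent with the Up/Down branches:

u32 ret;
unsigned long flags;

spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);

adapter->link_speed = ret >> 16;
if (ret & 1)                            /* assumed link-up flag */
        netif_carrier_on(adapter->netdev);
else
        netif_carrier_off(adapter->netdev);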
225 vmxnet3_process_events(struct vmxnet3_adapter *adapter) in vmxnet3_process_events() argument
229 u32 events = le32_to_cpu(adapter->shared->ecr); in vmxnet3_process_events()
233 vmxnet3_ack_events(adapter, events); in vmxnet3_process_events()
237 vmxnet3_check_link(adapter, true); in vmxnet3_process_events()
241 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_process_events()
242 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_process_events()
244 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_process_events()
246 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_process_events()
247 if (adapter->tqd_start[i].status.stopped) in vmxnet3_process_events()
248 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
250 adapter->netdev->name, i, le32_to_cpu( in vmxnet3_process_events()
251 adapter->tqd_start[i].status.error)); in vmxnet3_process_events()
252 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_process_events()
253 if (adapter->rqd_start[i].status.stopped) in vmxnet3_process_events()
254 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
256 adapter->netdev->name, i, in vmxnet3_process_events()
257 adapter->rqd_start[i].status.error); in vmxnet3_process_events()
259 schedule_work(&adapter->work); in vmxnet3_process_events()
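Event handling reconstructed from the fragments: latch ECR from the shared area, ack it through the BAR1 ECR register, then dispatch. The branch conditions are truncated in the listing, so the ECR bit names below are assumptions; the queue-error path ends in schedule_work(), deferring the actual reset to vmxnet3_reset_work():

u32 events = le32_to_cpu(adapter->shared->ecr);

if (!events)
        return;
vmxnet3_ack_events(adapter, events);
if (events & VMXNET3_ECR_LINK)          /* assumed bit name */
        vmxnet3_check_link(adapter, true);
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { /* assumed */
        /* GET_QUEUE_STATUS, log any stopped queue, then reset */
        schedule_work(&adapter->work);
}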
385 struct pci_dev *pdev, struct vmxnet3_adapter *adapter, in vmxnet3_unmap_pkt() argument
428 struct vmxnet3_adapter *adapter) in vmxnet3_tq_tx_complete() argument
445 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
446 adapter, &bq); in vmxnet3_tq_tx_complete()
456 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && in vmxnet3_tq_tx_complete()
459 netif_carrier_ok(adapter->netdev))) { in vmxnet3_tq_tx_complete()
460 vmxnet3_tq_wake(tq, adapter); in vmxnet3_tq_tx_complete()
470 struct vmxnet3_adapter *adapter) in vmxnet3_tq_cleanup() argument
485 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); in vmxnet3_tq_cleanup()
513 struct vmxnet3_adapter *adapter) in vmxnet3_tq_destroy() argument
516 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
522 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
528 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
534 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
546 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_destroy_all() argument
550 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_destroy_all()
551 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); in vmxnet3_tq_destroy_all()
557 struct vmxnet3_adapter *adapter) in vmxnet3_tq_init() argument
591 struct vmxnet3_adapter *adapter) in vmxnet3_tq_create() argument
596 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
600 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); in vmxnet3_tq_create()
604 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
608 netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); in vmxnet3_tq_create()
613 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
617 netdev_err(adapter->netdev, "failed to allocate tx ts ring\n"); in vmxnet3_tq_create()
624 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
628 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); in vmxnet3_tq_create()
634 dev_to_node(&adapter->pdev->dev)); in vmxnet3_tq_create()
641 vmxnet3_tq_destroy(tq, adapter); in vmxnet3_tq_create()
646 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_cleanup_all() argument
650 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_cleanup_all()
651 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); in vmxnet3_tq_cleanup_all()
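tq_create/tq_destroy apply one lifecycle to all four per-queue rings (tx, data, ts, comp): each is a coherent DMA buffer of size * sizeof(descriptor), and a failed allocation unwinds through vmxnet3_tq_destroy(), which tolerates partially built queues. Sketch for the tx ring; basePA is the field name assumed for the DMA handle the listing truncates:

tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
                                      tq->tx_ring.size *
                                      sizeof(struct Vmxnet3_TxDesc),
                                      &tq->tx_ring.basePA, GFP_KERNEL);
if (!tq->tx_ring.base) {
        netdev_err(adapter->netdev, "failed to allocate tx ring\n");
        goto err;       /* err: vmxnet3_tq_destroy(tq, adapter) */
}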
662 int num_to_alloc, struct vmxnet3_adapter *adapter) in vmxnet3_rq_alloc_rx_buf() argument
689 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
698 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
701 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
723 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
726 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
755 netdev_dbg(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
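The refill loop allocates and DMA-maps each buffer before publishing its descriptor, and checks dma_mapping_error() so an unmapped address never reaches the ring. Sketch of the skb-buffer case from the fragments (page buffers go through dma_map_page() the same way):

rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, rbi->len,
                                       GFP_KERNEL);
if (!rbi->skb)
        break;                  /* retry on the next refill pass */
rbi->dma_addr = dma_map_single(&adapter->pdev->dev, rbi->skb->data,
                               rbi->len, DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) {
        dev_kfree_skb_any(rbi->skb);
        rbi->skb = NULL;
        break;
}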
784 struct vmxnet3_adapter *adapter) in vmxnet3_map_pkt() argument
811 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
838 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_map_pkt()
841 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
853 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
880 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, in vmxnet3_map_pkt()
883 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
895 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
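vmxnet3_map_pkt() maps the linear part of the skb with dma_map_single() and each page fragment with skb_frag_dma_map(), failing the transmit if any mapping errors out. Sketch of one fragment mapping under those assumptions:

const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
                                 skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
        return -EFAULT;         /* caller drops the packet */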
924 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter) in vmxnet3_tq_init_all() argument
928 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_init_all()
929 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); in vmxnet3_tq_init_all()
953 struct vmxnet3_adapter *adapter) in vmxnet3_parse_hdr() argument
958 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_parse_hdr()
975 if (VMXNET3_VERSION_GE_4(adapter) && in vmxnet3_parse_hdr()
1056 struct vmxnet3_adapter *adapter) in vmxnet3_copy_hdr() argument
1065 netdev_dbg(adapter->netdev, in vmxnet3_copy_hdr()
1135 struct vmxnet3_adapter *adapter, struct net_device *netdev) in vmxnet3_tq_xmit() argument
1203 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1232 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1234 " next2fill %u\n", adapter->netdev->name, in vmxnet3_tq_xmit()
1237 vmxnet3_tq_stop(tq, adapter); in vmxnet3_tq_xmit()
1243 vmxnet3_copy_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1246 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1262 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_tq_xmit()
1264 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_tq_xmit()
1282 if (VMXNET3_VERSION_GE_4(adapter) && in vmxnet3_tq_xmit()
1286 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_tq_xmit()
1316 adapter->latencyConf->sampleRate != 0) { in vmxnet3_tq_xmit()
1317 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) { in vmxnet3_tq_xmit()
1339 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1349 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_tq_xmit()
1350 adapter->tx_prod_offset + tq->qid * 8, in vmxnet3_tq_xmit()
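The transmit tail follows the usual stop/doorbell pattern: when the ring cannot hold the new descriptors, the subqueue is stopped and NETDEV_TX_BUSY returned; otherwise the producer index is published through the per-queue doorbell at tx_prod_offset + qid * 8. ring_space() below is a hypothetical helper; the listing truncates the actual space check:

if (ring_space(tq) < count) {           /* hypothetical space check */
        vmxnet3_tq_stop(tq, adapter);
        return NETDEV_TX_BUSY;
}
/* ... build descriptors ... */
VMXNET3_WRITE_BAR0_REG(adapter, adapter->tx_prod_offset + tq->qid * 8,
                       tq->tx_ring.next2fill);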
1365 vmxnet3_create_pp(struct vmxnet3_adapter *adapter, in vmxnet3_create_pp() argument
1368 bool xdp_prog = vmxnet3_xdp_enabled(adapter); in vmxnet3_create_pp()
1374 .dev = &adapter->pdev->dev, in vmxnet3_create_pp()
1386 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, in vmxnet3_create_pp()
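vmxnet3_create_pp() binds a page pool to the PCI device (the .dev entry above) and registers the queue as an XDP RxQ. A sketch under those assumptions; every parameter other than .dev, and the rq->page_pool field name, is illustrative:

struct page_pool_params pp_params = {
        .order     = 0,
        .pool_size = rq->rx_ring[0].size,       /* assumed sizing */
        .nid       = NUMA_NO_NODE,
        .dev       = &adapter->pdev->dev,
        .dma_dir   = DMA_BIDIRECTIONAL,         /* allows XDP_TX */
};

rq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rq->page_pool))
        return PTR_ERR(rq->page_pool);

err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, 0);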
1425 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_xmit_frame() local
1427 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); in vmxnet3_xmit_frame()
1429 &adapter->tx_queue[skb->queue_mapping], in vmxnet3_xmit_frame()
1430 adapter, netdev); in vmxnet3_xmit_frame()
1435 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, in vmxnet3_rx_csum() argument
1439 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1483 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) in vmxnet3_rx_error() argument
1509 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, in vmxnet3_get_hdr_len() argument
1565 struct vmxnet3_adapter *adapter, int quota) in vmxnet3_rq_rx_complete() argument
1568 adapter->rx_prod_offset, adapter->rx_prod2_offset in vmxnet3_rq_rx_complete()
1609 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1619 vmxnet3_rx_error(rq, rcd, ctx, adapter); in vmxnet3_rq_rx_complete()
1623 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
1627 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) { in vmxnet3_rq_rx_complete()
1635 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd, in vmxnet3_rq_rx_complete()
1663 netdev_dbg(adapter->netdev, in vmxnet3_rq_rx_complete()
1682 VMXNET3_RX_DATA_RING(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1685 if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
1691 act = vmxnet3_process_xdp_small(adapter, rq, in vmxnet3_rq_rx_complete()
1703 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_rx_complete()
1716 if (rxDataRingUsed && adapter->rxdataring_enabled) { in vmxnet3_rq_rx_complete()
1729 dma_map_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1732 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1746 dma_unmap_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1761 if (VMXNET3_VERSION_GE_2(adapter) && in vmxnet3_rq_rx_complete()
1806 new_dma_addr = dma_map_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1810 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1820 dma_unmap_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1838 u32 mtu = adapter->netdev->mtu; in vmxnet3_rq_rx_complete()
1843 (adapter->netdev->features & NETIF_F_RXHASH)) { in vmxnet3_rq_rx_complete()
1866 vmxnet3_rx_csum(adapter, skb, in vmxnet3_rq_rx_complete()
1868 skb->protocol = eth_type_trans(skb, adapter->netdev); in vmxnet3_rq_rx_complete()
1870 !(adapter->netdev->features & NETIF_F_LRO)) in vmxnet3_rq_rx_complete()
1881 hlen = vmxnet3_get_hdr_len(adapter, skb, in vmxnet3_rq_rx_complete()
1902 if ((adapter->netdev->features & NETIF_F_LRO) && in vmxnet3_rq_rx_complete()
1932 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP))) in vmxnet3_rq_rx_complete()
1963 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_rq_rx_complete()
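The receive completion path swaps buffers rather than loaning them: a replacement skb (or page) is allocated and mapped first, and only then is the old, still-mapped buffer unmapped and handed up the stack, so a mapping failure degrades to a drop instead of an unfillable ring slot. Sketch of the skb swap; the rcd_done label stands in for the fragment's drop path:

new_skb = netdev_alloc_skb_ip_align(adapter->netdev, rbi->len);
if (!new_skb)
        goto rcd_done;                  /* drop, keep old buffer */
new_dma_addr = dma_map_single(&adapter->pdev->dev, new_skb->data,
                              rbi->len, DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, new_dma_addr)) {
        dev_kfree_skb(new_skb);
        goto rcd_done;
}
dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, rbi->len,
                 DMA_FROM_DEVICE);
skb = rbi->skb;                         /* old buffer goes up the stack */
rbi->skb = new_skb;
rbi->dma_addr = new_dma_addr;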
1981 struct vmxnet3_adapter *adapter) in vmxnet3_rq_cleanup() argument
2008 dma_unmap_single(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2014 dma_unmap_page(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2032 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_cleanup_all() argument
2036 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_cleanup_all()
2037 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); in vmxnet3_rq_cleanup_all()
2038 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL); in vmxnet3_rq_cleanup_all()
2043 struct vmxnet3_adapter *adapter) in vmxnet3_rq_destroy() argument
2059 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2074 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2081 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2088 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
2100 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) in vmxnet3_rq_destroy_all_rxdataring() argument
2104 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_destroy_all_rxdataring()
2105 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_rq_destroy_all_rxdataring()
2108 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy_all_rxdataring()
2121 struct vmxnet3_adapter *adapter) in vmxnet3_rq_init() argument
2129 if (i % adapter->rx_buf_per_pkt == 0) { in vmxnet3_rq_init()
2130 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ? in vmxnet3_rq_init()
2133 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
2154 err = vmxnet3_create_pp(adapter, rq, in vmxnet3_rq_init()
2160 adapter) == 0) { in vmxnet3_rq_init()
2168 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
2189 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_init_all() argument
2193 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_init_all()
2194 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); in vmxnet3_rq_init_all()
2196 dev_err(&adapter->netdev->dev, "%s: failed to " in vmxnet3_rq_init_all()
2198 adapter->netdev->name, i); in vmxnet3_rq_init_all()
2208 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) in vmxnet3_rq_create() argument
2218 &adapter->pdev->dev, sz, in vmxnet3_rq_create()
2222 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2228 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { in vmxnet3_rq_create()
2231 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2235 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2237 adapter->rxdataring_enabled = false; in vmxnet3_rq_create()
2247 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2251 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2260 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2264 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); in vmxnet3_rq_create()
2270 dev_to_node(&adapter->pdev->dev)); in vmxnet3_rq_create()
2280 vmxnet3_rq_destroy(rq, adapter); in vmxnet3_rq_create()
2286 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_create_all() argument
2290 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_rq_create_all()
2292 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_create_all()
2293 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); in vmxnet3_rq_create_all()
2295 dev_err(&adapter->netdev->dev, in vmxnet3_rq_create_all()
2297 adapter->netdev->name, i); in vmxnet3_rq_create_all()
2302 if (!adapter->rxdataring_enabled) in vmxnet3_rq_create_all()
2303 vmxnet3_rq_destroy_all_rxdataring(adapter); in vmxnet3_rq_create_all()
2307 vmxnet3_rq_destroy_all(adapter); in vmxnet3_rq_create_all()
2315 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) in vmxnet3_do_poll() argument
2318 if (unlikely(adapter->shared->ecr)) in vmxnet3_do_poll()
2319 vmxnet3_process_events(adapter); in vmxnet3_do_poll()
2320 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_do_poll()
2321 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); in vmxnet3_do_poll()
2323 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_do_poll()
2324 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], in vmxnet3_do_poll()
2325 adapter, budget); in vmxnet3_do_poll()
2337 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); in vmxnet3_poll()
2341 vmxnet3_enable_all_intrs(rx_queue->adapter); in vmxnet3_poll()
2356 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only() local
2362 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_poll_rx_only()
2364 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
2365 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_poll_rx_only()
2368 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget); in vmxnet3_poll_rx_only()
2372 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
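Both poll functions follow the standard NAPI contract visible in the fragments: consume at most budget receive completions, and only when fewer were consumed complete NAPI and unmask the queue's vector. A minimal sketch (function name hypothetical):

static int vmxnet3_poll_sketch(struct napi_struct *napi, int budget)
{
        struct vmxnet3_rx_queue *rq =
                container_of(napi, struct vmxnet3_rx_queue, napi);
        struct vmxnet3_adapter *adapter = rq->adapter;
        int rxd_done;

        rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
        if (rxd_done < budget) {
                napi_complete_done(napi, rxd_done);
                vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
        }
        return rxd_done;
}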
2389 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx() local
2391 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_tx()
2392 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2395 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_msix_tx()
2397 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_msix_tx()
2398 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx()
2399 vmxnet3_tq_tx_complete(txq, adapter); in vmxnet3_msix_tx()
2402 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_msix_tx()
2404 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2419 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx() local
2422 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_rx()
2423 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
2446 struct vmxnet3_adapter *adapter = netdev_priv(dev); in vmxnet3_msix_event() local
2449 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_event()
2450 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2452 if (adapter->shared->ecr) in vmxnet3_msix_event()
2453 vmxnet3_process_events(adapter); in vmxnet3_msix_event()
2455 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
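The three MSI-X handlers share one shape: if mask_mode is VMXNET3_IMM_ACTIVE the driver must mask the vector itself before doing any work (otherwise the device auto-masks), then either schedule NAPI (rx), drain completions inline (tx), or process events, and finally unmask. Sketch of the rx case (function name hypothetical):

static irqreturn_t vmxnet3_msix_rx_sketch(int irq, void *data)
{
        struct vmxnet3_rx_queue *rq = data;
        struct vmxnet3_adapter *adapter = rq->adapter;

        if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
                vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
        napi_schedule(&rq->napi);       /* re-enabled from poll */
        return IRQ_HANDLED;
}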
2468 struct vmxnet3_adapter *adapter = netdev_priv(dev); in vmxnet3_intr() local
2470 if (adapter->intr.type == VMXNET3_IT_INTX) { in vmxnet3_intr()
2471 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); in vmxnet3_intr()
2479 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_intr()
2480 vmxnet3_disable_all_intrs(adapter); in vmxnet3_intr()
2482 napi_schedule(&adapter->rx_queue[0].napi); in vmxnet3_intr()
2493 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_netpoll() local
2495 switch (adapter->intr.type) { in vmxnet3_netpoll()
2499 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_netpoll()
2500 vmxnet3_msix_rx(0, &adapter->rx_queue[i]); in vmxnet3_netpoll()
2506 vmxnet3_intr(0, adapter->netdev); in vmxnet3_netpoll()
2514 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) in vmxnet3_request_irqs() argument
2516 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_request_irqs()
2521 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2522 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_request_irqs()
2523 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_request_irqs()
2524 sprintf(adapter->tx_queue[i].name, "%s-tx-%d", in vmxnet3_request_irqs()
2525 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2529 adapter->tx_queue[i].name, in vmxnet3_request_irqs()
2530 &adapter->tx_queue[i]); in vmxnet3_request_irqs()
2532 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2533 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2536 dev_err(&adapter->netdev->dev, in vmxnet3_request_irqs()
2539 adapter->tx_queue[i].name, err); in vmxnet3_request_irqs()
2545 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_request_irqs()
2546 for (; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2547 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2552 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2556 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2559 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2560 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2561 sprintf(adapter->rx_queue[i].name, "%s-rx-%d", in vmxnet3_request_irqs()
2562 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2564 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2565 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2568 adapter->rx_queue[i].name, in vmxnet3_request_irqs()
2569 &(adapter->rx_queue[i])); in vmxnet3_request_irqs()
2571 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2574 adapter->rx_queue[i].name, err); in vmxnet3_request_irqs()
2578 adapter->rx_queue[i].comp_ring.intr_idx = vector++; in vmxnet3_request_irqs()
2582 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2585 intr->event_msi_vector_name, adapter->netdev); in vmxnet3_request_irqs()
2589 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2590 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, in vmxnet3_request_irqs()
2591 adapter->netdev->name, adapter->netdev); in vmxnet3_request_irqs()
2594 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2595 err = request_irq(adapter->pdev->irq, vmxnet3_intr, in vmxnet3_request_irqs()
2596 IRQF_SHARED, adapter->netdev->name, in vmxnet3_request_irqs()
2597 adapter->netdev); in vmxnet3_request_irqs()
2603 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2608 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2609 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs()
2611 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
2612 rq->dataRingQid = i + 2 * adapter->num_rx_queues; in vmxnet3_request_irqs()
2618 if (adapter->intr.type != VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2619 adapter->intr.event_intr_idx = 0; in vmxnet3_request_irqs()
2620 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2621 adapter->tx_queue[i].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2622 adapter->rx_queue[0].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2625 netdev_info(adapter->netdev, in vmxnet3_request_irqs()
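Vector registration names each IRQ after the netdev plus its role ("%s-tx-%d", "%s-rx-%d", "%s-rxtx-%d" when tx piggybacks on an rx vector) and passes the queue as the cookie the handler gets back. Sketch of one rx vector; the msix_entries indexing matches the .entry setup near line 3823:

sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
        adapter->netdev->name, vector);
err = request_irq(intr->msix_entries[vector].vector, vmxnet3_msix_rx,
                  0, adapter->rx_queue[i].name, &adapter->rx_queue[i]);
if (!err)
        adapter->rx_queue[i].comp_ring.intr_idx = vector++;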
2635 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) in vmxnet3_free_irqs() argument
2637 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_free_irqs()
2646 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_free_irqs()
2647 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_free_irqs()
2649 &(adapter->tx_queue[i])); in vmxnet3_free_irqs()
2650 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) in vmxnet3_free_irqs()
2655 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_free_irqs()
2657 &(adapter->rx_queue[i])); in vmxnet3_free_irqs()
2661 adapter->netdev); in vmxnet3_free_irqs()
2667 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2670 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2679 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) in vmxnet3_restore_vlan() argument
2681 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_restore_vlan()
2687 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in vmxnet3_restore_vlan()
2695 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_vlan_rx_add_vid() local
2698 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_add_vid()
2702 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2703 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_vlan_rx_add_vid()
2705 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2708 set_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_add_vid()
2717 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_vlan_rx_kill_vid() local
2720 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_kill_vid()
2724 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2725 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_vlan_rx_kill_vid()
2727 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2730 clear_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_kill_vid()
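The VLAN filter is a bitmap (vfTable) in the shared rxFilterConf, mirrored by the driver-side active_vlans bitmap; add/kill flip the device bit, issue UPDATE_VLAN_FILTERS, then update the mirror. The word/bit arithmetic below is an assumption about vfTable's layout (u32 words, one bit per VID):

vfTable[vid >> 5] |= 1 << (vid & 31);   /* kill: &= ~(1 << ...) */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
set_bit(vid, adapter->active_vlans);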
2762 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_set_mc() local
2765 &adapter->shared->devRead.rxFilterConf; in vmxnet3_set_mc()
2772 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_set_mc()
2777 vmxnet3_restore_vlan(adapter); in vmxnet3_set_mc()
2793 &adapter->pdev->dev, in vmxnet3_set_mc()
2797 if (!dma_mapping_error(&adapter->pdev->dev, in vmxnet3_set_mc()
2817 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2820 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2822 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2826 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_set_mc()
2828 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2831 dma_unmap_single(&adapter->pdev->dev, new_table_pa, in vmxnet3_set_mc()
2837 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter) in vmxnet3_rq_destroy_all() argument
2841 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_destroy_all()
2842 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); in vmxnet3_rq_destroy_all()
2851 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) in vmxnet3_setup_driver_shared() argument
2853 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_setup_driver_shared()
2876 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); in vmxnet3_setup_driver_shared()
2880 if (adapter->netdev->features & NETIF_F_RXCSUM) in vmxnet3_setup_driver_shared()
2883 if (adapter->netdev->features & NETIF_F_LRO) { in vmxnet3_setup_driver_shared()
2887 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in vmxnet3_setup_driver_shared()
2890 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_setup_driver_shared()
2894 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); in vmxnet3_setup_driver_shared()
2895 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); in vmxnet3_setup_driver_shared()
2897 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + in vmxnet3_setup_driver_shared()
2898 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); in vmxnet3_setup_driver_shared()
2901 devRead->misc.numTxQueues = adapter->num_tx_queues; in vmxnet3_setup_driver_shared()
2902 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_setup_driver_shared()
2903 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared()
2904 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); in vmxnet3_setup_driver_shared()
2905 tqc = &adapter->tqd_start[i].conf; in vmxnet3_setup_driver_shared()
2916 if (VMXNET3_VERSION_GE_9(adapter)) { in vmxnet3_setup_driver_shared()
2917 tqtsc = &adapter->tqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2924 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2925 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_setup_driver_shared()
2926 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared()
2927 rqc = &adapter->rqd_start[i].conf; in vmxnet3_setup_driver_shared()
2937 if (VMXNET3_VERSION_GE_3(adapter)) { in vmxnet3_setup_driver_shared()
2943 if (VMXNET3_VERSION_GE_9(adapter)) { in vmxnet3_setup_driver_shared()
2944 rqtsc = &adapter->rqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2951 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); in vmxnet3_setup_driver_shared()
2953 if (adapter->rss) { in vmxnet3_setup_driver_shared()
2954 struct UPT1_RSSConf *rssConf = adapter->rss_conf; in vmxnet3_setup_driver_shared()
2957 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2969 i, adapter->num_rx_queues); in vmxnet3_setup_driver_shared()
2974 cpu_to_le64(adapter->rss_conf_pa); in vmxnet3_setup_driver_shared()
2980 if (!VMXNET3_VERSION_GE_6(adapter) || in vmxnet3_setup_driver_shared()
2981 !adapter->queuesExtEnabled) { in vmxnet3_setup_driver_shared()
2982 devRead->intrConf.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2984 devRead->intrConf.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2985 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2986 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2988 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
2991 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
2993 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
2994 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
2995 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
2997 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
3003 vmxnet3_restore_vlan(adapter); in vmxnet3_setup_driver_shared()
3004 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); in vmxnet3_setup_driver_shared()
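The truncated assignments near lines 2880-2890 translate netdev features into device feature bits in devRead->misc; the UPT1_F_* names below are assumptions matching that pattern:

if (adapter->netdev->features & NETIF_F_RXCSUM)
        devRead->misc.uptFeatures |= UPT1_F_RXCSUM;     /* assumed */
if (adapter->netdev->features & NETIF_F_LRO)
        devRead->misc.uptFeatures |= UPT1_F_LRO;        /* assumed */
if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
        devRead->misc.uptFeatures |= UPT1_F_RXVLAN;     /* assumed */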
3010 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter) in vmxnet3_init_bufsize() argument
3012 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_bufsize()
3016 if (!VMXNET3_VERSION_GE_7(adapter)) in vmxnet3_init_bufsize()
3019 cmdInfo->ringBufSize = adapter->ringBufSize; in vmxnet3_init_bufsize()
3020 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3021 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_bufsize()
3023 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3027 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter) in vmxnet3_init_coalesce() argument
3029 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_coalesce()
3033 if (!VMXNET3_VERSION_GE_3(adapter)) in vmxnet3_init_coalesce()
3036 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3039 cpu_to_le32(sizeof(*adapter->coal_conf)); in vmxnet3_init_coalesce()
3040 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); in vmxnet3_init_coalesce()
3042 if (adapter->default_coal_mode) { in vmxnet3_init_coalesce()
3043 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_coalesce()
3046 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_coalesce()
3050 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3054 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter) in vmxnet3_init_rssfields() argument
3056 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_rssfields()
3060 if (!VMXNET3_VERSION_GE_4(adapter)) in vmxnet3_init_rssfields()
3063 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
3065 if (adapter->default_rss_fields) { in vmxnet3_init_rssfields()
3066 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_rssfields()
3068 adapter->rss_fields = in vmxnet3_init_rssfields()
3069 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
3071 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_init_rssfields()
3072 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 || in vmxnet3_init_rssfields()
3073 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) && in vmxnet3_init_rssfields()
3074 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3076 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS; in vmxnet3_init_rssfields()
3078 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS); in vmxnet3_init_rssfields()
3081 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) && in vmxnet3_init_rssfields()
3082 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3084 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4; in vmxnet3_init_rssfields()
3086 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4); in vmxnet3_init_rssfields()
3089 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) && in vmxnet3_init_rssfields()
3090 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3092 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6; in vmxnet3_init_rssfields()
3094 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6); in vmxnet3_init_rssfields()
3097 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_init_rssfields()
3098 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG); in vmxnet3_init_rssfields()
3099 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
3101 cmdInfo->setRssFields = adapter->rss_fields; in vmxnet3_init_rssfields()
3102 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_rssfields()
3107 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_init_rssfields()
3109 adapter->rss_fields = in vmxnet3_init_rssfields()
3110 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
3113 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
3117 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) in vmxnet3_activate_dev() argument
3123 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," in vmxnet3_activate_dev()
3124 " ring sizes %u %u %u\n", adapter->netdev->name, in vmxnet3_activate_dev()
3125 adapter->skb_buf_size, adapter->rx_buf_per_pkt, in vmxnet3_activate_dev()
3126 adapter->tx_queue[0].tx_ring.size, in vmxnet3_activate_dev()
3127 adapter->rx_queue[0].rx_ring[0].size, in vmxnet3_activate_dev()
3128 adapter->rx_queue[0].rx_ring[1].size); in vmxnet3_activate_dev()
3130 vmxnet3_tq_init_all(adapter); in vmxnet3_activate_dev()
3131 err = vmxnet3_rq_init_all(adapter); in vmxnet3_activate_dev()
3133 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3138 err = vmxnet3_request_irqs(adapter); in vmxnet3_activate_dev()
3140 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3145 vmxnet3_setup_driver_shared(adapter); in vmxnet3_activate_dev()
3147 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO( in vmxnet3_activate_dev()
3148 adapter->shared_pa)); in vmxnet3_activate_dev()
3149 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( in vmxnet3_activate_dev()
3150 adapter->shared_pa)); in vmxnet3_activate_dev()
3151 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3152 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_activate_dev()
3154 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_activate_dev()
3155 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3158 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3164 vmxnet3_init_bufsize(adapter); in vmxnet3_activate_dev()
3165 vmxnet3_init_coalesce(adapter); in vmxnet3_activate_dev()
3166 vmxnet3_init_rssfields(adapter); in vmxnet3_activate_dev()
3168 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_activate_dev()
3169 VMXNET3_WRITE_BAR0_REG(adapter, in vmxnet3_activate_dev()
3170 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN, in vmxnet3_activate_dev()
3171 adapter->rx_queue[i].rx_ring[0].next2fill); in vmxnet3_activate_dev()
3172 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset + in vmxnet3_activate_dev()
3174 adapter->rx_queue[i].rx_ring[1].next2fill); in vmxnet3_activate_dev()
3178 vmxnet3_set_mc(adapter->netdev); in vmxnet3_activate_dev()
3184 vmxnet3_check_link(adapter, true); in vmxnet3_activate_dev()
3185 netif_tx_wake_all_queues(adapter->netdev); in vmxnet3_activate_dev()
3186 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_activate_dev()
3187 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_activate_dev()
3188 vmxnet3_enable_all_intrs(adapter); in vmxnet3_activate_dev()
3189 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_activate_dev()
3193 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); in vmxnet3_activate_dev()
3194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); in vmxnet3_activate_dev()
3195 vmxnet3_free_irqs(adapter); in vmxnet3_activate_dev()
3199 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_activate_dev()
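Activation is a three-step handshake: publish the shared area's DMA address split across DSAL/DSAH, issue ACTIVATE_DEV, and read the CMD register back; nonzero means the device rejected the configuration and the error path unwinds the interrupt and queue setup (and zeroes DSAL/DSAH). Condensed from the fragments:

VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
                       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
                       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                       VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0)
        goto activate_err;              /* label hypothetical */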
3205 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) in vmxnet3_reset_dev() argument
3208 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); in vmxnet3_reset_dev()
3210 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3215 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) in vmxnet3_quiesce_dev() argument
3219 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) in vmxnet3_quiesce_dev()
3223 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3224 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_quiesce_dev()
3226 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3227 vmxnet3_disable_all_intrs(adapter); in vmxnet3_quiesce_dev()
3229 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_quiesce_dev()
3230 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_quiesce_dev()
3231 netif_tx_disable(adapter->netdev); in vmxnet3_quiesce_dev()
3232 adapter->link_speed = 0; in vmxnet3_quiesce_dev()
3233 netif_carrier_off(adapter->netdev); in vmxnet3_quiesce_dev()
3235 vmxnet3_tq_cleanup_all(adapter); in vmxnet3_quiesce_dev()
3236 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_quiesce_dev()
3237 vmxnet3_free_irqs(adapter); in vmxnet3_quiesce_dev()
3243 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac) in vmxnet3_write_mac_addr() argument
3248 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); in vmxnet3_write_mac_addr()
3251 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); in vmxnet3_write_mac_addr()
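The MAC is programmed as two register writes: four bytes into MACL, two into MACH. The packing is truncated from the listing, so the byte arithmetic here is an assumption consistent with the two writes shown:

u32 tmp;

tmp = *(u32 *)mac;                      /* bytes 0-3, little-endian */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
tmp = (mac[5] << 8) | mac[4];           /* bytes 4-5 */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);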
3259 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_set_mac_addr() local
3262 vmxnet3_write_mac_addr(adapter, addr->sa_data); in vmxnet3_set_mac_addr()
3271 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter) in vmxnet3_alloc_pci_resources() argument
3275 struct pci_dev *pdev = adapter->pdev; in vmxnet3_alloc_pci_resources()
3295 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3296 if (!adapter->hw_addr0) { in vmxnet3_alloc_pci_resources()
3304 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3305 if (!adapter->hw_addr1) { in vmxnet3_alloc_pci_resources()
3313 iounmap(adapter->hw_addr0); in vmxnet3_alloc_pci_resources()
3323 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) in vmxnet3_free_pci_resources() argument
3325 BUG_ON(!adapter->pdev); in vmxnet3_free_pci_resources()
3327 iounmap(adapter->hw_addr0); in vmxnet3_free_pci_resources()
3328 iounmap(adapter->hw_addr1); in vmxnet3_free_pci_resources()
3329 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); in vmxnet3_free_pci_resources()
3330 pci_disable_device(adapter->pdev); in vmxnet3_free_pci_resources()
3335 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) in vmxnet3_adjust_rx_ring_size() argument
3339 if (!VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_adjust_rx_ring_size()
3340 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - in vmxnet3_adjust_rx_ring_size()
3342 adapter->skb_buf_size = adapter->netdev->mtu + in vmxnet3_adjust_rx_ring_size()
3344 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) in vmxnet3_adjust_rx_ring_size()
3345 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3347 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3349 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3350 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + in vmxnet3_adjust_rx_ring_size()
3352 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; in vmxnet3_adjust_rx_ring_size()
3355 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE, in vmxnet3_adjust_rx_ring_size()
3357 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3358 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size); in vmxnet3_adjust_rx_ring_size()
3359 adapter->ringBufSize.ring1BufSizeType1 = 0; in vmxnet3_adjust_rx_ring_size()
3360 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE); in vmxnet3_adjust_rx_ring_size()
3367 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; in vmxnet3_adjust_rx_ring_size()
3368 ring0_size = adapter->rx_queue[0].rx_ring[0].size; in vmxnet3_adjust_rx_ring_size()
3372 ring1_size = adapter->rx_queue[0].rx_ring[1].size; in vmxnet3_adjust_rx_ring_size()
3377 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_adjust_rx_ring_size()
3383 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_adjust_rx_ring_size()
3384 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
3394 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, in vmxnet3_create_queues() argument
3400 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_create_queues()
3401 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues()
3406 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
3408 tq->adapter = adapter; in vmxnet3_create_queues()
3410 tq->tx_ts_desc_size = adapter->tx_ts_desc_size; in vmxnet3_create_queues()
3412 err = vmxnet3_tq_create(tq, adapter); in vmxnet3_create_queues()
3421 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; in vmxnet3_create_queues()
3422 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; in vmxnet3_create_queues()
3423 vmxnet3_adjust_rx_ring_size(adapter); in vmxnet3_create_queues()
3425 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_create_queues()
3426 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_create_queues()
3427 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues()
3430 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
3431 rq->adapter = adapter; in vmxnet3_create_queues()
3433 rq->rx_ts_desc_size = adapter->rx_ts_desc_size; in vmxnet3_create_queues()
3434 err = vmxnet3_rq_create(rq, adapter); in vmxnet3_create_queues()
3437 netdev_err(adapter->netdev, in vmxnet3_create_queues()
3442 netdev_info(adapter->netdev, in vmxnet3_create_queues()
3445 adapter->num_rx_queues = i; in vmxnet3_create_queues()
3452 if (!adapter->rxdataring_enabled) in vmxnet3_create_queues()
3453 vmxnet3_rq_destroy_all_rxdataring(adapter); in vmxnet3_create_queues()
3457 vmxnet3_tq_destroy_all(adapter); in vmxnet3_create_queues()
3464 struct vmxnet3_adapter *adapter; in vmxnet3_open() local
3467 adapter = netdev_priv(netdev); in vmxnet3_open()
3469 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_open()
3470 spin_lock_init(&adapter->tx_queue[i].tx_lock); in vmxnet3_open()
3472 if (VMXNET3_VERSION_GE_3(adapter)) { in vmxnet3_open()
3477 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3478 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_open()
3480 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_open()
3481 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3487 adapter->txdata_desc_size = in vmxnet3_open()
3490 adapter->txdata_desc_size = txdata_desc_size; in vmxnet3_open()
3492 if (VMXNET3_VERSION_GE_9(adapter)) in vmxnet3_open()
3493 adapter->rxdata_desc_size = (ret >> 16) & 0xffff; in vmxnet3_open()
3495 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); in vmxnet3_open()
3498 if (VMXNET3_VERSION_GE_9(adapter)) { in vmxnet3_open()
3504 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3505 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_open()
3507 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_open()
3508 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3519 adapter->tx_ts_desc_size = tx_ts_desc_size; in vmxnet3_open()
3520 adapter->rx_ts_desc_size = rx_ts_desc_size; in vmxnet3_open()
3522 adapter->tx_ts_desc_size = 0; in vmxnet3_open()
3523 adapter->rx_ts_desc_size = 0; in vmxnet3_open()
3526 err = vmxnet3_create_queues(adapter, in vmxnet3_open()
3527 adapter->tx_ring_size, in vmxnet3_open()
3528 adapter->rx_ring_size, in vmxnet3_open()
3529 adapter->rx_ring2_size, in vmxnet3_open()
3530 adapter->txdata_desc_size, in vmxnet3_open()
3531 adapter->rxdata_desc_size); in vmxnet3_open()
3535 err = vmxnet3_activate_dev(adapter); in vmxnet3_open()
3542 vmxnet3_rq_destroy_all(adapter); in vmxnet3_open()
3543 vmxnet3_tq_destroy_all(adapter); in vmxnet3_open()
3552 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_close() local
3558 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_close()
3561 vmxnet3_quiesce_dev(adapter); in vmxnet3_close()
3563 vmxnet3_rq_destroy_all(adapter); in vmxnet3_close()
3564 vmxnet3_tq_destroy_all(adapter); in vmxnet3_close()
3566 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_close()
3574 vmxnet3_force_close(struct vmxnet3_adapter *adapter) in vmxnet3_force_close() argument
3582 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); in vmxnet3_force_close()
3585 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_force_close()
3586 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_force_close()
3591 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_force_close()
3592 dev_close(adapter->netdev); in vmxnet3_force_close()
3599 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_change_mtu() local
3608 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_change_mtu()
3612 vmxnet3_quiesce_dev(adapter); in vmxnet3_change_mtu()
3613 vmxnet3_reset_dev(adapter); in vmxnet3_change_mtu()
3616 vmxnet3_rq_destroy_all(adapter); in vmxnet3_change_mtu()
3617 vmxnet3_adjust_rx_ring_size(adapter); in vmxnet3_change_mtu()
3618 err = vmxnet3_rq_create_all(adapter); in vmxnet3_change_mtu()
3626 err = vmxnet3_activate_dev(adapter); in vmxnet3_change_mtu()
3636 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_change_mtu()
3638 vmxnet3_force_close(adapter); in vmxnet3_change_mtu()
3645 vmxnet3_declare_features(struct vmxnet3_adapter *adapter) in vmxnet3_declare_features() argument
3647 struct net_device *netdev = adapter->netdev; in vmxnet3_declare_features()
3650 if (VMXNET3_VERSION_GE_9(adapter)) { in vmxnet3_declare_features()
3651 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3652 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_declare_features()
3654 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3655 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3663 if (VMXNET3_VERSION_GE_4(adapter)) { in vmxnet3_declare_features()
3674 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) { in vmxnet3_declare_features()
3679 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) { in vmxnet3_declare_features()
3684 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_declare_features()
3687 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3689 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3691 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3693 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3695 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3697 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO; in vmxnet3_declare_features()
3699 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3701 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO; in vmxnet3_declare_features()
3703 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3705 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3707 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3709 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3712 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_declare_features()
3713 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3714 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG); in vmxnet3_declare_features()
3715 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3716 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3718 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3719 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3720 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) && in vmxnet3_declare_features()
3721 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) { in vmxnet3_declare_features()
3725 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3726 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) { in vmxnet3_declare_features()
3740 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) in vmxnet3_read_mac_addr() argument
3744 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); in vmxnet3_read_mac_addr()
3747 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); in vmxnet3_read_mac_addr()
3764 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec) in vmxnet3_acquire_msix_vectors() argument
3766 int ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3767 adapter->intr.msix_entries, nvec, nvec); in vmxnet3_acquire_msix_vectors()
3770 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3774 ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3775 adapter->intr.msix_entries, in vmxnet3_acquire_msix_vectors()
3781 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
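MSI-X acquisition asks for exactly nvec vectors (min == max) and, on failure, retries at a reduced floor before the caller falls back to MSI; the floor constant is truncated from the listing, so VMXNET3_LINUX_MIN_MSIX_VECT is an assumed name:

ret = pci_enable_msix_range(adapter->pdev, adapter->intr.msix_entries,
                            nvec, nvec);
if (ret < 0 && nvec > VMXNET3_LINUX_MIN_MSIX_VECT)
        ret = pci_enable_msix_range(adapter->pdev,
                                    adapter->intr.msix_entries,
                                    VMXNET3_LINUX_MIN_MSIX_VECT, nvec);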
3792 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) in vmxnet3_alloc_intr_resources() argument
3798 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3799 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_alloc_intr_resources()
3801 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_alloc_intr_resources()
3802 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3803 adapter->intr.type = cfg & 0x3; in vmxnet3_alloc_intr_resources()
3804 adapter->intr.mask_mode = (cfg >> 2) & 0x3; in vmxnet3_alloc_intr_resources()
3806 if (adapter->intr.type == VMXNET3_IT_AUTO) { in vmxnet3_alloc_intr_resources()
3807 adapter->intr.type = VMXNET3_IT_MSIX; in vmxnet3_alloc_intr_resources()
3811 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_alloc_intr_resources()
3814 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? in vmxnet3_alloc_intr_resources()
3815 1 : adapter->num_tx_queues; in vmxnet3_alloc_intr_resources()
3816 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? in vmxnet3_alloc_intr_resources()
3817 0 : adapter->num_rx_queues; in vmxnet3_alloc_intr_resources()
3823 adapter->intr.msix_entries[i].entry = i; in vmxnet3_alloc_intr_resources()
3825 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec); in vmxnet3_alloc_intr_resources()
3834 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE in vmxnet3_alloc_intr_resources()
3835 || adapter->num_rx_queues != 1) { in vmxnet3_alloc_intr_resources()
3836 adapter->share_intr = VMXNET3_INTR_TXSHARE; in vmxnet3_alloc_intr_resources()
3837 netdev_err(adapter->netdev, in vmxnet3_alloc_intr_resources()
3839 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3843 adapter->intr.num_intrs = nvec_allocated; in vmxnet3_alloc_intr_resources()
3848 dev_info(&adapter->pdev->dev, in vmxnet3_alloc_intr_resources()
3852 adapter->intr.type = VMXNET3_IT_MSI; in vmxnet3_alloc_intr_resources()
3855 if (adapter->intr.type == VMXNET3_IT_MSI) { in vmxnet3_alloc_intr_resources()
3856 if (!pci_enable_msi(adapter->pdev)) { in vmxnet3_alloc_intr_resources()
3857 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3858 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
3864 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3865 dev_info(&adapter->netdev->dev, in vmxnet3_alloc_intr_resources()
3867 adapter->intr.type = VMXNET3_IT_INTX; in vmxnet3_alloc_intr_resources()
3870 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
3875 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) in vmxnet3_free_intr_resources() argument
3877 if (adapter->intr.type == VMXNET3_IT_MSIX) in vmxnet3_free_intr_resources()
3878 pci_disable_msix(adapter->pdev); in vmxnet3_free_intr_resources()
3879 else if (adapter->intr.type == VMXNET3_IT_MSI) in vmxnet3_free_intr_resources()
3880 pci_disable_msi(adapter->pdev); in vmxnet3_free_intr_resources()
3882 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); in vmxnet3_free_intr_resources()
3889 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_tx_timeout() local
3890 adapter->tx_timeout_count++; in vmxnet3_tx_timeout()
3892 netdev_err(adapter->netdev, "tx hang\n"); in vmxnet3_tx_timeout()
3893 schedule_work(&adapter->work); in vmxnet3_tx_timeout()
3900 struct vmxnet3_adapter *adapter; in vmxnet3_reset_work() local
3902 adapter = container_of(data, struct vmxnet3_adapter, work); in vmxnet3_reset_work()
3905 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_reset_work()
3910 if (netif_running(adapter->netdev)) { in vmxnet3_reset_work()
3911 netdev_notice(adapter->netdev, "resetting\n"); in vmxnet3_reset_work()
3912 vmxnet3_quiesce_dev(adapter); in vmxnet3_reset_work()
3913 vmxnet3_reset_dev(adapter); in vmxnet3_reset_work()
3914 vmxnet3_activate_dev(adapter); in vmxnet3_reset_work()
3916 netdev_info(adapter->netdev, "already closed\n"); in vmxnet3_reset_work()
3920 netif_wake_queue(adapter->netdev); in vmxnet3_reset_work()
3921 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_reset_work()
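The reset worker serializes on the RESETTING state bit and only touches hardware when the interface is running: quiesce, reset, reactivate. The lock around the netif_running() section is truncated from the listing; rtnl_lock() is an assumption:

if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
        return;

rtnl_lock();                            /* assumed serialization */
if (netif_running(adapter->netdev)) {
        vmxnet3_quiesce_dev(adapter);
        vmxnet3_reset_dev(adapter);
        vmxnet3_activate_dev(adapter);
} else {
        netdev_info(adapter->netdev, "already closed\n");
}
rtnl_unlock();

netif_wake_queue(adapter->netdev);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);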
3952 struct vmxnet3_adapter *adapter; in vmxnet3_probe_device() local
3983 adapter = netdev_priv(netdev); in vmxnet3_probe_device()
3984 adapter->netdev = netdev; in vmxnet3_probe_device()
3985 adapter->pdev = pdev; in vmxnet3_probe_device()
3987 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; in vmxnet3_probe_device()
3988 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; in vmxnet3_probe_device()
3989 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; in vmxnet3_probe_device()
3997 spin_lock_init(&adapter->cmd_lock); in vmxnet3_probe_device()
3998 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, in vmxnet3_probe_device()
4001 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { in vmxnet3_probe_device()
4006 adapter->shared = dma_alloc_coherent( in vmxnet3_probe_device()
4007 &adapter->pdev->dev, in vmxnet3_probe_device()
4009 &adapter->shared_pa, GFP_KERNEL); in vmxnet3_probe_device()
4010 if (!adapter->shared) { in vmxnet3_probe_device()
4016 err = vmxnet3_alloc_pci_resources(adapter); in vmxnet3_probe_device()
4020 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); in vmxnet3_probe_device()
4023 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i); in vmxnet3_probe_device()
4024 adapter->version = i + 1; in vmxnet3_probe_device()
4034 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); in vmxnet3_probe_device()
4036 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); in vmxnet3_probe_device()
4038 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); in vmxnet3_probe_device()
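The VRRS/UVRS references above are the revision handshake: the device advertises the revisions it speaks as a bitmask, the driver scans down from the highest revision it knows and acks the first common bit by writing 1 << i back; the UPT register is negotiated the same way. Sketched below, with MY_MAX_VER and my_write_vrrs() as hypothetical stand-ins.

static int my_negotiate_version(struct my_adapter *a, u32 dev_versions)
{
	int i;

	for (i = MY_MAX_VER - 1; i >= 0; i--) {
		if (dev_versions & (1u << i)) {
			my_write_vrrs(a, 1u << i); /* ack the chosen revision */
			a->version = i + 1;        /* versions are 1-based */
			return 0;
		}
	}
	return -EOPNOTSUPP; /* no revision in common */
}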
4046 if (VMXNET3_VERSION_GE_7(adapter)) { in vmxnet3_probe_device()
4047 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR); in vmxnet3_probe_device()
4048 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR); in vmxnet3_probe_device()
4049 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4050 adapter->dev_caps[0] = adapter->devcap_supported[0] & in vmxnet3_probe_device()
4053 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) && in vmxnet3_probe_device()
4054 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) && in vmxnet3_probe_device()
4055 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) { in vmxnet3_probe_device()
4056 adapter->dev_caps[0] |= adapter->devcap_supported[0] & in vmxnet3_probe_device()
4059 if (adapter->dev_caps[0]) in vmxnet3_probe_device()
4060 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_probe_device()
4062 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4063 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG); in vmxnet3_probe_device()
4064 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_probe_device()
4065 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
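The v7 capability negotiation visible here is write-then-read-back: the driver masks the capabilities it wants against devcap_supported, writes the request to DCR, then issues the GET_DCR0_REG command and treats the device's reply as the authoritative dev_caps. A sketch under those assumptions; my_write_dcr(), my_write_cmd(), and my_read_cmd() are hypothetical register helpers.

static void my_negotiate_caps(struct my_adapter *a, u32 wanted)
{
	unsigned long flags;
	u32 req = wanted & a->devcap_supported; /* never request the unsupported */

	if (req)
		my_write_dcr(a, req);

	spin_lock_irqsave(&a->cmd_lock, flags);
	my_write_cmd(a, MY_CMD_GET_DCR0);
	a->dev_caps = my_read_cmd(a); /* the device's answer wins */
	spin_unlock_irqrestore(&a->cmd_lock, flags);
}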
4068 if (VMXNET3_VERSION_GE_7(adapter) && in vmxnet3_probe_device()
4069 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4070 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD; in vmxnet3_probe_device()
4071 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD; in vmxnet3_probe_device()
4072 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2; in vmxnet3_probe_device()
4074 adapter->tx_prod_offset = VMXNET3_REG_TXPROD; in vmxnet3_probe_device()
4075 adapter->rx_prod_offset = VMXNET3_REG_RXPROD; in vmxnet3_probe_device()
4076 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2; in vmxnet3_probe_device()
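The prod-offset assignments above pick the doorbell registers once at probe time: with the large-BAR capability granted the producer registers live at the LB_* offsets, otherwise at the classic ones. Caching the choice in the adapter lets the transmit hot path ring the doorbell without re-testing the capability. Sketch below; the register values and the bar0 field are illustrative.

#include <linux/io.h>

#define MY_REG_TXPROD    0x600  /* illustrative, not the real offsets */
#define MY_REG_LB_TXPROD 0x1000

static void my_pick_doorbells(struct my_adapter *a, bool large_bar)
{
	a->tx_prod_offset = large_bar ? MY_REG_LB_TXPROD : MY_REG_TXPROD;
}

static void my_kick_tx(struct my_adapter *a, u32 next_to_fill)
{
	/* Hot path: blind write to the offset chosen at probe time. */
	writel(next_to_fill, a->bar0 + a->tx_prod_offset);
}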
4079 if (VMXNET3_VERSION_GE_6(adapter)) { in vmxnet3_probe_device()
4080 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4081 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_probe_device()
4083 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_probe_device()
4084 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4086 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff)); in vmxnet3_probe_device()
4087 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff)); in vmxnet3_probe_device()
4089 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4091 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4094 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES || in vmxnet3_probe_device()
4095 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) { in vmxnet3_probe_device()
4096 adapter->queuesExtEnabled = true; in vmxnet3_probe_device()
4098 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4101 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4104 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4106 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4111 adapter->num_tx_queues, adapter->num_rx_queues); in vmxnet3_probe_device()
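For v6+ devices the queue limits come from the device itself; judging by the masks above, the reply packs the tx count in bits 7:0 and the rx count in bits 15:8, and the module-parameter requests are clamped with min(). Older devices clamp against compile-time maxima instead. Sketched below; the bit layout and the fallback value 8 are assumptions of this sketch.

#include <linux/minmax.h>

static void my_clamp_queues(struct my_adapter *a, u32 reply,
			    unsigned int req_tx, unsigned int req_rx)
{
	if (reply) { /* device reported its own limits */
		a->num_tx_queues = min(req_tx, reply & 0xff);
		a->num_rx_queues = min(req_rx, (reply >> 8) & 0xff);
	} else {     /* fall back to static maxima */
		a->num_tx_queues = min_t(unsigned int, req_tx, 8);
		a->num_rx_queues = min_t(unsigned int, req_rx, 8);
	}
}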
4113 adapter->rx_buf_per_pkt = 1; in vmxnet3_probe_device()
4115 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_probe_device()
4116 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; in vmxnet3_probe_device()
4117 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, in vmxnet3_probe_device()
4118 &adapter->queue_desc_pa, in vmxnet3_probe_device()
4121 if (!adapter->tqd_start) { in vmxnet3_probe_device()
4126 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + in vmxnet3_probe_device()
4127 adapter->num_tx_queues); in vmxnet3_probe_device()
4128 if (VMXNET3_VERSION_GE_9(adapter)) in vmxnet3_probe_device()
4129 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf; in vmxnet3_probe_device()
4131 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4133 &adapter->pm_conf_pa, in vmxnet3_probe_device()
4135 if (adapter->pm_conf == NULL) { in vmxnet3_probe_device()
4142 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4144 &adapter->rss_conf_pa, in vmxnet3_probe_device()
4146 if (adapter->rss_conf == NULL) { in vmxnet3_probe_device()
4152 if (VMXNET3_VERSION_GE_3(adapter)) { in vmxnet3_probe_device()
4153 adapter->coal_conf = in vmxnet3_probe_device()
4154 dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4157 &adapter->coal_conf_pa, in vmxnet3_probe_device()
4159 if (!adapter->coal_conf) { in vmxnet3_probe_device()
4163 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; in vmxnet3_probe_device()
4164 adapter->default_coal_mode = true; in vmxnet3_probe_device()
4167 if (VMXNET3_VERSION_GE_4(adapter)) { in vmxnet3_probe_device()
4168 adapter->default_rss_fields = true; in vmxnet3_probe_device()
4169 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT; in vmxnet3_probe_device()
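Each of the shared configuration blocks above (queue descriptors, PM, RSS, and coalescing on v3+) follows the same dma_alloc_coherent() pattern: allocate a coherent buffer, record the bus address that will later be handed to the device, and on failure let the caller's error ladder free everything allocated so far. The pattern in a sketch; conf and conf_pa are illustrative fields.

#include <linux/dma-mapping.h>

static int my_alloc_conf(struct my_adapter *a, size_t size)
{
	a->conf = dma_alloc_coherent(&a->pdev->dev, size,
				     &a->conf_pa, GFP_KERNEL);
	if (!a->conf)
		return -ENOMEM; /* caller unwinds prior allocations */
	return 0;
}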
4173 vmxnet3_declare_features(adapter); in vmxnet3_probe_device()
4177 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? in vmxnet3_probe_device()
4180 if (adapter->num_tx_queues == adapter->num_rx_queues) in vmxnet3_probe_device()
4181 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; in vmxnet3_probe_device()
4183 adapter->share_intr = VMXNET3_INTR_DONTSHARE; in vmxnet3_probe_device()
4185 vmxnet3_alloc_intr_resources(adapter); in vmxnet3_probe_device()
4188 if (adapter->num_rx_queues > 1 && in vmxnet3_probe_device()
4189 adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4190 adapter->rss = true; in vmxnet3_probe_device()
4195 adapter->rss = false; in vmxnet3_probe_device()
4199 vmxnet3_read_mac_addr(adapter, mac); in vmxnet3_probe_device()
4208 if (VMXNET3_VERSION_GE_6(adapter)) in vmxnet3_probe_device()
4213 INIT_WORK(&adapter->work, vmxnet3_reset_work); in vmxnet3_probe_device()
4214 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_probe_device()
4216 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4218 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_probe_device()
4219 netif_napi_add(adapter->netdev, in vmxnet3_probe_device()
4220 &adapter->rx_queue[i].napi, in vmxnet3_probe_device()
4224 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, in vmxnet3_probe_device()
4228 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); in vmxnet3_probe_device()
4229 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); in vmxnet3_probe_device()
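The NAPI registration above splits on interrupt type: with MSI-X every rx queue gets its own NAPI context to pair with its own vector, while MSI/INTx register a single context on rx_queue[0] that polls all rings. A sketch using the three-argument netif_napi_add() of kernel 6.1+ (older kernels take a fourth weight argument); my_poll_rx_only() and my_poll() are hypothetical poll callbacks.

static void my_setup_napi(struct my_adapter *a)
{
	int i;

	if (a->type == MY_IT_MSIX) {
		for (i = 0; i < a->num_rx_queues; i++)
			netif_napi_add(a->netdev, &a->rx_queue[i].napi,
				       my_poll_rx_only); /* one context per queue */
	} else {
		netif_napi_add(a->netdev, &a->rx_queue[0].napi,
			       my_poll); /* single poller drains everything */
	}

	netif_set_real_num_tx_queues(a->netdev, a->num_tx_queues);
	netif_set_real_num_rx_queues(a->netdev, a->num_rx_queues);
}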
4239 vmxnet3_check_link(adapter, false); in vmxnet3_probe_device()
4243 if (VMXNET3_VERSION_GE_3(adapter)) { in vmxnet3_probe_device()
4244 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4246 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_probe_device()
4248 vmxnet3_free_intr_resources(adapter); in vmxnet3_probe_device()
4251 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_probe_device()
4252 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_probe_device()
4255 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_probe_device()
4256 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_probe_device()
4258 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_probe_device()
4259 adapter->queue_desc_pa); in vmxnet3_probe_device()
4261 vmxnet3_free_pci_resources(adapter); in vmxnet3_probe_device()
4263 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4265 adapter->shared, adapter->shared_pa); in vmxnet3_probe_device()
4267 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_probe_device()
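The dma_free_coherent() run above is the tail of probe's goto ladder: each label frees exactly what was live when its step failed, in strict reverse order, ending at the dma_unmap_single() that undoes the very first mapping. The shape, reduced to three allocations:

static int my_probe_allocs(struct my_adapter *a)
{
	int err;

	err = my_alloc_a(a);
	if (err)
		return err;
	err = my_alloc_b(a);
	if (err)
		goto err_free_a;
	err = my_alloc_c(a);
	if (err)
		goto err_free_b;
	return 0;

	/* Unwind strictly in reverse allocation order. */
err_free_b:
	my_free_b(a);
err_free_a:
	my_free_a(a);
	return err;
}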
4279 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_remove_device() local
4291 if (!VMXNET3_VERSION_GE_6(adapter)) { in vmxnet3_remove_device()
4294 if (VMXNET3_VERSION_GE_6(adapter)) { in vmxnet3_remove_device()
4295 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4296 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_remove_device()
4298 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_remove_device()
4299 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4310 cancel_work_sync(&adapter->work); in vmxnet3_remove_device()
4314 vmxnet3_free_intr_resources(adapter); in vmxnet3_remove_device()
4315 vmxnet3_free_pci_resources(adapter); in vmxnet3_remove_device()
4316 if (VMXNET3_VERSION_GE_3(adapter)) { in vmxnet3_remove_device()
4317 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4319 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_remove_device()
4322 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_remove_device()
4323 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_remove_device()
4325 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_remove_device()
4326 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_remove_device()
4328 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_remove_device()
4330 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_remove_device()
4331 adapter->queue_desc_pa); in vmxnet3_remove_device()
4332 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4334 adapter->shared, adapter->shared_pa); in vmxnet3_remove_device()
4335 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_remove_device()
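Remove mirrors probe in reverse, with one ordering rule visible above: cancel_work_sync() runs before any memory is freed, so a reset worker queued by a late tx timeout can never touch a buffer that is being torn down. Sketched below; the placement of unregister_netdev() relative to the cancel is an assumption, since lines without "adapter" in them do not appear in this listing.

static void my_remove(struct my_adapter *a)
{
	cancel_work_sync(&a->reset_work); /* no reset can race the frees below */
	unregister_netdev(a->netdev);
	my_free_intr(a);
	my_free_confs(a);   /* reverse of probe: coal, rss, pm, queue descs */
	my_unmap_adapter(a);
}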
4343 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_shutdown_device() local
4349 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_shutdown_device()
4353 &adapter->state)) { in vmxnet3_shutdown_device()
4354 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4357 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4358 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_shutdown_device()
4360 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4361 vmxnet3_disable_all_intrs(adapter); in vmxnet3_shutdown_device()
4363 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
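Shutdown frees nothing; it only wins the RESETTING bit (spinning until any in-flight reset completes), bails out if the device is already QUIESCED, and otherwise sends the quiesce command under cmd_lock and masks all interrupts. A sketch; the usleep_range() backoff and MY_CMD_QUIESCE are assumptions.

#include <linux/delay.h>

#define MY_STATE_QUIESCED 1

static void my_shutdown(struct my_adapter *a)
{
	unsigned long flags;

	while (test_and_set_bit(MY_STATE_RESETTING, &a->state))
		usleep_range(1000, 2000); /* wait out a running reset */

	if (test_and_set_bit(MY_STATE_QUIESCED, &a->state))
		goto out; /* already quiet; nothing to do */

	spin_lock_irqsave(&a->cmd_lock, flags);
	my_write_cmd(a, MY_CMD_QUIESCE);
	spin_unlock_irqrestore(&a->cmd_lock, flags);
	my_disable_all_intrs(a);
out:
	clear_bit(MY_STATE_RESETTING, &a->state);
}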
4374 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_suspend() local
4387 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_suspend()
4388 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_suspend()
4390 vmxnet3_disable_all_intrs(adapter); in vmxnet3_suspend()
4391 vmxnet3_free_irqs(adapter); in vmxnet3_suspend()
4392 vmxnet3_free_intr_resources(adapter); in vmxnet3_suspend()
4397 pmConf = adapter->pm_conf; in vmxnet3_suspend()
4400 if (adapter->wol & WAKE_UCAST) { in vmxnet3_suspend()
4410 if (adapter->wol & WAKE_ARP) { in vmxnet3_suspend()
4460 if (adapter->wol & WAKE_MAGIC) in vmxnet3_suspend()
4465 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); in vmxnet3_suspend()
4466 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( in vmxnet3_suspend()
4468 adapter->shared->devRead.pmConfDesc.confPA = in vmxnet3_suspend()
4469 cpu_to_le64(adapter->pm_conf_pa); in vmxnet3_suspend()
4471 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4472 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_suspend()
4474 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4478 adapter->wol); in vmxnet3_suspend()
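Suspend first dismantles the receive path (NAPI, IRQs, vectors), then turns the WoL mask into device filters inside pm_conf: a unicast match for WAKE_UCAST, an ARP-request pattern for WAKE_ARP, and a magic-packet flag for WAKE_MAGIC, after which the block's version, length, and bus address are published in devRead and the device is told to re-read them. A compressed sketch of the filter assembly; the my_pm_conf layout and the my_add_* helpers are illustrative, not the driver's.

#include <linux/ethtool.h>
#include <linux/etherdevice.h>

static void my_build_wol(struct my_adapter *a, u32 wol)
{
	struct my_pm_conf *pm = a->pm_conf;

	memset(pm, 0, sizeof(*pm));
	if (wol & WAKE_UCAST) /* wake on frames to our own MAC */
		my_add_pattern_filter(pm, a->netdev->dev_addr, ETH_ALEN);
	if (wol & WAKE_ARP)   /* wake on ARP requests for our IP */
		my_add_arp_filter(pm, a->netdev);
	if (wol & WAKE_MAGIC)
		pm->wake_flags |= MY_PM_WAKE_MAGIC;

	/* Publish location and size, then let the device re-read them. */
	a->shared->pm_conf_pa  = cpu_to_le64(a->pm_conf_pa);
	a->shared->pm_conf_len = cpu_to_le32(sizeof(*pm));
}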
4493 struct vmxnet3_adapter *adapter = netdev_priv(netdev); in vmxnet3_resume() local
4506 vmxnet3_alloc_intr_resources(adapter); in vmxnet3_resume()
4515 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_resume()
4516 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, in vmxnet3_resume()
4518 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_resume()
4519 vmxnet3_tq_cleanup_all(adapter); in vmxnet3_resume()
4520 vmxnet3_rq_cleanup_all(adapter); in vmxnet3_resume()
4522 vmxnet3_reset_dev(adapter); in vmxnet3_resume()
4523 err = vmxnet3_activate_dev(adapter); in vmxnet3_resume()
4527 vmxnet3_force_close(adapter); in vmxnet3_resume()
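Resume is the mirror image: vectors are reallocated (suspend released them), stale pre-sleep ring state is flushed with the tq/rq cleanup pair, and the same reset-then-activate sequence the reset worker uses brings the device back; on activation failure the device is force-closed rather than left half-initialized. A sketch with hypothetical my_* helpers:

static int my_resume(struct my_adapter *a)
{
	int err;

	my_alloc_intr(a);     /* suspend released the vectors */
	my_cleanup_queues(a); /* discard pre-sleep tx/rx ring state */
	my_reset_hw(a);

	err = my_activate(a);
	if (err) {
		my_force_close(a); /* don't leave a half-alive device */
		return err;
	}
	return 0;
}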