Lines Matching +full:free +full:- +full:flowing
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
42 "Copyright (c) 2009 - 2018 Intel Corporation.";
56 /* ixgbevf_pci_tbl - PCI Device ID Table
83 static int debug = -1;
91 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_service_event_schedule()
92 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && in ixgbevf_service_event_schedule()
93 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) in ixgbevf_service_event_schedule()
94 queue_work(ixgbevf_wq, &adapter->service_task); in ixgbevf_service_event_schedule()
99 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); in ixgbevf_service_event_complete()
103 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_service_event_complete()
116 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_remove_adapter()
118 if (!hw->hw_addr) in ixgbevf_remove_adapter()
120 hw->hw_addr = NULL; in ixgbevf_remove_adapter()
121 dev_err(&adapter->pdev->dev, "Adapter removed\n"); in ixgbevf_remove_adapter()
122 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_remove_adapter()
147 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); in ixgbevf_read_reg()
159 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
161 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
169 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_ivar()
171 if (direction == -1) { in ixgbevf_set_ivar()
191 return ring->stats.packets; in ixgbevf_get_tx_completed()
196 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending()
197 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_get_tx_pending()
199 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); in ixgbevf_get_tx_pending()
200 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); in ixgbevf_get_tx_pending()
204 tail - head : (tail + ring->count - head); in ixgbevf_get_tx_pending()
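The head/tail expression above (source line 204) computes how many descriptors the hardware still owns, with a single branch covering ring wrap. A minimal standalone sketch of the same arithmetic, assuming a hypothetical helper ring_pending() that is not part of the driver:

    #include <stdio.h>

    /* Pending entries on a ring of 'count' slots given hardware head and
     * software tail indices; mirrors the expression in ixgbevf_get_tx_pending().
     */
    static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                     unsigned int count)
    {
            if (head == tail)
                    return 0;
            return head <= tail ? tail - head : tail + count - head;
    }

    int main(void)
    {
            printf("%u\n", ring_pending(10, 500, 512));  /* 490: no wrap */
            printf("%u\n", ring_pending(500, 10, 512));  /* 22: tail wrapped */
            return 0;
    }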
212 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
225 &tx_ring->state); in ixgbevf_check_tx_hang()
228 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
231 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
239 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_tx_timeout_reset()
240 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_tx_timeout_reset()
246 * ixgbevf_tx_timeout - Respond to a Tx Hang
258 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
266 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_tx_irq()
270 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
271 unsigned int i = tx_ring->next_to_clean; in ixgbevf_clean_tx_irq()
273 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_clean_tx_irq()
276 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_irq()
278 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
281 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_irq()
291 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbevf_clean_tx_irq()
295 tx_buffer->next_to_watch = NULL; in ixgbevf_clean_tx_irq()
298 total_bytes += tx_buffer->bytecount; in ixgbevf_clean_tx_irq()
299 total_packets += tx_buffer->gso_segs; in ixgbevf_clean_tx_irq()
300 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) in ixgbevf_clean_tx_irq()
303 /* free the skb */ in ixgbevf_clean_tx_irq()
305 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_irq()
307 napi_consume_skb(tx_buffer->skb, napi_budget); in ixgbevf_clean_tx_irq()
310 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_irq()
324 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
325 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
331 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_irq()
344 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
345 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
353 budget--; in ixgbevf_clean_tx_irq()
356 i += tx_ring->count; in ixgbevf_clean_tx_irq()
357 tx_ring->next_to_clean = i; in ixgbevf_clean_tx_irq()
358 u64_stats_update_begin(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
359 tx_ring->stats.bytes += total_bytes; in ixgbevf_clean_tx_irq()
360 tx_ring->stats.packets += total_packets; in ixgbevf_clean_tx_irq()
361 u64_stats_update_end(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
362 q_vector->tx.total_bytes += total_bytes; in ixgbevf_clean_tx_irq()
363 q_vector->tx.total_packets += total_packets; in ixgbevf_clean_tx_irq()
364 adapter->tx_ipsec += total_ipsec; in ixgbevf_clean_tx_irq()
367 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_clean_tx_irq()
370 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; in ixgbevf_clean_tx_irq()
379 " eop_desc->wb.status <%x>\n" in ixgbevf_clean_tx_irq()
383 tx_ring->queue_index, in ixgbevf_clean_tx_irq()
384 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
385 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
386 tx_ring->next_to_use, i, in ixgbevf_clean_tx_irq()
387 eop_desc, (eop_desc ? eop_desc->wb.status : 0), in ixgbevf_clean_tx_irq()
388 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbevf_clean_tx_irq()
391 netif_stop_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
392 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
404 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbevf_clean_tx_irq()
411 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbevf_clean_tx_irq()
412 tx_ring->queue_index) && in ixgbevf_clean_tx_irq()
413 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_clean_tx_irq()
414 netif_wake_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
415 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
416 ++tx_ring->tx_stats.restart_queue; in ixgbevf_clean_tx_irq()
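The cleanup loop above biases the ring index by -count (source lines 278, 324, 344) so that a plain test for zero detects wrap-around instead of comparing against the ring size on every pass. A small sketch of that indexing trick, standalone and not taken from the driver:

    #include <stdio.h>

    /* Negative-bias ring walk: i runs from -count up toward 0; reaching 0
     * means the real index wrapped past the end, so rewind by count.
     */
    int main(void)
    {
            const int count = 4;
            int next_to_clean = 1;
            int i = next_to_clean - count;           /* biased index */

            for (int steps = 0; steps < 6; steps++) {
                    printf("slot %d\n", i + count);  /* 1 2 3 0 1 2 */
                    i++;
                    if (!i)
                            i -= count;              /* wrapped to slot 0 */
            }
            return 0;
    }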
424 * ixgbevf_rx_skb - Helper function to determine proper Rx method
431 napi_gro_receive(&q_vector->napi, skb); in ixgbevf_rx_skb()
446 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbevf_rx_hash()
449 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & in ixgbevf_rx_hash()
455 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in ixgbevf_rx_hash()
461 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
473 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbevf_rx_checksum()
479 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
487 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
492 skb->ip_summed = CHECKSUM_UNNECESSARY; in ixgbevf_rx_checksum()
496 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
513 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in ixgbevf_process_skb_fields()
514 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
523 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
532 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
533 prefetchw(rx_buffer->page); in ixgbevf_get_rx_buffer()
536 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
537 rx_buffer->dma, in ixgbevf_get_rx_buffer()
538 rx_buffer->page_offset, in ixgbevf_get_rx_buffer()
542 rx_buffer->pagecnt_bias--; in ixgbevf_get_rx_buffer()
556 /* We are not reusing the buffer so unmap it and free in ixgbevf_put_rx_buffer()
559 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
563 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_put_rx_buffer()
564 rx_buffer->pagecnt_bias); in ixgbevf_put_rx_buffer()
568 rx_buffer->page = NULL; in ixgbevf_put_rx_buffer()
572 * ixgbevf_is_non_eop - process handling of non-EOP buffers
579 * that this is in fact a non-EOP buffer.
584 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
587 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
588 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
606 struct page *page = bi->page; in ixgbevf_alloc_mapped_page()
616 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
621 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
625 /* if mapping failed free memory back to system since in ixgbevf_alloc_mapped_page()
628 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
631 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
635 bi->dma = dma; in ixgbevf_alloc_mapped_page()
636 bi->page = page; in ixgbevf_alloc_mapped_page()
637 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
638 bi->pagecnt_bias = 1; in ixgbevf_alloc_mapped_page()
639 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
645 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
654 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
657 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
661 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
662 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
669 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
670 bi->page_offset, in ixgbevf_alloc_rx_buffers()
675 * because each write-back erases this info. in ixgbevf_alloc_rx_buffers()
677 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbevf_alloc_rx_buffers()
684 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
685 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
689 rx_desc->wb.upper.length = 0; in ixgbevf_alloc_rx_buffers()
691 cleaned_count--; in ixgbevf_alloc_rx_buffers()
694 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
696 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
698 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
701 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
705 * applicable for weak-ordered memory model archs, in ixgbevf_alloc_rx_buffers()
706 * such as IA-64). in ixgbevf_alloc_rx_buffers()
714 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
742 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
744 if (!(netdev->features & NETIF_F_RXALL)) { in ixgbevf_cleanup_headers()
758 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
768 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
770 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
774 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
777 new_buff->page = old_buff->page; in ixgbevf_reuse_rx_page()
778 new_buff->dma = old_buff->dma; in ixgbevf_reuse_rx_page()
779 new_buff->page_offset = old_buff->page_offset; in ixgbevf_reuse_rx_page()
780 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in ixgbevf_reuse_rx_page()
785 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in ixgbevf_can_reuse_rx_page()
786 struct page *page = rx_buffer->page; in ixgbevf_can_reuse_rx_page()
788 /* avoid re-using remote and pfmemalloc pages */ in ixgbevf_can_reuse_rx_page()
794 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) in ixgbevf_can_reuse_rx_page()
798 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) in ixgbevf_can_reuse_rx_page()
800 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) in ixgbevf_can_reuse_rx_page()
811 rx_buffer->pagecnt_bias = USHRT_MAX; in ixgbevf_can_reuse_rx_page()
818 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
824 * This function will add the data contained in rx_buffer->page to the skb.
838 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in ixgbevf_add_rx_frag()
839 rx_buffer->page_offset, size, truesize); in ixgbevf_add_rx_frag()
841 rx_buffer->page_offset ^= truesize; in ixgbevf_add_rx_frag()
843 rx_buffer->page_offset += truesize; in ixgbevf_add_rx_frag()
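Source lines 841/843 show the two buffer-advance strategies: on 4K-page systems the offset is XOR-ed with truesize to ping-pong between the two halves of the page, while larger pages advance linearly. A toy illustration of the XOR variant, with the values assumed for the sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int truesize = 2048;   /* half of a 4K page */
            unsigned int offset = 0;

            for (int i = 0; i < 4; i++) {
                    printf("fill at offset %u\n", offset);  /* 0, 2048, 0, 2048 */
                    offset ^= truesize;     /* flip to the other half */
            }
            return 0;
    }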
853 unsigned int size = xdp->data_end - xdp->data; in ixgbevf_construct_skb()
857 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_construct_skb()
858 xdp->data_hard_start); in ixgbevf_construct_skb()
864 net_prefetch(xdp->data); in ixgbevf_construct_skb()
866 /* Note, we get here by enabling legacy-rx via: in ixgbevf_construct_skb()
868 * ethtool --set-priv-flags <dev> legacy-rx on in ixgbevf_construct_skb()
871 * opposed to having legacy-rx off, where we process XDP in ixgbevf_construct_skb()
875 * xdp->data_meta will always point to xdp->data, since in ixgbevf_construct_skb()
877 * changed in future for legacy-rx mode on, then let's also in ixgbevf_construct_skb()
878 * add xdp->data_meta handling here. in ixgbevf_construct_skb()
882 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
889 headlen = eth_get_headlen(skb->dev, xdp->data, in ixgbevf_construct_skb()
893 memcpy(__skb_put(skb, headlen), xdp->data, in ixgbevf_construct_skb()
897 size -= headlen; in ixgbevf_construct_skb()
899 skb_add_rx_frag(skb, 0, rx_buffer->page, in ixgbevf_construct_skb()
900 (xdp->data + headlen) - in ixgbevf_construct_skb()
901 page_address(rx_buffer->page), in ixgbevf_construct_skb()
904 rx_buffer->page_offset ^= truesize; in ixgbevf_construct_skb()
906 rx_buffer->page_offset += truesize; in ixgbevf_construct_skb()
909 rx_buffer->pagecnt_bias++; in ixgbevf_construct_skb()
918 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable_queues()
928 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbevf_build_skb()
933 SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_build_skb()
934 xdp->data_hard_start); in ixgbevf_build_skb()
938 /* Prefetch first cache line of first page. If xdp->data_meta in ixgbevf_build_skb()
939 * is unused, this points to xdp->data, otherwise, we likely in ixgbevf_build_skb()
943 net_prefetch(xdp->data_meta); in ixgbevf_build_skb()
946 skb = napi_build_skb(xdp->data_hard_start, truesize); in ixgbevf_build_skb()
951 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ixgbevf_build_skb()
952 __skb_put(skb, xdp->data_end - xdp->data); in ixgbevf_build_skb()
958 rx_buffer->page_offset ^= truesize; in ixgbevf_build_skb()
960 rx_buffer->page_offset += truesize; in ixgbevf_build_skb()
979 len = xdp->data_end - xdp->data; in ixgbevf_xmit_xdp_ring()
984 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); in ixgbevf_xmit_xdp_ring()
985 if (dma_mapping_error(ring->dev, dma)) in ixgbevf_xmit_xdp_ring()
989 i = ring->next_to_use; in ixgbevf_xmit_xdp_ring()
990 tx_buffer = &ring->tx_buffer_info[i]; in ixgbevf_xmit_xdp_ring()
994 tx_buffer->data = xdp->data; in ixgbevf_xmit_xdp_ring()
995 tx_buffer->bytecount = len; in ixgbevf_xmit_xdp_ring()
996 tx_buffer->gso_segs = 1; in ixgbevf_xmit_xdp_ring()
997 tx_buffer->protocol = 0; in ixgbevf_xmit_xdp_ring()
1002 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { in ixgbevf_xmit_xdp_ring()
1005 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_xmit_xdp_ring()
1008 context_desc->vlan_macip_lens = in ixgbevf_xmit_xdp_ring()
1010 context_desc->fceof_saidx = 0; in ixgbevf_xmit_xdp_ring()
1011 context_desc->type_tucmd_mlhl = in ixgbevf_xmit_xdp_ring()
1014 context_desc->mss_l4len_idx = 0; in ixgbevf_xmit_xdp_ring()
1026 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_xmit_xdp_ring()
1028 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbevf_xmit_xdp_ring()
1029 tx_desc->read.olinfo_status = in ixgbevf_xmit_xdp_ring()
1038 if (i == ring->count) in ixgbevf_xmit_xdp_ring()
1041 tx_buffer->next_to_watch = tx_desc; in ixgbevf_xmit_xdp_ring()
1042 ring->next_to_use = i; in ixgbevf_xmit_xdp_ring()
1056 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1066 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1072 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1076 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1083 return ERR_PTR(-result); in ixgbevf_run_xdp()
1092 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbevf_rx_frame_truesize()
1109 rx_buffer->page_offset ^= truesize; in ixgbevf_rx_buffer_flip()
1111 rx_buffer->page_offset += truesize; in ixgbevf_rx_buffer_flip()
1120 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_rx_irq()
1122 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1130 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbevf_clean_rx_irq()
1143 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1144 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbevf_clean_rx_irq()
1161 hard_start = page_address(rx_buffer->page) + in ixgbevf_clean_rx_irq()
1162 rx_buffer->page_offset - offset; in ixgbevf_clean_rx_irq()
1172 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { in ixgbevf_clean_rx_irq()
1177 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1193 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1194 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1201 /* fetch next buffer in frame if non-eop */ in ixgbevf_clean_rx_irq()
1212 total_rx_bytes += skb->len; in ixgbevf_clean_rx_irq()
1217 if ((skb->pkt_type == PACKET_BROADCAST || in ixgbevf_clean_rx_irq()
1218 skb->pkt_type == PACKET_MULTICAST) && in ixgbevf_clean_rx_irq()
1219 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1220 eth_hdr(skb)->h_source)) { in ixgbevf_clean_rx_irq()
1238 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1242 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1248 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); in ixgbevf_clean_rx_irq()
1251 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1252 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1253 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1254 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1255 q_vector->rx.total_packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1256 q_vector->rx.total_bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1262 * ixgbevf_poll - NAPI polling callback
1273 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_poll()
1278 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_poll()
1289 if (q_vector->rx.count > 1) in ixgbevf_poll()
1290 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbevf_poll()
1294 ixgbevf_for_each_ring(ring, q_vector->rx) { in ixgbevf_poll()
1306 /* Exit the polling mode, but don't re-enable interrupts if stack might in ixgbevf_poll()
1307 * poll us due to busy-polling in ixgbevf_poll()
1310 if (adapter->rx_itr_setting == 1) in ixgbevf_poll()
1312 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_poll()
1313 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) in ixgbevf_poll()
1315 BIT(q_vector->v_idx)); in ixgbevf_poll()
1318 return min(work_done, budget - 1); in ixgbevf_poll()
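When several Rx rings share one vector (source lines 1289-1290), the NAPI budget is divided evenly but never drops below one per ring. A minimal sketch of that split; per_ring_budget() here is an illustrative helper, not a driver function:

    #include <stdio.h>

    static int per_ring_budget(int budget, int nr_rx_rings)
    {
            if (nr_rx_rings <= 1)
                    return budget;
            /* max(budget / nr_rx_rings, 1), as in ixgbevf_poll() above */
            return budget / nr_rx_rings > 1 ? budget / nr_rx_rings : 1;
    }

    int main(void)
    {
            printf("%d\n", per_ring_budget(64, 1));    /* 64 */
            printf("%d\n", per_ring_budget(64, 4));    /* 16 */
            printf("%d\n", per_ring_budget(64, 100));  /* clamped to 1 */
            return 0;
    }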
1322 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1327 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_write_eitr()
1328 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_eitr()
1329 int v_idx = q_vector->v_idx; in ixgbevf_write_eitr()
1330 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbevf_write_eitr()
1341 * ixgbevf_configure_msix - Configure MSI-X hardware
1344 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1352 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_configure_msix()
1353 adapter->eims_enable_mask = 0; in ixgbevf_configure_msix()
1361 q_vector = adapter->q_vector[v_idx]; in ixgbevf_configure_msix()
1363 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_configure_msix()
1364 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1366 ixgbevf_for_each_ring(ring, q_vector->tx) in ixgbevf_configure_msix()
1367 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1369 if (q_vector->tx.ring && !q_vector->rx.ring) { in ixgbevf_configure_msix()
1371 if (adapter->tx_itr_setting == 1) in ixgbevf_configure_msix()
1372 q_vector->itr = IXGBE_12K_ITR; in ixgbevf_configure_msix()
1374 q_vector->itr = adapter->tx_itr_setting; in ixgbevf_configure_msix()
1377 if (adapter->rx_itr_setting == 1) in ixgbevf_configure_msix()
1378 q_vector->itr = IXGBE_20K_ITR; in ixgbevf_configure_msix()
1380 q_vector->itr = adapter->rx_itr_setting; in ixgbevf_configure_msix()
1384 adapter->eims_enable_mask |= BIT(v_idx); in ixgbevf_configure_msix()
1389 ixgbevf_set_ivar(adapter, -1, 1, v_idx); in ixgbevf_configure_msix()
1391 adapter->eims_other = BIT(v_idx); in ixgbevf_configure_msix()
1392 adapter->eims_enable_mask |= adapter->eims_other; in ixgbevf_configure_msix()
1403 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1418 int bytes = ring_container->total_bytes; in ixgbevf_update_itr()
1419 int packets = ring_container->total_packets; in ixgbevf_update_itr()
1422 u8 itr_setting = ring_container->itr; in ixgbevf_update_itr()
1428 * 0-20MB/s lowest (100000 ints/s) in ixgbevf_update_itr()
1429 * 20-100MB/s low (20000 ints/s) in ixgbevf_update_itr()
1430 * 100-1249MB/s bulk (12000 ints/s) in ixgbevf_update_itr()
1433 timepassed_us = q_vector->itr >> 2; in ixgbevf_update_itr()
1457 ring_container->total_bytes = 0; in ixgbevf_update_itr()
1458 ring_container->total_packets = 0; in ixgbevf_update_itr()
1461 ring_container->itr = itr_setting; in ixgbevf_update_itr()
1466 u32 new_itr = q_vector->itr; in ixgbevf_set_itr()
1469 ixgbevf_update_itr(q_vector, &q_vector->tx); in ixgbevf_set_itr()
1470 ixgbevf_update_itr(q_vector, &q_vector->rx); in ixgbevf_set_itr()
1472 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in ixgbevf_set_itr()
1489 if (new_itr != q_vector->itr) { in ixgbevf_set_itr()
1491 new_itr = (10 * new_itr * q_vector->itr) / in ixgbevf_set_itr()
1492 ((9 * new_itr) + q_vector->itr); in ixgbevf_set_itr()
1495 q_vector->itr = new_itr; in ixgbevf_set_itr()
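The update at source lines 1491-1492 is a weighted step: the running ITR moves only part of the way toward the newly selected rate, which damps oscillation between the 100K/20K/12K interrupt classes listed earlier. A standalone sketch of how the value converges; itr_smooth() and the example register values are assumptions for illustration only:

    #include <stdio.h>

    /* One smoothing step: 10*target*cur / (9*target + cur). */
    static unsigned int itr_smooth(unsigned int cur, unsigned int target)
    {
            return (10 * target * cur) / (9 * target + cur);
    }

    int main(void)
    {
            unsigned int itr = 200;         /* e.g. a 20K ints/s setting */

            for (int step = 0; step < 4; step++) {
                    itr = itr_smooth(itr, 40);                 /* retarget toward 40 */
                    printf("step %d: itr = %u\n", step, itr);  /* 142, 113, 95, 83 */
            }
            return 0;
    }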
1504 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_msix_other()
1506 hw->mac.get_link_status = 1; in ixgbevf_msix_other()
1510 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); in ixgbevf_msix_other()
1516 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1525 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbevf_msix_clean_rings()
1526 napi_schedule_irqoff(&q_vector->napi); in ixgbevf_msix_clean_rings()
1532 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1535 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1540 struct net_device *netdev = adapter->netdev; in ixgbevf_request_msix_irqs()
1541 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_request_msix_irqs()
1546 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; in ixgbevf_request_msix_irqs()
1547 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbevf_request_msix_irqs()
1549 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1550 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1551 "%s-TxRx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1553 } else if (q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1554 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1555 "%s-rx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1556 } else if (q_vector->tx.ring) { in ixgbevf_request_msix_irqs()
1557 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1558 "%s-tx-%u", netdev->name, ti++); in ixgbevf_request_msix_irqs()
1563 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, in ixgbevf_request_msix_irqs()
1564 q_vector->name, q_vector); in ixgbevf_request_msix_irqs()
1566 hw_dbg(&adapter->hw, in ixgbevf_request_msix_irqs()
1573 err = request_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1574 &ixgbevf_msix_other, 0, netdev->name, adapter); in ixgbevf_request_msix_irqs()
1576 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", in ixgbevf_request_msix_irqs()
1585 vector--; in ixgbevf_request_msix_irqs()
1586 free_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1587 adapter->q_vector[vector]); in ixgbevf_request_msix_irqs()
1589 /* This failure is non-recoverable - it indicates the system is in ixgbevf_request_msix_irqs()
1599 adapter->num_msix_vectors = 0; in ixgbevf_request_msix_irqs()
1604 * ixgbevf_request_irq - initialize interrupts
1615 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); in ixgbevf_request_irq()
1624 if (!adapter->msix_entries) in ixgbevf_free_irq()
1627 q_vectors = adapter->num_msix_vectors; in ixgbevf_free_irq()
1628 i = q_vectors - 1; in ixgbevf_free_irq()
1630 free_irq(adapter->msix_entries[i].vector, adapter); in ixgbevf_free_irq()
1631 i--; in ixgbevf_free_irq()
1633 for (; i >= 0; i--) { in ixgbevf_free_irq()
1634 /* free only the irqs that were actually requested */ in ixgbevf_free_irq()
1635 if (!adapter->q_vector[i]->rx.ring && in ixgbevf_free_irq()
1636 !adapter->q_vector[i]->tx.ring) in ixgbevf_free_irq()
1639 free_irq(adapter->msix_entries[i].vector, in ixgbevf_free_irq()
1640 adapter->q_vector[i]); in ixgbevf_free_irq()
1645 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1650 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_disable()
1659 for (i = 0; i < adapter->num_msix_vectors; i++) in ixgbevf_irq_disable()
1660 synchronize_irq(adapter->msix_entries[i].vector); in ixgbevf_irq_disable()
1664 * ixgbevf_irq_enable - Enable default interrupt generation settings
1669 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable()
1671 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1672 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1673 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1677 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1686 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_tx_ring()
1687 u64 tdba = ring->dma; in ixgbevf_configure_tx_ring()
1690 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_tx_ring()
1699 ring->count * sizeof(union ixgbe_adv_tx_desc)); in ixgbevf_configure_tx_ring()
1713 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); in ixgbevf_configure_tx_ring()
1716 ring->next_to_clean = 0; in ixgbevf_configure_tx_ring()
1717 ring->next_to_use = 0; in ixgbevf_configure_tx_ring()
1730 memset(ring->tx_buffer_info, 0, in ixgbevf_configure_tx_ring()
1731 sizeof(struct ixgbevf_tx_buffer) * ring->count); in ixgbevf_configure_tx_ring()
1733 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); in ixgbevf_configure_tx_ring()
1734 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_configure_tx_ring()
1742 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); in ixgbevf_configure_tx_ring()
1748 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1758 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_configure_tx()
1759 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
1760 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_configure_tx()
1761 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); in ixgbevf_configure_tx()
1769 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_srrctl()
1786 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_psrtype()
1793 if (adapter->num_rx_queues > 1) in ixgbevf_setup_psrtype()
1803 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_disable_rx_queue()
1806 u8 reg_idx = ring->reg_idx; in ixgbevf_disable_rx_queue()
1808 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_disable_rx_queue()
1820 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_disable_rx_queue()
1830 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_rx_desc_queue_enable()
1833 u8 reg_idx = ring->reg_idx; in ixgbevf_rx_desc_queue_enable()
1835 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_rx_desc_queue_enable()
1840 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_rx_desc_queue_enable()
1848 * ixgbevf_init_rss_key - Initialize adapter RSS key
1857 if (!adapter->rss_key) { in ixgbevf_init_rss_key()
1860 return -ENOMEM; in ixgbevf_init_rss_key()
1863 adapter->rss_key = rss_key; in ixgbevf_init_rss_key()
1871 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_vfmrqc()
1873 u16 rss_i = adapter->num_rx_queues; in ixgbevf_setup_vfmrqc()
1878 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); in ixgbevf_setup_vfmrqc()
1884 adapter->rss_indir_tbl[i] = j; in ixgbevf_setup_vfmrqc()
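Source line 1884 sits inside the loop that programs the RSS redirection table: entries are filled round-robin across the active Rx queues so the hash spreads flows evenly. A small sketch of that fill pattern; the 64-entry table size and queue count are assumptions for the example:

    #include <stdio.h>

    int main(void)
    {
            unsigned char tbl[64];          /* redirection table, assumed size */
            unsigned int rss_i = 4;         /* active Rx queues */
            unsigned int i, j;

            for (i = 0, j = 0; i < sizeof(tbl); i++, j++) {
                    if (j == rss_i)
                            j = 0;
                    tbl[i] = j;             /* 0 1 2 3 0 1 2 3 ... */
            }

            for (i = 0; i < 8; i++)
                    printf("%u ", tbl[i]);
            printf("\n");
            return 0;
    }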
1907 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx_ring()
1909 u64 rdba = ring->dma; in ixgbevf_configure_rx_ring()
1911 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_rx_ring()
1920 ring->count * sizeof(union ixgbe_adv_rx_desc)); in ixgbevf_configure_rx_ring()
1935 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); in ixgbevf_configure_rx_ring()
1938 memset(ring->rx_buffer_info, 0, in ixgbevf_configure_rx_ring()
1939 sizeof(struct ixgbevf_rx_buffer) * ring->count); in ixgbevf_configure_rx_ring()
1943 rx_desc->wb.upper.length = 0; in ixgbevf_configure_rx_ring()
1946 ring->next_to_clean = 0; in ixgbevf_configure_rx_ring()
1947 ring->next_to_use = 0; in ixgbevf_configure_rx_ring()
1948 ring->next_to_alloc = 0; in ixgbevf_configure_rx_ring()
1953 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { in ixgbevf_configure_rx_ring()
1976 struct net_device *netdev = adapter->netdev; in ixgbevf_set_rx_buffer_len()
1977 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbevf_set_rx_buffer_len()
1983 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) in ixgbevf_set_rx_buffer_len()
1991 if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) in ixgbevf_set_rx_buffer_len()
1998 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
2005 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx()
2006 struct net_device *netdev = adapter->netdev; in ixgbevf_configure_rx()
2010 if (hw->mac.type >= ixgbe_mac_X550_vf) in ixgbevf_configure_rx()
2013 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2015 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); in ixgbevf_configure_rx()
2016 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2018 dev_err(&adapter->pdev->dev, in ixgbevf_configure_rx()
2019 "Failed to set MTU at %d\n", netdev->mtu); in ixgbevf_configure_rx()
2024 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_configure_rx()
2025 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx()
2036 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_add_vid()
2039 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2042 err = hw->mac.ops.set_vfta(hw, vid, 0, true); in ixgbevf_vlan_rx_add_vid()
2044 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2051 return -EIO; in ixgbevf_vlan_rx_add_vid()
2054 return -EACCES; in ixgbevf_vlan_rx_add_vid()
2057 set_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_add_vid()
2066 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_kill_vid()
2069 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2072 err = hw->mac.ops.set_vfta(hw, vid, 0, false); in ixgbevf_vlan_rx_kill_vid()
2074 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2079 clear_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_kill_vid()
2088 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in ixgbevf_restore_vlan()
2089 ixgbevf_vlan_rx_add_vid(adapter->netdev, in ixgbevf_restore_vlan()
2096 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_uc_addr_list()
2103 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); in ixgbevf_write_uc_addr_list()
2110 hw->mac.ops.set_uc_addr(hw, 0, NULL); in ixgbevf_write_uc_addr_list()
2117 * ixgbevf_set_rx_mode - Multicast and unicast set
2128 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_rx_mode()
2129 unsigned int flags = netdev->flags; in ixgbevf_set_rx_mode()
2142 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2144 hw->mac.ops.update_xcast_mode(hw, xcast_mode); in ixgbevf_set_rx_mode()
2147 hw->mac.ops.update_mc_addr_list(hw, netdev); in ixgbevf_set_rx_mode()
2151 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2158 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_enable_all()
2161 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
2162 napi_enable(&q_vector->napi); in ixgbevf_napi_enable_all()
2170 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_disable_all()
2173 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
2174 napi_disable(&q_vector->napi); in ixgbevf_napi_disable_all()
2180 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_dcb()
2183 unsigned int num_rx_queues = adapter->num_rx_queues; in ixgbevf_configure_dcb()
2184 unsigned int num_tx_queues = adapter->num_tx_queues; in ixgbevf_configure_dcb()
2187 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2192 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2202 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
2209 if ((adapter->num_rx_queues != num_rx_queues) || in ixgbevf_configure_dcb()
2210 (adapter->num_tx_queues != num_tx_queues)) { in ixgbevf_configure_dcb()
2212 hw->mbx.timeout = 0; in ixgbevf_configure_dcb()
2215 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); in ixgbevf_configure_dcb()
2225 ixgbevf_set_rx_mode(adapter->netdev); in ixgbevf_configure()
2236 /* Only save pre-reset stats if there are some */ in ixgbevf_save_reset_stats()
2237 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { in ixgbevf_save_reset_stats()
2238 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - in ixgbevf_save_reset_stats()
2239 adapter->stats.base_vfgprc; in ixgbevf_save_reset_stats()
2240 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - in ixgbevf_save_reset_stats()
2241 adapter->stats.base_vfgptc; in ixgbevf_save_reset_stats()
2242 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - in ixgbevf_save_reset_stats()
2243 adapter->stats.base_vfgorc; in ixgbevf_save_reset_stats()
2244 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - in ixgbevf_save_reset_stats()
2245 adapter->stats.base_vfgotc; in ixgbevf_save_reset_stats()
2246 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - in ixgbevf_save_reset_stats()
2247 adapter->stats.base_vfmprc; in ixgbevf_save_reset_stats()
2253 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_init_last_counter_stats()
2255 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); in ixgbevf_init_last_counter_stats()
2256 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); in ixgbevf_init_last_counter_stats()
2257 adapter->stats.last_vfgorc |= in ixgbevf_init_last_counter_stats()
2259 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); in ixgbevf_init_last_counter_stats()
2260 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); in ixgbevf_init_last_counter_stats()
2261 adapter->stats.last_vfgotc |= in ixgbevf_init_last_counter_stats()
2263 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); in ixgbevf_init_last_counter_stats()
2265 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; in ixgbevf_init_last_counter_stats()
2266 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; in ixgbevf_init_last_counter_stats()
2267 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; in ixgbevf_init_last_counter_stats()
2268 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; in ixgbevf_init_last_counter_stats()
2269 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; in ixgbevf_init_last_counter_stats()
2274 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_negotiate_api()
2286 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2289 err = hw->mac.ops.negotiate_api_version(hw, api[idx]); in ixgbevf_negotiate_api()
2295 if (hw->api_version >= ixgbe_mbox_api_15) { in ixgbevf_negotiate_api()
2296 hw->mbx.ops.init_params(hw); in ixgbevf_negotiate_api()
2297 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, in ixgbevf_negotiate_api()
2301 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2306 struct net_device *netdev = adapter->netdev; in ixgbevf_up_complete()
2307 struct pci_dev *pdev = adapter->pdev; in ixgbevf_up_complete()
2308 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_up_complete()
2313 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2315 if (is_valid_ether_addr(hw->mac.addr)) in ixgbevf_up_complete()
2316 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); in ixgbevf_up_complete()
2318 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); in ixgbevf_up_complete()
2320 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2322 state = adapter->link_state; in ixgbevf_up_complete()
2323 hw->mac.ops.get_link_state(hw, &adapter->link_state); in ixgbevf_up_complete()
2324 if (state && state != adapter->link_state) in ixgbevf_up_complete()
2325 dev_info(&pdev->dev, "VF is administratively disabled\n"); in ixgbevf_up_complete()
2328 clear_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_up_complete()
2341 hw->mac.get_link_status = 1; in ixgbevf_up_complete()
2342 mod_timer(&adapter->service_timer, jiffies); in ixgbevf_up_complete()
2353 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2354 * @rx_ring: ring to free buffers from
2358 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2360 /* Free Rx ring sk_buff */ in ixgbevf_clean_rx_ring()
2361 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2362 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2363 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2366 /* Free all the Rx ring pages */ in ixgbevf_clean_rx_ring()
2367 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2370 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2375 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2376 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2377 rx_buffer->page_offset, in ixgbevf_clean_rx_ring()
2381 /* free resources associated with mapping */ in ixgbevf_clean_rx_ring()
2382 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2383 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2388 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_clean_rx_ring()
2389 rx_buffer->pagecnt_bias); in ixgbevf_clean_rx_ring()
2392 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2396 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2397 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2398 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
2402 * ixgbevf_clean_tx_ring - Free Tx Buffers
2407 u16 i = tx_ring->next_to_clean; in ixgbevf_clean_tx_ring()
2408 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_ring()
2410 while (i != tx_ring->next_to_use) { in ixgbevf_clean_tx_ring()
2413 /* Free all the Tx ring sk_buffs */ in ixgbevf_clean_tx_ring()
2415 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_ring()
2417 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_clean_tx_ring()
2420 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_ring()
2426 eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_ring()
2434 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2436 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2442 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_ring()
2451 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2453 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2458 tx_ring->next_to_use = 0; in ixgbevf_clean_tx_ring()
2459 tx_ring->next_to_clean = 0; in ixgbevf_clean_tx_ring()
2464 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2471 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_clean_all_rx_rings()
2472 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2476 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2483 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_clean_all_tx_rings()
2484 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2485 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_clean_all_tx_rings()
2486 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); in ixgbevf_clean_all_tx_rings()
2491 struct net_device *netdev = adapter->netdev; in ixgbevf_down()
2492 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_down()
2496 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_down()
2500 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_down()
2501 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2515 del_timer_sync(&adapter->service_timer); in ixgbevf_down()
2518 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_down()
2519 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2525 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_down()
2526 u8 reg_idx = adapter->xdp_ring[i]->reg_idx; in ixgbevf_down()
2532 if (!pci_channel_offline(adapter->pdev)) in ixgbevf_down()
2541 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_reinit_locked()
2545 pci_set_master(adapter->pdev); in ixgbevf_reinit_locked()
2548 clear_bit(__IXGBEVF_RESETTING, &adapter->state); in ixgbevf_reinit_locked()
2553 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_reset()
2554 struct net_device *netdev = adapter->netdev; in ixgbevf_reset()
2556 if (hw->mac.ops.reset_hw(hw)) { in ixgbevf_reset()
2559 hw->mac.ops.init_hw(hw); in ixgbevf_reset()
2563 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in ixgbevf_reset()
2564 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in ixgbevf_reset()
2565 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); in ixgbevf_reset()
2568 adapter->last_reset = jiffies; in ixgbevf_reset()
2587 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, in ixgbevf_acquire_msix_vectors()
2591 dev_err(&adapter->pdev->dev, in ixgbevf_acquire_msix_vectors()
2592 "Unable to allocate MSI-X interrupts\n"); in ixgbevf_acquire_msix_vectors()
2593 kfree(adapter->msix_entries); in ixgbevf_acquire_msix_vectors()
2594 adapter->msix_entries = NULL; in ixgbevf_acquire_msix_vectors()
2602 adapter->num_msix_vectors = vectors; in ixgbevf_acquire_msix_vectors()
2608 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2620 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_num_queues()
2626 adapter->num_rx_queues = 1; in ixgbevf_set_num_queues()
2627 adapter->num_tx_queues = 1; in ixgbevf_set_num_queues()
2628 adapter->num_xdp_queues = 0; in ixgbevf_set_num_queues()
2630 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2635 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2642 adapter->num_rx_queues = num_tcs; in ixgbevf_set_num_queues()
2646 switch (hw->api_version) { in ixgbevf_set_num_queues()
2652 if (adapter->xdp_prog && in ixgbevf_set_num_queues()
2653 hw->mac.max_tx_queues == rss) in ixgbevf_set_num_queues()
2656 adapter->num_rx_queues = rss; in ixgbevf_set_num_queues()
2657 adapter->num_tx_queues = rss; in ixgbevf_set_num_queues()
2658 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; in ixgbevf_set_num_queues()
2667 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2677 /* It's easy to be greedy for MSI-X vectors, but it really in ixgbevf_set_interrupt_capability()
2683 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); in ixgbevf_set_interrupt_capability()
2687 adapter->msix_entries = kcalloc(v_budget, in ixgbevf_set_interrupt_capability()
2689 if (!adapter->msix_entries) in ixgbevf_set_interrupt_capability()
2690 return -ENOMEM; in ixgbevf_set_interrupt_capability()
2693 adapter->msix_entries[vector].entry = vector; in ixgbevf_set_interrupt_capability()
2695 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver in ixgbevf_set_interrupt_capability()
2697 * that we clean up the msix_entries pointer elsewhere. in ixgbevf_set_interrupt_capability()
2705 ring->next = head->ring; in ixgbevf_add_ring()
2706 head->ring = ring; in ixgbevf_add_ring()
2707 head->count++; in ixgbevf_add_ring()
2711 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2721 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2739 return -ENOMEM; in ixgbevf_alloc_q_vector()
2742 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll); in ixgbevf_alloc_q_vector()
2745 adapter->q_vector[v_idx] = q_vector; in ixgbevf_alloc_q_vector()
2746 q_vector->adapter = adapter; in ixgbevf_alloc_q_vector()
2747 q_vector->v_idx = v_idx; in ixgbevf_alloc_q_vector()
2750 ring = q_vector->ring; in ixgbevf_alloc_q_vector()
2754 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2755 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2758 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2761 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2764 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2765 ring->queue_index = txr_idx; in ixgbevf_alloc_q_vector()
2766 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2769 adapter->tx_ring[txr_idx] = ring; in ixgbevf_alloc_q_vector()
2772 txr_count--; in ixgbevf_alloc_q_vector()
2782 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2783 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2786 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2789 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2792 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2793 ring->queue_index = xdp_idx; in ixgbevf_alloc_q_vector()
2794 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2798 adapter->xdp_ring[xdp_idx] = ring; in ixgbevf_alloc_q_vector()
2801 xdp_count--; in ixgbevf_alloc_q_vector()
2811 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2812 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2815 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2818 ixgbevf_add_ring(ring, &q_vector->rx); in ixgbevf_alloc_q_vector()
2821 ring->count = adapter->rx_ring_count; in ixgbevf_alloc_q_vector()
2822 ring->queue_index = rxr_idx; in ixgbevf_alloc_q_vector()
2823 ring->reg_idx = rxr_idx; in ixgbevf_alloc_q_vector()
2826 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2829 rxr_count--; in ixgbevf_alloc_q_vector()
2840 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2850 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; in ixgbevf_free_q_vector()
2853 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_free_q_vector()
2855 adapter->xdp_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2857 adapter->tx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2860 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_free_q_vector()
2861 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2863 adapter->q_vector[v_idx] = NULL; in ixgbevf_free_q_vector()
2864 netif_napi_del(&q_vector->napi); in ixgbevf_free_q_vector()
2873 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2877 * return -ENOMEM.
2881 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_alloc_q_vectors()
2882 int rxr_remaining = adapter->num_rx_queues; in ixgbevf_alloc_q_vectors()
2883 int txr_remaining = adapter->num_tx_queues; in ixgbevf_alloc_q_vectors()
2884 int xdp_remaining = adapter->num_xdp_queues; in ixgbevf_alloc_q_vectors()
2889 for (; rxr_remaining; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2898 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2903 for (; q_vectors; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2917 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2919 txr_remaining -= tqpv; in ixgbevf_alloc_q_vectors()
2921 xdp_remaining -= xqpv; in ixgbevf_alloc_q_vectors()
2929 v_idx--; in ixgbevf_alloc_q_vectors()
2933 return -ENOMEM; in ixgbevf_alloc_q_vectors()
2937 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2946 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_free_q_vectors()
2949 q_vectors--; in ixgbevf_free_q_vectors()
2955 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2961 if (!adapter->msix_entries) in ixgbevf_reset_interrupt_capability()
2964 pci_disable_msix(adapter->pdev); in ixgbevf_reset_interrupt_capability()
2965 kfree(adapter->msix_entries); in ixgbevf_reset_interrupt_capability()
2966 adapter->msix_entries = NULL; in ixgbevf_reset_interrupt_capability()
2970 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2983 hw_dbg(&adapter->hw, in ixgbevf_init_interrupt_scheme()
2990 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); in ixgbevf_init_interrupt_scheme()
2994 …hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n… in ixgbevf_init_interrupt_scheme()
2995 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", in ixgbevf_init_interrupt_scheme()
2996 adapter->num_rx_queues, adapter->num_tx_queues, in ixgbevf_init_interrupt_scheme()
2997 adapter->num_xdp_queues); in ixgbevf_init_interrupt_scheme()
2999 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_init_interrupt_scheme()
3009 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
3013 * to pre-load conditions
3017 adapter->num_tx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3018 adapter->num_xdp_queues = 0; in ixgbevf_clear_interrupt_scheme()
3019 adapter->num_rx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3026 * ixgbevf_sw_init - Initialize general software structures
3035 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_sw_init()
3036 struct pci_dev *pdev = adapter->pdev; in ixgbevf_sw_init()
3037 struct net_device *netdev = adapter->netdev; in ixgbevf_sw_init()
3041 hw->vendor_id = pdev->vendor; in ixgbevf_sw_init()
3042 hw->device_id = pdev->device; in ixgbevf_sw_init()
3043 hw->revision_id = pdev->revision; in ixgbevf_sw_init()
3044 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ixgbevf_sw_init()
3045 hw->subsystem_device_id = pdev->subsystem_device; in ixgbevf_sw_init()
3047 hw->mbx.ops.init_params(hw); in ixgbevf_sw_init()
3049 if (hw->mac.type >= ixgbe_mac_X550_vf) { in ixgbevf_sw_init()
3056 hw->mac.max_tx_queues = 2; in ixgbevf_sw_init()
3057 hw->mac.max_rx_queues = 2; in ixgbevf_sw_init()
3060 spin_lock_init(&adapter->mbx_lock); in ixgbevf_sw_init()
3062 err = hw->mac.ops.reset_hw(hw); in ixgbevf_sw_init()
3064 dev_info(&pdev->dev, in ixgbevf_sw_init()
3067 err = hw->mac.ops.init_hw(hw); in ixgbevf_sw_init()
3073 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); in ixgbevf_sw_init()
3075 dev_info(&pdev->dev, "Error reading MAC address\n"); in ixgbevf_sw_init()
3076 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in ixgbevf_sw_init()
3077 dev_info(&pdev->dev, in ixgbevf_sw_init()
3079 eth_hw_addr_set(netdev, hw->mac.addr); in ixgbevf_sw_init()
3082 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_sw_init()
3083 dev_info(&pdev->dev, "Assigning random MAC address\n"); in ixgbevf_sw_init()
3085 ether_addr_copy(hw->mac.addr, netdev->dev_addr); in ixgbevf_sw_init()
3086 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); in ixgbevf_sw_init()
3090 adapter->rx_itr_setting = 1; in ixgbevf_sw_init()
3091 adapter->tx_itr_setting = 1; in ixgbevf_sw_init()
3094 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; in ixgbevf_sw_init()
3095 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; in ixgbevf_sw_init()
3097 adapter->link_state = true; in ixgbevf_sw_init()
3099 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_sw_init()
3129 * ixgbevf_update_stats - Update the board statistics counters.
3134 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_update_stats()
3139 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_update_stats()
3140 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_update_stats()
3143 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, in ixgbevf_update_stats()
3144 adapter->stats.vfgprc); in ixgbevf_update_stats()
3145 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, in ixgbevf_update_stats()
3146 adapter->stats.vfgptc); in ixgbevf_update_stats()
3148 adapter->stats.last_vfgorc, in ixgbevf_update_stats()
3149 adapter->stats.vfgorc); in ixgbevf_update_stats()
3151 adapter->stats.last_vfgotc, in ixgbevf_update_stats()
3152 adapter->stats.vfgotc); in ixgbevf_update_stats()
3153 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, in ixgbevf_update_stats()
3154 adapter->stats.vfmprc); in ixgbevf_update_stats()
3156 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_update_stats()
3157 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats()
3159 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3160 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3161 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3162 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3165 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbevf_update_stats()
3166 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbevf_update_stats()
3167 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbevf_update_stats()
3168 adapter->alloc_rx_page = alloc_rx_page; in ixgbevf_update_stats()
3172 * ixgbevf_service_timer - Timer Call-back
3181 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); in ixgbevf_service_timer()
3188 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) in ixgbevf_reset_subtask()
3193 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_reset_subtask()
3194 test_bit(__IXGBEVF_REMOVING, &adapter->state) || in ixgbevf_reset_subtask()
3195 test_bit(__IXGBEVF_RESETTING, &adapter->state)) { in ixgbevf_reset_subtask()
3200 adapter->tx_timeout_count++; in ixgbevf_reset_subtask()
3207 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3217 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_check_hang_subtask()
3222 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_check_hang_subtask()
3223 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_check_hang_subtask()
3227 if (netif_carrier_ok(adapter->netdev)) { in ixgbevf_check_hang_subtask()
3228 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_check_hang_subtask()
3229 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
3230 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_check_hang_subtask()
3231 set_check_for_tx_hang(adapter->xdp_ring[i]); in ixgbevf_check_hang_subtask()
3235 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { in ixgbevf_check_hang_subtask()
3236 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; in ixgbevf_check_hang_subtask()
3238 if (qv->rx.ring || qv->tx.ring) in ixgbevf_check_hang_subtask()
3247 * ixgbevf_watchdog_update_link - update the link status
3252 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_watchdog_update_link()
3253 u32 link_speed = adapter->link_speed; in ixgbevf_watchdog_update_link()
3254 bool link_up = adapter->link_up; in ixgbevf_watchdog_update_link()
3257 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3259 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); in ixgbevf_watchdog_update_link()
3261 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3264 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { in ixgbevf_watchdog_update_link()
3265 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_watchdog_update_link()
3269 adapter->link_up = link_up; in ixgbevf_watchdog_update_link()
3270 adapter->link_speed = link_speed; in ixgbevf_watchdog_update_link()
3274 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3280 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_up()
3286 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", in ixgbevf_watchdog_link_is_up()
3287 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3289 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3291 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? in ixgbevf_watchdog_link_is_up()
3299 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3305 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_down()
3307 adapter->link_speed = 0; in ixgbevf_watchdog_link_is_down()
3313 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); in ixgbevf_watchdog_link_is_down()
3319 * ixgbevf_watchdog_subtask - worker thread to bring link up
3325 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_watchdog_subtask()
3326 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_watchdog_subtask()
3331 if (adapter->link_up && adapter->link_state) in ixgbevf_watchdog_subtask()
3340 * ixgbevf_service_task - manages and runs subtasks
3348 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_service_task()
3350 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_service_task()
3351 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_service_task()
3368 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3371 * Free all transmit software resources
3377 vfree(tx_ring->tx_buffer_info); in ixgbevf_free_tx_resources()
3378 tx_ring->tx_buffer_info = NULL; in ixgbevf_free_tx_resources()
3380 /* if not set, then don't free */ in ixgbevf_free_tx_resources()
3381 if (!tx_ring->desc) in ixgbevf_free_tx_resources()
3384 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, in ixgbevf_free_tx_resources()
3385 tx_ring->dma); in ixgbevf_free_tx_resources()
3387 tx_ring->desc = NULL; in ixgbevf_free_tx_resources()
3391 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3394 * Free all transmit software resources
3400 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_free_all_tx_resources()
3401 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3402 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
3403 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_free_all_tx_resources()
3404 if (adapter->xdp_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3405 ixgbevf_free_tx_resources(adapter->xdp_ring[i]); in ixgbevf_free_all_tx_resources()
3409 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3416 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); in ixgbevf_setup_tx_resources()
3419 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_setup_tx_resources()
3420 tx_ring->tx_buffer_info = vmalloc(size); in ixgbevf_setup_tx_resources()
3421 if (!tx_ring->tx_buffer_info) in ixgbevf_setup_tx_resources()
3424 u64_stats_init(&tx_ring->syncp); in ixgbevf_setup_tx_resources()
3427 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbevf_setup_tx_resources()
3428 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbevf_setup_tx_resources()
3430 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, in ixgbevf_setup_tx_resources()
3431 &tx_ring->dma, GFP_KERNEL); in ixgbevf_setup_tx_resources()
3432 if (!tx_ring->desc) in ixgbevf_setup_tx_resources()
3438 vfree(tx_ring->tx_buffer_info); in ixgbevf_setup_tx_resources()
3439 tx_ring->tx_buffer_info = NULL; in ixgbevf_setup_tx_resources()
3440 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); in ixgbevf_setup_tx_resources()
3441 return -ENOMEM; in ixgbevf_setup_tx_resources()
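The Tx setup path (and the Rx path further down) follows one shape: a vmalloc'd array of per-descriptor software state, then a DMA-coherent descriptor ring rounded up to a 4 KiB boundary, with the vmalloc undone if the coherent allocation fails. A condensed sketch of that shape follows; the demo_* types are hypothetical stand-ins, with the descriptor sized at 16 bytes like the advanced descriptors used here.

#include <linux/align.h>
#include <linux/dma-mapping.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_desc { u8 raw[16]; };	/* stand-in, same size as an adv. descriptor */
struct demo_buffer { void *skb; };	/* per-descriptor software state */

struct demo_ring {
	struct demo_buffer *buffer_info;
	struct demo_desc *desc;		/* DMA-coherent descriptor ring */
	dma_addr_t dma;
	unsigned int count;
	unsigned int size;
};

static int demo_setup_ring(struct device *dev, struct demo_ring *ring)
{
	ring->buffer_info = vmalloc(array_size(ring->count,
					       sizeof(*ring->buffer_info)));
	if (!ring->buffer_info)
		return -ENOMEM;

	/* descriptor ring, rounded up to a 4 KiB boundary */
	ring->size = ALIGN(ring->count * sizeof(struct demo_desc), 4096);
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (ring->desc)
		return 0;

	/* unwind in reverse order of allocation */
	vfree(ring->buffer_info);
	ring->buffer_info = NULL;
	return -ENOMEM;
}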
3445 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3458 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_setup_all_tx_resources()
3459 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3462 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); in ixgbevf_setup_all_tx_resources()
3466 for (j = 0; j < adapter->num_xdp_queues; j++) { in ixgbevf_setup_all_tx_resources()
3467 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3470 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); in ixgbevf_setup_all_tx_resources()
3477 while (j--) in ixgbevf_setup_all_tx_resources()
3478 ixgbevf_free_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3479 while (i--) in ixgbevf_setup_all_tx_resources()
3480 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
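The error path above frees only the rings that were successfully allocated, walking backwards with while (j--) and while (i--). The same idiom in stand-alone form, with alloc_one()/free_one() as placeholder helpers and a deliberately injected failure:

#include <stdlib.h>

static void *alloc_one(unsigned int i)
{
	return i < 3 ? malloc(64) : NULL;	/* simulate failure at i == 3 */
}

static void free_one(void *p)
{
	free(p);
}

int main(void)
{
	void *ring[8] = { 0 };
	unsigned int i;

	for (i = 0; i < 8; i++) {
		ring[i] = alloc_one(i);
		if (!ring[i])
			goto err;
	}
	return 0;

err:
	/* unwind only what was allocated, in reverse order */
	while (i--)
		free_one(ring[i]);
	return 1;
}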
3486 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3497 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3498 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3499 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3502 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3505 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3506 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3508 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3509 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3511 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3514 /* XDP RX-queue info */ in ixgbevf_setup_rx_resources()
3515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3516 rx_ring->queue_index, 0) < 0) in ixgbevf_setup_rx_resources()
3519 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3523 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3524 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3525 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
3526 return -ENOMEM; in ixgbevf_setup_rx_resources()
3530 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3543 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_setup_all_rx_resources()
3544 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3547 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); in ixgbevf_setup_all_rx_resources()
3554 while (i--) in ixgbevf_setup_all_rx_resources()
3555 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3560 * ixgbevf_free_rx_resources - Free Rx Resources
3563 * Free all receive software resources
3569 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3570 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3571 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3572 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3574 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3575 rx_ring->dma); in ixgbevf_free_rx_resources()
3577 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3581 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3584 * Free all receive software resources
3590 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_free_all_rx_resources()
3591 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3592 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
3596 * ixgbevf_open - Called when a network interface is made active
3610 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_open()
3619 if (!adapter->num_msix_vectors) in ixgbevf_open()
3620 return -ENOMEM; in ixgbevf_open()
3622 if (hw->adapter_stopped) { in ixgbevf_open()
3627 if (hw->adapter_stopped) { in ixgbevf_open()
3629 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); in ixgbevf_open()
3635 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) in ixgbevf_open()
3636 return -EBUSY; in ixgbevf_open()
3657 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); in ixgbevf_open()
3661 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); in ixgbevf_open()
3683 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3698 * ixgbevf_close - Disables a network interface
3703 * The close entry point is called when an interface is de-activated
3720 struct net_device *dev = adapter->netdev; in ixgbevf_queue_reset_subtask()
3723 &adapter->state)) in ixgbevf_queue_reset_subtask()
3727 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_queue_reset_subtask()
3728 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_queue_reset_subtask()
3754 u16 i = tx_ring->next_to_use; in ixgbevf_tx_ctxtdesc()
3759 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbevf_tx_ctxtdesc()
3764 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in ixgbevf_tx_ctxtdesc()
3765 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); in ixgbevf_tx_ctxtdesc()
3766 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in ixgbevf_tx_ctxtdesc()
3767 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in ixgbevf_tx_ctxtdesc()
3776 struct sk_buff *skb = first->skb; in ixgbevf_tso()
3790 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tso()
3800 if (eth_p_mpls(first->protocol)) in ixgbevf_tso()
3810 if (ip.v4->version == 4) { in ixgbevf_tso()
3812 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in ixgbevf_tso()
3813 int len = csum_start - trans_start; in ixgbevf_tso()
3819 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? in ixgbevf_tso()
3824 ip.v4->tot_len = 0; in ixgbevf_tso()
3825 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3829 ip.v6->payload_len = 0; in ixgbevf_tso()
3830 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3835 l4_offset = l4.hdr - skb->data; in ixgbevf_tso()
3838 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in ixgbevf_tso()
3841 paylen = skb->len - l4_offset; in ixgbevf_tso()
3842 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in ixgbevf_tso()
3845 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbevf_tso()
3846 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbevf_tso()
3849 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; in ixgbevf_tso()
3850 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; in ixgbevf_tso()
3853 fceof_saidx |= itd->pfsa; in ixgbevf_tso()
3854 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tso()
3857 vlan_macip_lens = l4.hdr - ip.hdr; in ixgbevf_tso()
3858 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; in ixgbevf_tso()
3859 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tso()
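The TSO path derives three pieces of per-packet metadata: the total header length (*hdr_len), the payload length used to fix up the TCP pseudo-header checksum, and the packed mss_l4len_idx / vlan_macip_lens fields. A stand-alone worked example for a plain IPv4/TCP frame follows; the shift constants are restated from the hardware definitions, and the frame layout (14-byte Ethernet, 20-byte IP, 32-byte TCP, MSS 1448, two segments) is purely illustrative.

#include <stdio.h>

/* shift values as defined for the advanced Tx descriptor */
#define IXGBE_ADVTXD_L4LEN_SHIFT	8
#define IXGBE_ADVTXD_MACLEN_SHIFT	9
#define IXGBE_ADVTXD_MSS_SHIFT		16

int main(void)
{
	unsigned int mac_len = 14;	/* Ethernet header */
	unsigned int ip_len = 20;	/* IPv4 header, no options */
	unsigned int l4_len = 32;	/* TCP header with options (doff = 8) */
	unsigned int mss = 1448;
	unsigned int payload = 2 * mss;	/* two full segments */

	unsigned int l4_offset = mac_len + ip_len;		/* 34 */
	unsigned int hdr_len = l4_offset + l4_len;		/* 66 */
	unsigned int skb_len = hdr_len + payload;		/* 2962 */
	unsigned int paylen = skb_len - l4_offset;		/* checksum fixup */
	unsigned int gso_segs = payload / mss;			/* 2 */

	unsigned int mss_l4len_idx = (l4_len << IXGBE_ADVTXD_L4LEN_SHIFT) |
				     (mss << IXGBE_ADVTXD_MSS_SHIFT);
	unsigned int vlan_macip_lens = ip_len |
				       (mac_len << IXGBE_ADVTXD_MACLEN_SHIFT);

	/* bytecount is adjusted so stats reflect on-the-wire bytes */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	printf("hdr_len=%u paylen=%u mss_l4len_idx=0x%08x\n",
	       hdr_len, paylen, mss_l4len_idx);
	printf("vlan_macip_lens=0x%08x bytecount=%u (2 x %u on the wire)\n",
	       vlan_macip_lens, bytecount, hdr_len + mss);
	return 0;
}

The final check is a useful sanity test: bytecount (3028) equals two wire segments of header-plus-MSS (2 x 1514), which is exactly why the header length of every extra segment is added to the original skb length.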
3871 struct sk_buff *skb = first->skb; in ixgbevf_tx_csum()
3876 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tx_csum()
3879 switch (skb->csum_offset) { in ixgbevf_tx_csum()
3897 if (first->protocol == htons(ETH_P_IP)) in ixgbevf_tx_csum()
3901 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; in ixgbevf_tx_csum()
3902 vlan_macip_lens = skb_checksum_start_offset(skb) - in ixgbevf_tx_csum()
3907 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tx_csum()
3909 fceof_saidx |= itd->pfsa; in ixgbevf_tx_csum()
3910 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tx_csum()
3960 tx_desc->read.olinfo_status = olinfo_status; in ixgbevf_tx_olinfo_status()
3967 struct sk_buff *skb = first->skb; in ixgbevf_tx_map()
3973 u32 tx_flags = first->tx_flags; in ixgbevf_tx_map()
3975 u16 i = tx_ring->next_to_use; in ixgbevf_tx_map()
3979 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); in ixgbevf_tx_map()
3982 data_len = skb->data_len; in ixgbevf_tx_map()
3984 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbevf_tx_map()
3988 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ixgbevf_tx_map()
3989 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3996 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
3999 tx_desc->read.cmd_type_len = in ixgbevf_tx_map()
4004 if (i == tx_ring->count) { in ixgbevf_tx_map()
4008 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4011 size -= IXGBE_MAX_DATA_PER_TXD; in ixgbevf_tx_map()
4013 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
4019 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); in ixgbevf_tx_map()
4023 if (i == tx_ring->count) { in ixgbevf_tx_map()
4027 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4030 data_len -= size; in ixgbevf_tx_map()
4032 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbevf_tx_map()
4035 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4040 tx_desc->read.cmd_type_len = cmd_type; in ixgbevf_tx_map()
4043 first->time_stamp = jiffies; in ixgbevf_tx_map()
4048 * are new descriptors to fetch. (Only applicable for weak-ordered in ixgbevf_tx_map()
4049 * memory model archs, such as IA-64). in ixgbevf_tx_map()
4057 first->next_to_watch = tx_desc; in ixgbevf_tx_map()
4060 if (i == tx_ring->count) in ixgbevf_tx_map()
4063 tx_ring->next_to_use = i; in ixgbevf_tx_map()
4070 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbevf_tx_map()
4071 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4076 dma_unmap_page(tx_ring->dev, in ixgbevf_tx_map()
4082 if (i-- == 0) in ixgbevf_tx_map()
4083 i += tx_ring->count; in ixgbevf_tx_map()
4084 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4088 dma_unmap_single(tx_ring->dev, in ixgbevf_tx_map()
4094 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_tx_map()
4095 tx_buffer->skb = NULL; in ixgbevf_tx_map()
4097 tx_ring->next_to_use = i; in ixgbevf_tx_map()
4102 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4113 return -EBUSY; in __ixgbevf_maybe_stop_tx()
4115 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ixgbevf_maybe_stop_tx()
4116 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4117 ++tx_ring->tx_stats.restart_queue; in __ixgbevf_maybe_stop_tx()
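Whether the queue stays stopped or gets the "reprieve" above comes down to how many descriptors are still free in the ring after the stop plus memory barrier. The driver's own free-space helper is not shown in this excerpt; the model below assumes the common one-slot-reserved ring formula purely for illustration of the stop/restart decision.

#include <stdio.h>

/* illustrative ring model; one slot stays unused so that
 * next_to_use == next_to_clean always means "empty"
 */
struct ring {
	unsigned short count, next_to_use, next_to_clean;
};

static unsigned int desc_unused(const struct ring *r)
{
	unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

int main(void)
{
	struct ring r = { .count = 1024, .next_to_use = 1020, .next_to_clean = 10 };
	unsigned int needed = 21;	/* e.g. a worst-case multi-fragment frame */

	printf("free=%u -> %s\n", desc_unused(&r),
	       desc_unused(&r) < needed ? "stop queue" : "keep going");

	/* cleanup advances next_to_clean; once enough slots are free the
	 * queue is restarted and restart_queue is bumped, as above
	 */
	r.next_to_clean = 200;
	printf("free=%u -> %s\n", desc_unused(&r),
	       desc_unused(&r) < needed ? "stop queue" : "restart queue");
	return 0;
}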
4155 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in ixgbevf_xmit_frame_ring()
4156 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in ixgbevf_xmit_frame_ring()
4161 count += skb_shinfo(skb)->nr_frags; in ixgbevf_xmit_frame_ring()
4164 tx_ring->tx_stats.tx_busy++; in ixgbevf_xmit_frame_ring()
4169 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbevf_xmit_frame_ring()
4170 first->skb = skb; in ixgbevf_xmit_frame_ring()
4171 first->bytecount = skb->len; in ixgbevf_xmit_frame_ring()
4172 first->gso_segs = 1; in ixgbevf_xmit_frame_ring()
4181 first->tx_flags = tx_flags; in ixgbevf_xmit_frame_ring()
4182 first->protocol = vlan_get_protocol(skb); in ixgbevf_xmit_frame_ring()
4201 dev_kfree_skb_any(first->skb); in ixgbevf_xmit_frame_ring()
4202 first->skb = NULL; in ixgbevf_xmit_frame_ring()
4212 if (skb->len <= 0) { in ixgbevf_xmit_frame()
4220 if (skb->len < 17) { in ixgbevf_xmit_frame()
4223 skb->len = 17; in ixgbevf_xmit_frame()
4226 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
4231 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4240 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_mac()
4244 if (!is_valid_ether_addr(addr->sa_data)) in ixgbevf_set_mac()
4245 return -EADDRNOTAVAIL; in ixgbevf_set_mac()
4247 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4249 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); in ixgbevf_set_mac()
4251 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4254 return -EPERM; in ixgbevf_set_mac()
4256 ether_addr_copy(hw->mac.addr, addr->sa_data); in ixgbevf_set_mac()
4257 ether_addr_copy(hw->mac.perm_addr, addr->sa_data); in ixgbevf_set_mac()
4258 eth_hw_addr_set(netdev, addr->sa_data); in ixgbevf_set_mac()
4264 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4273 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_change_mtu()
4278 if (adapter->xdp_prog) { in ixgbevf_change_mtu()
4279 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); in ixgbevf_change_mtu()
4280 return -EPERM; in ixgbevf_change_mtu()
4283 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4285 ret = hw->mac.ops.set_rlpml(hw, max_frame); in ixgbevf_change_mtu()
4286 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4288 return -EINVAL; in ixgbevf_change_mtu()
4291 netdev->mtu, new_mtu); in ixgbevf_change_mtu()
4294 WRITE_ONCE(netdev->mtu, new_mtu); in ixgbevf_change_mtu()
4326 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_resume()
4328 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_resume()
4348 ixgbevf_suspend(&pdev->dev); in ixgbevf_shutdown()
4359 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_tx_ring_stats()
4360 bytes = ring->stats.bytes; in ixgbevf_get_tx_ring_stats()
4361 packets = ring->stats.packets; in ixgbevf_get_tx_ring_stats()
4362 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_tx_ring_stats()
4363 stats->tx_bytes += bytes; in ixgbevf_get_tx_ring_stats()
4364 stats->tx_packets += packets; in ixgbevf_get_tx_ring_stats()
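The fetch_begin/fetch_retry loop above is the reader half of the u64_stats seqcount protocol: on 32-bit kernels the read is retried if a writer updated the counters mid-read, and on 64-bit it compiles down to plain loads. For context, the writer half as it typically appears in a ring cleanup path is sketched below; demo_ring_stats is a hypothetical stand-in, and the syncp is assumed to have been initialized with u64_stats_init().

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* single writer per ring (the cleanup path), so only the seqcount is
 * needed; concurrent readers loop on fetch_begin/fetch_retry as above
 */
static void demo_update_stats(struct demo_ring_stats *s,
			      u64 packets, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += packets;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}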
4379 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; in ixgbevf_get_stats()
4382 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_get_stats()
4383 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4385 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_stats()
4386 bytes = ring->stats.bytes; in ixgbevf_get_stats()
4387 packets = ring->stats.packets; in ixgbevf_get_stats()
4388 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_stats()
4389 stats->rx_bytes += bytes; in ixgbevf_get_stats()
4390 stats->rx_packets += packets; in ixgbevf_get_stats()
4393 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_get_stats()
4394 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()
4398 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_get_stats()
4399 ring = adapter->xdp_ring[i]; in ixgbevf_get_stats()
4423 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in ixgbevf_features_check()
4433 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in ixgbevf_features_check()
4441 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in ixgbevf_xdp_setup()
4446 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_xdp_setup()
4447 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4450 return -EINVAL; in ixgbevf_xdp_setup()
4453 old_prog = xchg(&adapter->xdp_prog, prog); in ixgbevf_xdp_setup()
4470 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_xdp_setup()
4471 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()
4482 switch (xdp->command) { in ixgbevf_xdp()
4484 return ixgbevf_xdp_setup(dev, xdp->prog); in ixgbevf_xdp()
4486 return -EINVAL; in ixgbevf_xdp()
4508 dev->netdev_ops = &ixgbevf_netdev_ops; in ixgbevf_assign_netdev_ops()
4510 dev->watchdog_timeo = 5 * HZ; in ixgbevf_assign_netdev_ops()
4514 * ixgbevf_probe - Device Initialization Routine
4529 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; in ixgbevf_probe()
4537 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ixgbevf_probe()
4539 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); in ixgbevf_probe()
4545 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); in ixgbevf_probe()
4554 err = -ENOMEM; in ixgbevf_probe()
4558 SET_NETDEV_DEV(netdev, &pdev->dev); in ixgbevf_probe()
4562 adapter->netdev = netdev; in ixgbevf_probe()
4563 adapter->pdev = pdev; in ixgbevf_probe()
4564 hw = &adapter->hw; in ixgbevf_probe()
4565 hw->back = adapter; in ixgbevf_probe()
4566 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbevf_probe()
4573 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), in ixgbevf_probe()
4575 adapter->io_addr = hw->hw_addr; in ixgbevf_probe()
4576 if (!hw->hw_addr) { in ixgbevf_probe()
4577 err = -EIO; in ixgbevf_probe()
4584 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); in ixgbevf_probe()
4585 hw->mac.type = ii->mac; in ixgbevf_probe()
4587 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, in ixgbevf_probe()
4596 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_probe()
4598 err = -EIO; in ixgbevf_probe()
4602 netdev->hw_features = NETIF_F_SG | in ixgbevf_probe()
4616 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4617 netdev->hw_features |= NETIF_F_GSO_PARTIAL | in ixgbevf_probe()
4620 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in ixgbevf_probe()
4622 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in ixgbevf_probe()
4623 netdev->mpls_features |= NETIF_F_SG | in ixgbevf_probe()
4627 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4628 netdev->hw_enc_features |= netdev->vlan_features; in ixgbevf_probe()
4631 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in ixgbevf_probe()
4635 netdev->priv_flags |= IFF_UNICAST_FLT; in ixgbevf_probe()
4636 netdev->xdp_features = NETDEV_XDP_ACT_BASIC; in ixgbevf_probe()
4638 /* MTU range: 68 - 1504 or 9710 */ in ixgbevf_probe()
4639 netdev->min_mtu = ETH_MIN_MTU; in ixgbevf_probe()
4640 switch (adapter->hw.api_version) { in ixgbevf_probe()
4646 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4650 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) in ixgbevf_probe()
4651 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4654 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; in ixgbevf_probe()
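The advertised MTU range is plain frame-size arithmetic: without jumbo support the VF can carry ETH_DATA_LEN plus the FCS (1504 bytes of MTU), while mailbox API versions that allow jumbo frames permit the 9728-byte maximum frame minus the Ethernet header and FCS, i.e. 9710, matching the "68 - 1504 or 9710" comment. A quick stand-alone check follows; the constants are restated here for the example, with IXGBE_MAX_JUMBO_FRAME_SIZE taken as 9728 per the driver headers.

#include <stdio.h>

#define ETH_HLEN			14	/* Ethernet header */
#define ETH_FCS_LEN			4	/* frame check sequence */
#define ETH_DATA_LEN			1500	/* standard payload */
#define VLAN_HLEN			4	/* 802.1Q tag */
#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728

int main(void)
{
	int std_max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
	int jumbo_max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
	int xdp_frame = 1500 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	printf("max_mtu (no jumbo) = %d\n", std_max_mtu);	/* 1504 */
	printf("max_mtu (jumbo OK) = %d\n", jumbo_max_mtu);	/* 9710 */
	printf("frame for 1500B MTU = %d (must fit the Rx buffer for XDP)\n",
	       xdp_frame);					/* 1522 */
	return 0;
}

The last line mirrors the frame_size computed in ixgbevf_xdp_setup() further down, which is why loading an XDP program and raising the MTU are mutually constrained.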
4658 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_probe()
4659 err = -EIO; in ixgbevf_probe()
4663 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); in ixgbevf_probe()
4665 INIT_WORK(&adapter->service_task, ixgbevf_service_task); in ixgbevf_probe()
4666 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); in ixgbevf_probe()
4667 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_probe()
4673 strcpy(netdev->name, "eth%d"); in ixgbevf_probe()
4686 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); in ixgbevf_probe()
4687 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); in ixgbevf_probe()
4689 switch (hw->mac.type) { in ixgbevf_probe()
4691 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); in ixgbevf_probe()
4694 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); in ixgbevf_probe()
4698 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); in ixgbevf_probe()
4708 iounmap(adapter->io_addr); in ixgbevf_probe()
4709 kfree(adapter->rss_key); in ixgbevf_probe()
4711 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_probe()
4723 * ixgbevf_remove - Device Removal Routine
4728 * Hot-Plug event, or because the driver is going to be removed from
4742 set_bit(__IXGBEVF_REMOVING, &adapter->state); in ixgbevf_remove()
4743 cancel_work_sync(&adapter->service_task); in ixgbevf_remove()
4745 if (netdev->reg_state == NETREG_REGISTERED) in ixgbevf_remove()
4752 iounmap(adapter->io_addr); in ixgbevf_remove()
4755 hw_dbg(&adapter->hw, "Remove complete\n"); in ixgbevf_remove()
4757 kfree(adapter->rss_key); in ixgbevf_remove()
4758 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_remove()
4766 * ixgbevf_io_error_detected - called when PCI error is detected
4779 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_io_error_detected()
4793 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) in ixgbevf_io_error_detected()
4802 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4805 * Restart the card from scratch, as if from a cold-boot. Implementation
4806 * resembles the first-half of the ixgbevf_resume routine.
4814 dev_err(&pdev->dev, in ixgbevf_io_slot_reset()
4815 "Cannot re-enable PCI device after reset.\n"); in ixgbevf_io_slot_reset()
4819 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_io_slot_reset()
4821 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_io_slot_reset()
4830 * ixgbevf_io_resume - called when traffic can start flowing again.
4835 * second-half of the ixgbevf_resume routine.
4872 * ixgbevf_init_module - Driver Registration Routine
4886 return -ENOMEM; in ixgbevf_init_module()
4901 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4917 * ixgbevf_get_hw_dev_name - return device name string
4923 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_get_hw_dev_name()
4925 return adapter->netdev->name; in ixgbevf_get_hw_dev_name()