
1 // SPDX-License-Identifier: GPL-2.0
33 static int debug = -1;
81 struct net_device *dev = adapter->netdev; in igc_reset()
82 struct igc_hw *hw = &adapter->hw; in igc_reset()
83 struct igc_fc_info *fc = &hw->fc; in igc_reset()
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igc_reset()
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igc_reset()
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igc_reset()
100 fc->low_water = fc->high_water - 16; in igc_reset()
101 fc->pause_time = 0xFFFF; in igc_reset()
102 fc->send_xon = 1; in igc_reset()
103 fc->current_mode = fc->requested_mode; in igc_reset()
105 hw->mac.ops.reset_hw(hw); in igc_reset()
107 if (hw->mac.ops.init_hw(hw)) in igc_reset()
110 /* Re-establish EEE setting */ in igc_reset()
113 if (!netif_running(adapter->netdev)) in igc_reset()
114 igc_power_down_phy_copper_base(&adapter->hw); in igc_reset()
119 /* Re-enable PTP, where applicable. */ in igc_reset()
122 /* Re-enable TSN offloading, where applicable. */ in igc_reset()
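A quick way to see what the watermark lines above compute: pba is the Rx packet buffer size in kilobytes, so (pba << 10) converts it to bytes, one maximum Tx plus one maximum Rx frame is reserved, and the result is rounded down to the 16-byte granularity the hardware expects. A standalone sketch with illustrative numbers (the buffer and jumbo sizes here are assumptions, not the driver's real constants):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pba = 20;                     /* hypothetical 20 KB Rx packet buffer */
	uint32_t max_frame_size = 1522;        /* 1500 MTU + Ethernet header + VLAN + FCS */
	uint32_t max_jumbo_frame_size = 9216;  /* assumed jumbo ceiling, illustrative only */
	uint32_t hwm, high_water, low_water;

	hwm = (pba << 10) - (max_frame_size + max_jumbo_frame_size);

	high_water = hwm & 0xFFFFFFF0;  /* round down to 16-byte granularity */
	low_water  = high_water - 16;   /* XOFF threshold sits just below XON */

	printf("hwm=%u high=%u low=%u\n", hwm, high_water, low_water);
	return 0;
}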
129 * igc_power_up_link - Power up the phy link
134 igc_reset_phy(&adapter->hw); in igc_power_up_link()
136 igc_power_up_phy_copper(&adapter->hw); in igc_power_up_link()
138 igc_setup_link(&adapter->hw); in igc_power_up_link()
142 * igc_release_hw_control - release control of the h/w to f/w
151 struct igc_hw *hw = &adapter->hw; in igc_release_hw_control()
154 if (!pci_device_is_present(adapter->pdev)) in igc_release_hw_control()
164 * igc_get_hw_control - get control of the h/w from f/w
173 struct igc_hw *hw = &adapter->hw; in igc_get_hw_control()
191 * igc_clean_tx_ring - Free Tx Buffers
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
203 switch (tx_buffer->type) { in igc_clean_tx_ring()
208 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
212 dev_kfree_skb_any(tx_buffer->skb); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
221 eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
240 tx_buffer->next_to_watch = NULL; in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
258 memset(tx_ring->tx_buffer_info, 0, in igc_clean_tx_ring()
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); in igc_clean_tx_ring()
262 memset(tx_ring->desc, 0, tx_ring->size); in igc_clean_tx_ring()
265 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
266 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
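The cleanup loop above walks tx_buffer_info with an index that wraps back to slot 0 when it reaches the ring count; the same pattern recurs throughout the driver. A minimal standalone sketch of that wrap (ring size and starting slot are made up):

#include <stdio.h>

#define RING_COUNT 8  /* illustrative ring size */

int main(void)
{
	unsigned int i = 5;   /* pretend next_to_clean */
	int steps;

	for (steps = 0; steps < 6; steps++) {
		printf("visiting slot %u\n", i);
		i++;
		if (i == RING_COUNT)  /* wrap back to slot 0, as the driver does */
			i = 0;
	}
	return 0;
}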
270 * igc_free_tx_resources - Free Tx Resources per Queue
279 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
280 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
283 if (!tx_ring->desc) in igc_free_tx_resources()
286 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
287 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
289 tx_ring->desc = NULL; in igc_free_tx_resources()
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
302 for (i = 0; i < adapter->num_tx_queues; i++) in igc_free_all_tx_resources()
303 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
314 for (i = 0; i < adapter->num_tx_queues; i++) in igc_clean_all_tx_rings()
315 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
316 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
321 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_tx_ring_hw()
322 u8 idx = ring->reg_idx; in igc_disable_tx_ring_hw()
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
339 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_disable_all_tx_rings_hw()
340 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_disable_all_tx_rings_hw()
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
354 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
355 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
359 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
360 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
365 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
368 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
370 if (!tx_ring->desc) in igc_setup_tx_resources()
373 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
374 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
379 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
381 return -ENOMEM; in igc_setup_tx_resources()
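The allocation above sizes the descriptor ring as count times the 16-byte advanced Tx descriptor and rounds the result up to a 4096-byte boundary before dma_alloc_coherent(). A standalone sketch of that sizing, using a local ALIGN_UP with the same semantics as the kernel's ALIGN() (the ring length is illustrative):

#include <stdio.h>
#include <stddef.h>

/* Same rounding as the kernel's ALIGN(x, a) for a power-of-two a */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t desc_size = 16;          /* union igc_adv_tx_desc is 16 bytes */
	size_t count = 256;             /* illustrative ring length */
	size_t size = count * desc_size;

	size = ALIGN_UP(size, 4096);    /* pad to a whole page, as the driver does */
	printf("ring needs %zu bytes\n", size);
	return 0;
}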
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
392 struct net_device *dev = adapter->netdev; in igc_setup_all_tx_resources()
395 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_setup_all_tx_resources()
396 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
399 for (i--; i >= 0; i--) in igc_setup_all_tx_resources()
400 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
410 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring_page_shared()
412 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring_page_shared()
413 rx_ring->skb = NULL; in igc_clean_rx_ring_page_shared()
416 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring_page_shared()
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring_page_shared()
422 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring_page_shared()
423 buffer_info->dma, in igc_clean_rx_ring_page_shared()
424 buffer_info->page_offset, in igc_clean_rx_ring_page_shared()
429 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring_page_shared()
430 buffer_info->dma, in igc_clean_rx_ring_page_shared()
434 __page_frag_cache_drain(buffer_info->page, in igc_clean_rx_ring_page_shared()
435 buffer_info->pagecnt_bias); in igc_clean_rx_ring_page_shared()
438 if (i == rx_ring->count) in igc_clean_rx_ring_page_shared()
448 for (i = 0; i < ring->count; i++) { in igc_clean_rx_ring_xsk_pool()
449 bi = &ring->rx_buffer_info[i]; in igc_clean_rx_ring_xsk_pool()
450 if (!bi->xdp) in igc_clean_rx_ring_xsk_pool()
453 xsk_buff_free(bi->xdp); in igc_clean_rx_ring_xsk_pool()
454 bi->xdp = NULL; in igc_clean_rx_ring_xsk_pool()
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
464 if (ring->xsk_pool) in igc_clean_rx_ring()
471 ring->next_to_alloc = 0; in igc_clean_rx_ring()
472 ring->next_to_clean = 0; in igc_clean_rx_ring()
473 ring->next_to_use = 0; in igc_clean_rx_ring()
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
484 for (i = 0; i < adapter->num_rx_queues; i++) in igc_clean_all_rx_rings()
485 if (adapter->rx_ring[i]) in igc_clean_all_rx_rings()
486 igc_clean_rx_ring(adapter->rx_ring[i]); in igc_clean_all_rx_rings()
490 * igc_free_rx_resources - Free Rx Resources
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_free_rx_resources()
501 vfree(rx_ring->rx_buffer_info); in igc_free_rx_resources()
502 rx_ring->rx_buffer_info = NULL; in igc_free_rx_resources()
505 if (!rx_ring->desc) in igc_free_rx_resources()
508 dma_free_coherent(rx_ring->dev, rx_ring->size, in igc_free_rx_resources()
509 rx_ring->desc, rx_ring->dma); in igc_free_rx_resources()
511 rx_ring->desc = NULL; in igc_free_rx_resources()
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
524 for (i = 0; i < adapter->num_rx_queues; i++) in igc_free_all_rx_resources()
525 igc_free_rx_resources(adapter->rx_ring[i]); in igc_free_all_rx_resources()
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
536 struct net_device *ndev = rx_ring->netdev; in igc_setup_rx_resources()
537 struct device *dev = rx_ring->dev; in igc_setup_rx_resources()
538 u8 index = rx_ring->queue_index; in igc_setup_rx_resources()
541 /* XDP RX-queue info */ in igc_setup_rx_resources()
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igc_setup_rx_resources()
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, in igc_setup_rx_resources()
545 rx_ring->q_vector->napi.napi_id); in igc_setup_rx_resources()
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; in igc_setup_rx_resources()
553 rx_ring->rx_buffer_info = vzalloc(size); in igc_setup_rx_resources()
554 if (!rx_ring->rx_buffer_info) in igc_setup_rx_resources()
560 rx_ring->size = rx_ring->count * desc_len; in igc_setup_rx_resources()
561 rx_ring->size = ALIGN(rx_ring->size, 4096); in igc_setup_rx_resources()
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igc_setup_rx_resources()
564 &rx_ring->dma, GFP_KERNEL); in igc_setup_rx_resources()
566 if (!rx_ring->desc) in igc_setup_rx_resources()
569 rx_ring->next_to_alloc = 0; in igc_setup_rx_resources()
570 rx_ring->next_to_clean = 0; in igc_setup_rx_resources()
571 rx_ring->next_to_use = 0; in igc_setup_rx_resources()
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
577 vfree(rx_ring->rx_buffer_info); in igc_setup_rx_resources()
578 rx_ring->rx_buffer_info = NULL; in igc_setup_rx_resources()
580 return -ENOMEM; in igc_setup_rx_resources()
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 struct net_device *dev = adapter->netdev; in igc_setup_all_rx_resources()
595 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_setup_all_rx_resources()
596 err = igc_setup_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
599 for (i--; i >= 0; i--) in igc_setup_all_rx_resources()
600 igc_free_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) in igc_get_xsk_pool()
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); in igc_get_xsk_pool()
619 * igc_configure_rx_ring - Configure a receive ring after Reset
628 struct igc_hw *hw = &adapter->hw; in igc_configure_rx_ring()
630 int reg_idx = ring->reg_idx; in igc_configure_rx_ring()
632 u64 rdba = ring->dma; in igc_configure_rx_ring()
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igc_configure_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
659 ring->count * sizeof(union igc_adv_rx_desc)); in igc_configure_rx_ring()
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); in igc_configure_rx_ring()
664 writel(0, ring->tail); in igc_configure_rx_ring()
666 /* reset next-to-use/clean to place SW in sync with hardware */ in igc_configure_rx_ring()
667 ring->next_to_clean = 0; in igc_configure_rx_ring()
668 ring->next_to_use = 0; in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
691 memset(ring->rx_buffer_info, 0, in igc_configure_rx_ring()
692 sizeof(struct igc_rx_buffer) * ring->count); in igc_configure_rx_ring()
696 rx_desc->wb.upper.length = 0; in igc_configure_rx_ring()
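The 64-bit descriptor base address (rdba above) is programmed into the hardware as two 32-bit halves; only the length write near line 659 is visible in this listing, so take the register pairing as an assumption from the igb/igc family. A standalone sketch of the split itself:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rdba = 0x0000000123456000ULL;          /* illustrative DMA address */
	uint32_t lo = (uint32_t)(rdba & 0xffffffffULL); /* goes to the "base low" register */
	uint32_t hi = (uint32_t)(rdba >> 32);           /* goes to the "base high" register */

	printf("base low=0x%08x base high=0x%08x\n", lo, hi);
	return 0;
}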
705 * igc_configure_rx - Configure receive Unit after Reset
717 for (i = 0; i < adapter->num_rx_queues; i++) in igc_configure_rx()
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); in igc_configure_rx()
722 * igc_configure_tx_ring - Configure transmit ring after Reset
731 struct igc_hw *hw = &adapter->hw; in igc_configure_tx_ring()
732 int reg_idx = ring->reg_idx; in igc_configure_tx_ring()
733 u64 tdba = ring->dma; in igc_configure_tx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
743 ring->count * sizeof(union igc_adv_tx_desc)); in igc_configure_tx_ring()
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); in igc_configure_tx_ring()
750 writel(0, ring->tail); in igc_configure_tx_ring()
761 * igc_configure_tx - Configure transmit Unit after Reset
770 for (i = 0; i < adapter->num_tx_queues; i++) in igc_configure_tx()
771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
775 * igc_setup_mrqc - configure the multiple receive queue control registers
780 struct igc_hw *hw = &adapter->hw; in igc_setup_mrqc()
789 num_rx_queues = adapter->rss_queues; in igc_setup_mrqc()
791 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igc_setup_mrqc()
793 adapter->rss_indir_tbl[j] = in igc_setup_mrqc()
795 adapter->rss_indir_tbl_init = num_rx_queues; in igc_setup_mrqc()
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) in igc_setup_mrqc()
823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) in igc_setup_mrqc()
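The indirection-table fill near line 791 above spreads the RETA entries evenly across the active RSS queues. The listing truncates the actual assignment, so the spreading formula below is an assumption following the igb/igc convention; a standalone sketch:

#include <stdio.h>

#define RETA_SIZE 128  /* assumed table size, matching the igb/igc family */

int main(void)
{
	unsigned char reta[RETA_SIZE];
	unsigned int num_rx_queues = 4;   /* illustrative adapter->rss_queues */
	unsigned int j;

	for (j = 0; j < RETA_SIZE; j++)
		reta[j] = (j * num_rx_queues) / RETA_SIZE;  /* even spread over queues 0..3 */

	printf("first entries: %u %u ... last: %u\n", reta[0], reta[32], reta[127]);
	return 0;
}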
832 * igc_setup_rctl - configure the receive control registers
837 struct igc_hw *hw = &adapter->hw; in igc_setup_rctl()
846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); in igc_setup_rctl()
859 /* disable queue 0 to prevent tail write w/o re-config */ in igc_setup_rctl()
863 if (adapter->netdev->features & NETIF_F_RXALL) { in igc_setup_rctl()
879 * igc_setup_tctl - configure the transmit control registers
884 struct igc_hw *hw = &adapter->hw; in igc_setup_tctl()
903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
908 * @queue: If non-negative, queue assignment feature is enabled and frames
916 struct net_device *dev = adapter->netdev; in igc_set_mac_filter_hw()
917 struct igc_hw *hw = &adapter->hw; in igc_set_mac_filter_hw()
920 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_set_mac_filter_hw()
946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
952 struct net_device *dev = adapter->netdev; in igc_clear_mac_filter_hw()
953 struct igc_hw *hw = &adapter->hw; in igc_clear_mac_filter_hw()
955 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_clear_mac_filter_hw()
967 struct net_device *dev = adapter->netdev; in igc_set_default_mac_filter()
968 u8 *addr = adapter->hw.mac.addr; in igc_set_default_mac_filter()
972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
976 * igc_set_mac - Change the Ethernet Address of the NIC
985 struct igc_hw *hw = &adapter->hw; in igc_set_mac()
988 if (!is_valid_ether_addr(addr->sa_data)) in igc_set_mac()
989 return -EADDRNOTAVAIL; in igc_set_mac()
991 eth_hw_addr_set(netdev, addr->sa_data); in igc_set_mac()
992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igc_set_mac()
1001 * igc_write_mc_addr_list - write multicast addresses to MTA
1005 * Returns: -ENOMEM on failure
1012 struct igc_hw *hw = &adapter->hw; in igc_write_mc_addr_list()
1025 return -ENOMEM; in igc_write_mc_addr_list()
1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igc_write_mc_addr_list()
1041 struct igc_adapter *adapter = netdev_priv(ring->netdev); in igc_tx_launchtime()
1042 ktime_t cycle_time = adapter->cycle_time; in igc_tx_launchtime()
1043 ktime_t base_time = adapter->base_time; in igc_tx_launchtime()
1055 if (baset_est != ring->last_ff_cycle) { in igc_tx_launchtime()
1057 ring->last_ff_cycle = baset_est; in igc_tx_launchtime()
1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) in igc_tx_launchtime()
1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", in igc_tx_launchtime()
1073 ring->last_tx_cycle = end_of_cycle; in igc_tx_launchtime()
1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_init_empty_frame()
1094 if (dma_mapping_error(ring->dev, dma)) { in igc_init_empty_frame()
1095 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); in igc_init_empty_frame()
1096 return -ENOMEM; in igc_init_empty_frame()
1099 buffer->skb = skb; in igc_init_empty_frame()
1100 buffer->protocol = 0; in igc_init_empty_frame()
1101 buffer->bytecount = skb->len; in igc_init_empty_frame()
1102 buffer->gso_segs = 1; in igc_init_empty_frame()
1103 buffer->time_stamp = jiffies; in igc_init_empty_frame()
1104 dma_unmap_len_set(buffer, len, skb->len); in igc_init_empty_frame()
1119 return -EBUSY; in igc_init_tx_empty_descriptor()
1127 first->bytecount; in igc_init_tx_empty_descriptor()
1128 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_init_tx_empty_descriptor()
1130 desc = IGC_TX_DESC(ring, ring->next_to_use); in igc_init_tx_empty_descriptor()
1131 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_init_tx_empty_descriptor()
1132 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_init_tx_empty_descriptor()
1133 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); in igc_init_tx_empty_descriptor()
1135 netdev_tx_sent_queue(txring_txq(ring), skb->len); in igc_init_tx_empty_descriptor()
1137 first->next_to_watch = desc; in igc_init_tx_empty_descriptor()
1139 ring->next_to_use++; in igc_init_tx_empty_descriptor()
1140 if (ring->next_to_use == ring->count) in igc_init_tx_empty_descriptor()
1141 ring->next_to_use = 0; in igc_init_tx_empty_descriptor()
1154 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1159 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1165 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1166 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1171 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igc_tx_ctxtdesc()
1172 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igc_tx_ctxtdesc()
1173 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igc_tx_ctxtdesc()
1174 context_desc->launch_time = launch_time; in igc_tx_ctxtdesc()
1180 struct sk_buff *skb = first->skb; in igc_tx_csum()
1184 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igc_tx_csum()
1186 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && in igc_tx_csum()
1187 !tx_ring->launchtime_enable) in igc_tx_csum()
1192 switch (skb->csum_offset) { in igc_tx_csum()
1211 first->tx_flags |= IGC_TX_FLAGS_CSUM; in igc_tx_csum()
1212 vlan_macip_lens = skb_checksum_start_offset(skb) - in igc_tx_csum()
1216 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tx_csum()
1224 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1226 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1235 return -EBUSY; in __igc_maybe_stop_tx()
1238 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1240 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1241 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1242 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
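__igc_maybe_stop_tx() uses the classic stop/re-check/wake sequence so a completion that lands between the stop and the check is not lost. A condensed kernel-context sketch of that sequence (not standalone; igc_desc_unused() stands in for the driver's free-descriptor helper, and the statistics update shown above is omitted):

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier pairs with the one on the completion path so the
	 * re-check below sees any descriptors freed while we were stopping.
	 */
	smp_mb();

	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;		/* ring really is full: stay stopped */

	/* Space was freed while we were stopping the queue: undo the stop. */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	return 0;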
1290 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
1313 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_tx_olinfo_status()
1320 struct sk_buff *skb = first->skb; in igc_tx_map()
1323 u32 tx_flags = first->tx_flags; in igc_tx_map()
1325 u16 i = tx_ring->next_to_use; in igc_tx_map()
1333 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1336 data_len = skb->data_len; in igc_tx_map()
1338 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1342 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igc_tx_map()
1343 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1350 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1353 tx_desc->read.cmd_type_len = in igc_tx_map()
1358 if (i == tx_ring->count) { in igc_tx_map()
1362 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1365 size -= IGC_MAX_DATA_PER_TXD; in igc_tx_map()
1367 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1373 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igc_tx_map()
1377 if (i == tx_ring->count) { in igc_tx_map()
1381 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1384 data_len -= size; in igc_tx_map()
1386 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1389 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1394 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_tx_map()
1396 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1399 first->time_stamp = jiffies; in igc_tx_map()
1404 * are new descriptors to fetch. (Only applicable for weak-ordered in igc_tx_map()
1405 * memory model archs, such as IA-64). in igc_tx_map()
1413 first->next_to_watch = tx_desc; in igc_tx_map()
1416 if (i == tx_ring->count) in igc_tx_map()
1419 tx_ring->next_to_use = i; in igc_tx_map()
1425 writel(i, tx_ring->tail); in igc_tx_map()
1430 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1431 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1436 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1438 if (i-- == 0) in igc_tx_map()
1439 i += tx_ring->count; in igc_tx_map()
1440 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1444 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1446 dev_kfree_skb_any(tx_buffer->skb); in igc_tx_map()
1447 tx_buffer->skb = NULL; in igc_tx_map()
1449 tx_ring->next_to_use = i; in igc_tx_map()
1451 return -1; in igc_tx_map()
1460 struct sk_buff *skb = first->skb; in igc_tso()
1474 if (skb->ip_summed != CHECKSUM_PARTIAL) in igc_tso()
1491 if (ip.v4->version == 4) { in igc_tso()
1493 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igc_tso()
1498 ip.v4->check = csum_fold(csum_partial(trans_start, in igc_tso()
1499 csum_start - trans_start, in igc_tso()
1503 ip.v4->tot_len = 0; in igc_tso()
1504 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1508 ip.v6->payload_len = 0; in igc_tso()
1509 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1514 l4_offset = l4.hdr - skb->data; in igc_tso()
1517 paylen = skb->len - l4_offset; in igc_tso()
1520 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igc_tso()
1521 csum_replace_by_diff(&l4.tcp->check, in igc_tso()
1526 csum_replace_by_diff(&l4.udp->check, in igc_tso()
1531 first->gso_segs = skb_shinfo(skb)->gso_segs; in igc_tso()
1532 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1534 /* MSS L4LEN IDX */ in igc_tso()
1535 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; in igc_tso()
1536 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; in igc_tso()
1539 vlan_macip_lens = l4.hdr - ip.hdr; in igc_tso()
1540 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; in igc_tso()
1541 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tso()
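The TSO path above charges one extra copy of the headers per additional segment to bytecount and packs the L4 header length and MSS into a single descriptor field. A standalone sketch of that arithmetic; the shift values mirror the IGC_ADVTXD_* names but are assumptions here, and the frame sizes are illustrative:

#include <stdio.h>
#include <stdint.h>

#define ADVTXD_L4LEN_SHIFT 8   /* assumed IGC_ADVTXD_L4LEN_SHIFT */
#define ADVTXD_MSS_SHIFT   16  /* assumed IGC_ADVTXD_MSS_SHIFT */

int main(void)
{
	uint32_t hdr_len = 14 + 20 + 20;        /* eth + IPv4 + TCP, no options */
	uint32_t l4_offset = 14 + 20;           /* where the TCP header starts */
	uint32_t skb_len = 4096 + hdr_len;      /* headers plus 4 KB of payload */
	uint32_t gso_size = 1448, gso_segs, bytecount, mss_l4len_idx;

	gso_segs = (skb_len - hdr_len + gso_size - 1) / gso_size;  /* 3 segments */

	/* Each extra segment re-sends the headers, so account for them. */
	bytecount = skb_len + (gso_segs - 1) * hdr_len;

	mss_l4len_idx  = (hdr_len - l4_offset) << ADVTXD_L4LEN_SHIFT;  /* TCP header length */
	mss_l4len_idx |= gso_size << ADVTXD_MSS_SHIFT;

	printf("segs=%u bytecount=%u mss_l4len_idx=0x%08x\n",
	       gso_segs, bytecount, mss_l4len_idx);
	return 0;
}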
1554 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; in igc_request_tx_tstamp()
1556 if (tstamp->skb) in igc_request_tx_tstamp()
1559 tstamp->skb = skb_get(skb); in igc_request_tx_tstamp()
1560 tstamp->start = jiffies; in igc_request_tx_tstamp()
1561 *flags = tstamp->flags; in igc_request_tx_tstamp()
1572 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igc_xmit_frame_ring()
1592 &skb_shinfo(skb)->frags[f])); in igc_xmit_frame_ring()
1599 if (!tx_ring->launchtime_enable) in igc_xmit_frame_ring()
1602 txtime = skb->tstamp; in igc_xmit_frame_ring()
1603 skb->tstamp = ktime_set(0, 0); in igc_xmit_frame_ring()
1611 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1629 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1630 first->type = IGC_TX_BUFFER_TYPE_SKB; in igc_xmit_frame_ring()
1631 first->skb = skb; in igc_xmit_frame_ring()
1632 first->bytecount = skb->len; in igc_xmit_frame_ring()
1633 first->gso_segs = 1; in igc_xmit_frame_ring()
1635 if (adapter->qbv_transition || tx_ring->oper_gate_closed) in igc_xmit_frame_ring()
1638 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { in igc_xmit_frame_ring()
1639 adapter->stats.txdrop++; in igc_xmit_frame_ring()
1643 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && in igc_xmit_frame_ring()
1644 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igc_xmit_frame_ring()
1648 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1650 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igc_xmit_frame_ring()
1652 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) in igc_xmit_frame_ring()
1655 adapter->tx_hwtstamp_skipped++; in igc_xmit_frame_ring()
1658 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1667 first->tx_flags = tx_flags; in igc_xmit_frame_ring()
1668 first->protocol = protocol; in igc_xmit_frame_ring()
1681 dev_kfree_skb_any(first->skb); in igc_xmit_frame_ring()
1682 first->skb = NULL; in igc_xmit_frame_ring()
1690 unsigned int r_idx = skb->queue_mapping; in igc_tx_queue_mapping()
1692 if (r_idx >= adapter->num_tx_queues) in igc_tx_queue_mapping()
1693 r_idx = r_idx % adapter->num_tx_queues; in igc_tx_queue_mapping()
1695 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
1706 if (skb->len < 17) { in igc_xmit_frame()
1709 skb->len = 17; in igc_xmit_frame()
1726 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igc_rx_checksum()
1737 if (!(skb->len == 60 && in igc_rx_checksum()
1738 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igc_rx_checksum()
1739 u64_stats_update_begin(&ring->rx_syncp); in igc_rx_checksum()
1740 ring->rx_stats.csum_err++; in igc_rx_checksum()
1741 u64_stats_update_end(&ring->rx_syncp); in igc_rx_checksum()
1749 skb->ip_summed = CHECKSUM_UNNECESSARY; in igc_rx_checksum()
1751 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", in igc_rx_checksum()
1752 le32_to_cpu(rx_desc->wb.upper.status_error)); in igc_rx_checksum()
1768 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1779 if (ring->netdev->features & NETIF_F_RXHASH) { in igc_rx_hash()
1780 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); in igc_rx_hash()
1791 struct net_device *dev = rx_ring->netdev; in igc_rx_vlan()
1794 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igc_rx_vlan()
1797 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igc_rx_vlan()
1798 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igc_rx_vlan()
1800 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igc_rx_vlan()
1807 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1826 skb_record_rx_queue(skb, rx_ring->queue_index); in igc_process_skb_fields()
1828 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igc_process_skb_fields()
1835 struct igc_hw *hw = &adapter->hw; in igc_vlan_mode()
1852 igc_vlan_mode(adapter->netdev, adapter->netdev->features); in igc_restore_vlan()
1861 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igc_get_rx_buffer()
1864 page_count(rx_buffer->page); in igc_get_rx_buffer()
1868 prefetchw(rx_buffer->page); in igc_get_rx_buffer()
1871 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_get_rx_buffer()
1872 rx_buffer->dma, in igc_get_rx_buffer()
1873 rx_buffer->page_offset, in igc_get_rx_buffer()
1877 rx_buffer->pagecnt_bias--; in igc_get_rx_buffer()
1886 buffer->page_offset ^= truesize; in igc_rx_buffer_flip()
1888 buffer->page_offset += truesize; in igc_rx_buffer_flip()
1909 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1915 * This function will add the data contained in rx_buffer->page to the skb.
1931 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igc_add_rx_frag()
1932 rx_buffer->page_offset, size, truesize); in igc_add_rx_frag()
1941 unsigned int size = xdp->data_end - xdp->data; in igc_build_skb()
1943 unsigned int metasize = xdp->data - xdp->data_meta; in igc_build_skb()
1947 net_prefetch(xdp->data_meta); in igc_build_skb()
1950 skb = napi_build_skb(xdp->data_hard_start, truesize); in igc_build_skb()
1955 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igc_build_skb()
1968 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb()
1969 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb()
1970 unsigned int size = xdp->data_end - xdp->data; in igc_construct_skb()
1972 void *va = xdp->data; in igc_construct_skb()
1977 net_prefetch(xdp->data_meta); in igc_construct_skb()
1980 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in igc_construct_skb()
1985 if (ctx->rx_ts) { in igc_construct_skb()
1986 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb()
1987 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb()
1993 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); in igc_construct_skb()
1996 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, in igc_construct_skb()
2005 size -= headlen; in igc_construct_skb()
2007 skb_add_rx_frag(skb, 0, rx_buffer->page, in igc_construct_skb()
2008 (va + headlen) - page_address(rx_buffer->page), in igc_construct_skb()
2012 rx_buffer->pagecnt_bias++; in igc_construct_skb()
2019 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2028 u16 nta = rx_ring->next_to_alloc; in igc_reuse_rx_page()
2031 new_buff = &rx_ring->rx_buffer_info[nta]; in igc_reuse_rx_page()
2035 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igc_reuse_rx_page()
2041 new_buff->dma = old_buff->dma; in igc_reuse_rx_page()
2042 new_buff->page = old_buff->page; in igc_reuse_rx_page()
2043 new_buff->page_offset = old_buff->page_offset; in igc_reuse_rx_page()
2044 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igc_reuse_rx_page()
2050 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igc_can_reuse_rx_page()
2051 struct page *page = rx_buffer->page; in igc_can_reuse_rx_page()
2053 /* avoid re-using remote and pfmemalloc pages */ in igc_can_reuse_rx_page()
2059 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2063 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) in igc_can_reuse_rx_page()
2065 if (rx_buffer->page_offset > IGC_LAST_OFFSET) in igc_can_reuse_rx_page()
2074 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
2075 rx_buffer->pagecnt_bias = USHRT_MAX; in igc_can_reuse_rx_page()
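The reuse check above works because the driver grabs a large page reference once (USHRT_MAX - 1) and then pays for each buffer it hands to the stack by decrementing a local pagecnt_bias; as long as the real reference count minus the bias stays at 1, nobody outside the driver holds the page and it can be flipped and reused. A standalone toy model of that accounting:

#include <stdio.h>

int main(void)
{
	/* "page_refs" stands in for the real page reference count,
	 * "bias" for rx_buffer->pagecnt_bias.
	 */
	unsigned int page_refs = 1;      /* the allocation's own reference */
	unsigned short bias;

	page_refs += 0xFFFF - 1;         /* page_ref_add(page, USHRT_MAX - 1) */
	bias = 0xFFFF;                   /* pagecnt_bias = USHRT_MAX */

	bias--;                          /* one buffer handed to the stack */

	/* Reusable only while refs minus bias is exactly 1 (our own reference). */
	printf("in-flight references: %u -> %s\n", page_refs - bias,
	       (page_refs - bias) > 1 ? "cannot reuse yet" : "can reuse");
	return 0;
}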
2082 * igc_is_non_eop - process handling of non-EOP buffers
2089 * that this is in fact a non-EOP buffer.
2094 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2097 ntc = (ntc < rx_ring->count) ? ntc : 0; in igc_is_non_eop()
2098 rx_ring->next_to_clean = ntc; in igc_is_non_eop()
2109 * igc_cleanup_headers - Correct corrupted or empty headers
2131 struct net_device *netdev = rx_ring->netdev; in igc_cleanup_headers()
2133 if (!(netdev->features & NETIF_F_RXALL)) { in igc_cleanup_headers()
2157 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igc_put_rx_buffer()
2160 __page_frag_cache_drain(rx_buffer->page, in igc_put_rx_buffer()
2161 rx_buffer->pagecnt_bias); in igc_put_rx_buffer()
2165 rx_buffer->page = NULL; in igc_put_rx_buffer()
2170 struct igc_adapter *adapter = rx_ring->q_vector->adapter; in igc_rx_offset()
2183 struct page *page = bi->page; in igc_alloc_mapped_page()
2193 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2194 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2199 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igc_alloc_mapped_page()
2207 if (dma_mapping_error(rx_ring->dev, dma)) { in igc_alloc_mapped_page()
2210 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2211 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_alloc_mapped_page()
2215 bi->dma = dma; in igc_alloc_mapped_page()
2216 bi->page = page; in igc_alloc_mapped_page()
2217 bi->page_offset = igc_rx_offset(rx_ring); in igc_alloc_mapped_page()
2218 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2219 bi->pagecnt_bias = USHRT_MAX; in igc_alloc_mapped_page()
2225 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2232 u16 i = rx_ring->next_to_use; in igc_alloc_rx_buffers()
2241 bi = &rx_ring->rx_buffer_info[i]; in igc_alloc_rx_buffers()
2242 i -= rx_ring->count; in igc_alloc_rx_buffers()
2251 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igc_alloc_rx_buffers()
2252 bi->page_offset, bufsz, in igc_alloc_rx_buffers()
2256 * because each write-back erases this info. in igc_alloc_rx_buffers()
2258 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igc_alloc_rx_buffers()
2265 bi = rx_ring->rx_buffer_info; in igc_alloc_rx_buffers()
2266 i -= rx_ring->count; in igc_alloc_rx_buffers()
2270 rx_desc->wb.upper.length = 0; in igc_alloc_rx_buffers()
2272 cleaned_count--; in igc_alloc_rx_buffers()
2275 i += rx_ring->count; in igc_alloc_rx_buffers()
2277 if (rx_ring->next_to_use != i) { in igc_alloc_rx_buffers()
2279 rx_ring->next_to_use = i; in igc_alloc_rx_buffers()
2282 rx_ring->next_to_alloc = i; in igc_alloc_rx_buffers()
2286 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers()
2287 * such as IA-64). in igc_alloc_rx_buffers()
2290 writel(i, rx_ring->tail); in igc_alloc_rx_buffers()
2297 u16 i = ring->next_to_use; in igc_alloc_rx_buffers_zc()
2308 bi = &ring->rx_buffer_info[i]; in igc_alloc_rx_buffers_zc()
2309 i -= ring->count; in igc_alloc_rx_buffers_zc()
2312 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
2313 if (!bi->xdp) { in igc_alloc_rx_buffers_zc()
2318 dma = xsk_buff_xdp_get_dma(bi->xdp); in igc_alloc_rx_buffers_zc()
2319 desc->read.pkt_addr = cpu_to_le64(dma); in igc_alloc_rx_buffers_zc()
2326 bi = ring->rx_buffer_info; in igc_alloc_rx_buffers_zc()
2327 i -= ring->count; in igc_alloc_rx_buffers_zc()
2331 desc->wb.upper.length = 0; in igc_alloc_rx_buffers_zc()
2333 count--; in igc_alloc_rx_buffers_zc()
2336 i += ring->count; in igc_alloc_rx_buffers_zc()
2338 if (ring->next_to_use != i) { in igc_alloc_rx_buffers_zc()
2339 ring->next_to_use = i; in igc_alloc_rx_buffers_zc()
2343 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers_zc()
2344 * such as IA-64). in igc_alloc_rx_buffers_zc()
2347 writel(i, ring->tail); in igc_alloc_rx_buffers_zc()
2358 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igc_xdp_init_tx_descriptor()
2359 u16 count, index = ring->next_to_use; in igc_xdp_init_tx_descriptor()
2360 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2363 u32 olinfo_status, len = xdpf->len, cmd_type; in igc_xdp_init_tx_descriptor()
2364 void *data = xdpf->data; in igc_xdp_init_tx_descriptor()
2369 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igc_xdp_init_tx_descriptor()
2373 return -EBUSY; in igc_xdp_init_tx_descriptor()
2377 head->bytecount = xdp_get_frame_len(xdpf); in igc_xdp_init_tx_descriptor()
2378 head->type = IGC_TX_BUFFER_TYPE_XDP; in igc_xdp_init_tx_descriptor()
2379 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
2380 head->xdpf = xdpf; in igc_xdp_init_tx_descriptor()
2382 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_xdp_init_tx_descriptor()
2383 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_init_tx_descriptor()
2388 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in igc_xdp_init_tx_descriptor()
2389 if (dma_mapping_error(ring->dev, dma)) { in igc_xdp_init_tx_descriptor()
2390 netdev_err_once(ring->netdev, in igc_xdp_init_tx_descriptor()
2401 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_init_tx_descriptor()
2402 desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_init_tx_descriptor()
2404 buffer->protocol = 0; in igc_xdp_init_tx_descriptor()
2406 if (++index == ring->count) in igc_xdp_init_tx_descriptor()
2412 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2414 desc->read.olinfo_status = 0; in igc_xdp_init_tx_descriptor()
2416 data = skb_frag_address(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2417 len = skb_frag_size(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2420 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); in igc_xdp_init_tx_descriptor()
2422 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); in igc_xdp_init_tx_descriptor()
2424 head->time_stamp = jiffies; in igc_xdp_init_tx_descriptor()
2426 head->next_to_watch = desc; in igc_xdp_init_tx_descriptor()
2427 ring->next_to_use = index; in igc_xdp_init_tx_descriptor()
2433 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2435 dma_unmap_page(ring->dev, in igc_xdp_init_tx_descriptor()
2444 index += ring->count; in igc_xdp_init_tx_descriptor()
2445 index--; in igc_xdp_init_tx_descriptor()
2448 return -ENOMEM; in igc_xdp_init_tx_descriptor()
2459 while (index >= adapter->num_tx_queues) in igc_xdp_get_tx_ring()
2460 index -= adapter->num_tx_queues; in igc_xdp_get_tx_ring()
2462 return adapter->tx_ring[index]; in igc_xdp_get_tx_ring()
2474 return -EFAULT; in igc_xdp_xmit_back()
2502 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) in __igc_xdp_run_prog()
2507 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2511 trace_xdp_exception(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2524 prog = READ_ONCE(adapter->xdp_prog); in igc_xdp_run_prog()
2533 return ERR_PTR(-res); in igc_xdp_run_prog()
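The ERR_PTR(-res) return above lets igc_xdp_run_prog() hand back either a real skb pointer or an encoded XDP verdict through the same return value, which the caller unpacks with PTR_ERR() (see line 2637 further down). A standalone illustration of the ERR_PTR/PTR_ERR/IS_ERR convention, with the helpers renamed so they do not collide with the kernel's:

#include <stdio.h>

#define MAX_ERRNO 4095

/* A small negative code is stored in the pointer value itself, so one
 * return slot can carry either a valid pointer or an error code.
 */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *skb = err_ptr(-12);  /* pretend -ENOMEM came back */

	if (is_err(skb))
		printf("got error %ld instead of a buffer\n", ptr_err(skb));
	return 0;
}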
2544 writel(ring->next_to_use, ring->tail); in igc_flush_tx_descriptors()
2569 struct igc_ring *ring = q_vector->rx.ring; in igc_update_rx_stats()
2571 u64_stats_update_begin(&ring->rx_syncp); in igc_update_rx_stats()
2572 ring->rx_stats.packets += packets; in igc_update_rx_stats()
2573 ring->rx_stats.bytes += bytes; in igc_update_rx_stats()
2574 u64_stats_update_end(&ring->rx_syncp); in igc_update_rx_stats()
2576 q_vector->rx.total_packets += packets; in igc_update_rx_stats()
2577 q_vector->rx.total_bytes += bytes; in igc_update_rx_stats()
2583 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq()
2584 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2585 struct sk_buff *skb = rx_ring->skb; in igc_clean_rx_irq()
2603 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); in igc_clean_rx_irq()
2604 size = le16_to_cpu(rx_desc->wb.upper.length); in igc_clean_rx_irq()
2617 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igc_clean_rx_irq()
2622 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq()
2626 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); in igc_clean_rx_irq()
2627 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), in igc_clean_rx_irq()
2637 unsigned int xdp_res = -PTR_ERR(skb); in igc_clean_rx_irq()
2641 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2661 rx_ring->rx_stats.alloc_failed++; in igc_clean_rx_irq()
2662 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2663 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_clean_rx_irq()
2670 /* fetch next buffer in frame if non-eop */ in igc_clean_rx_irq()
2681 total_bytes += skb->len; in igc_clean_rx_irq()
2686 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2699 rx_ring->skb = skb; in igc_clean_rx_irq()
2712 unsigned int totalsize = xdp->data_end - xdp->data_meta; in igc_construct_skb_zc()
2713 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb_zc()
2716 net_prefetch(xdp->data_meta); in igc_construct_skb_zc()
2718 skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); in igc_construct_skb_zc()
2722 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in igc_construct_skb_zc()
2738 struct igc_ring *ring = q_vector->rx.ring; in igc_dispatch_skb_zc()
2743 ring->rx_stats.alloc_failed++; in igc_dispatch_skb_zc()
2744 set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags); in igc_dispatch_skb_zc()
2749 skb_hwtstamps(skb)->hwtstamp = timestamp; in igc_dispatch_skb_zc()
2755 napi_gro_receive(&q_vector->napi, skb); in igc_dispatch_skb_zc()
2762 * igc_xdp_buff fields fall into xdp_buff_xsk->cb in xsk_buff_to_igc_ctx()
2769 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq_zc()
2770 struct igc_ring *ring = q_vector->rx.ring; in igc_clean_rx_irq_zc()
2773 u16 ntc = ring->next_to_clean; in igc_clean_rx_irq_zc()
2780 prog = READ_ONCE(adapter->xdp_prog); in igc_clean_rx_irq_zc()
2791 size = le16_to_cpu(desc->wb.upper.length); in igc_clean_rx_irq_zc()
2801 bi = &ring->rx_buffer_info[ntc]; in igc_clean_rx_irq_zc()
2803 ctx = xsk_buff_to_igc_ctx(bi->xdp); in igc_clean_rx_irq_zc()
2804 ctx->rx_desc = desc; in igc_clean_rx_irq_zc()
2807 ctx->rx_ts = bi->xdp->data; in igc_clean_rx_irq_zc()
2809 bi->xdp->data += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2814 bi->xdp->data_meta += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2815 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2818 bi->xdp->data_end = bi->xdp->data + size; in igc_clean_rx_irq_zc()
2819 xsk_buff_dma_sync_for_cpu(bi->xdp); in igc_clean_rx_irq_zc()
2821 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); in igc_clean_rx_irq_zc()
2824 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); in igc_clean_rx_irq_zc()
2827 xsk_buff_free(bi->xdp); in igc_clean_rx_irq_zc()
2835 bi->xdp = NULL; in igc_clean_rx_irq_zc()
2840 if (ntc == ring->count) in igc_clean_rx_irq_zc()
2844 ring->next_to_clean = ntc; in igc_clean_rx_irq_zc()
2855 if (xsk_uses_need_wakeup(ring->xsk_pool)) { in igc_clean_rx_irq_zc()
2856 if (failure || ring->next_to_clean == ring->next_to_use) in igc_clean_rx_irq_zc()
2857 xsk_set_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2859 xsk_clear_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2869 struct igc_ring *ring = q_vector->tx.ring; in igc_update_tx_stats()
2871 u64_stats_update_begin(&ring->tx_syncp); in igc_update_tx_stats()
2872 ring->tx_stats.bytes += bytes; in igc_update_tx_stats()
2873 ring->tx_stats.packets += packets; in igc_update_tx_stats()
2874 u64_stats_update_end(&ring->tx_syncp); in igc_update_tx_stats()
2876 q_vector->tx.total_bytes += bytes; in igc_update_tx_stats()
2877 q_vector->tx.total_packets += packets; in igc_update_tx_stats()
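The counters above are bumped inside a u64_stats_update_begin()/end() section so that 64-bit statistics reads stay consistent on 32-bit machines. A kernel-context sketch of the matching reader loop (not standalone; it assumes the same ring fields used above):

	unsigned int start;
	u64 packets, bytes;

	do {
		/* Retry until the snapshot was not torn by a concurrent update. */
		start = u64_stats_fetch_begin(&ring->tx_syncp);
		packets = ring->tx_stats.packets;
		bytes = ring->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&ring->tx_syncp, start));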
2883 struct igc_ring *tx_ring = meta_req->tx_ring; in igc_xsk_request_timestamp()
2891 if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { in igc_xsk_request_timestamp()
2892 adapter = netdev_priv(tx_ring->netdev); in igc_xsk_request_timestamp()
2894 spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2898 tstamp = &adapter->tx_tstamp[i]; in igc_xsk_request_timestamp()
2900 /* tstamp->skb and tstamp->xsk_tx_buffer are in union. in igc_xsk_request_timestamp()
2901 * When tstamp->skb is equal to NULL, in igc_xsk_request_timestamp()
2902 * tstamp->xsk_tx_buffer is equal to NULL as well. in igc_xsk_request_timestamp()
2906 if (!tstamp->skb) { in igc_xsk_request_timestamp()
2914 adapter->tx_hwtstamp_skipped++; in igc_xsk_request_timestamp()
2915 spin_unlock_irqrestore(&adapter->ptp_tx_lock, in igc_xsk_request_timestamp()
2920 tstamp->start = jiffies; in igc_xsk_request_timestamp()
2921 tstamp->xsk_queue_index = tx_ring->queue_index; in igc_xsk_request_timestamp()
2922 tstamp->xsk_tx_buffer = meta_req->tx_buffer; in igc_xsk_request_timestamp()
2923 tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; in igc_xsk_request_timestamp()
2926 meta_req->tx_buffer->xsk_pending_ts = true; in igc_xsk_request_timestamp()
2932 xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); in igc_xsk_request_timestamp()
2935 tx_flags |= tstamp->flags; in igc_xsk_request_timestamp()
2936 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2939 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2942 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2945 meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, in igc_xsk_request_timestamp()
2949 spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); in igc_xsk_request_timestamp()
2965 struct xsk_buff_pool *pool = ring->xsk_pool; in igc_xdp_xmit_zc()
2972 if (!netif_carrier_ok(ring->netdev)) in igc_xdp_xmit_zc()
2980 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
2983 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { in igc_xdp_xmit_zc()
2999 bi = &ring->tx_buffer_info[ntu]; in igc_xdp_xmit_zc()
3008 tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); in igc_xdp_xmit_zc()
3009 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_xmit_zc()
3010 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_xmit_zc()
3012 bi->type = IGC_TX_BUFFER_TYPE_XSK; in igc_xdp_xmit_zc()
3013 bi->protocol = 0; in igc_xdp_xmit_zc()
3014 bi->bytecount = xdp_desc.len; in igc_xdp_xmit_zc()
3015 bi->gso_segs = 1; in igc_xdp_xmit_zc()
3016 bi->time_stamp = jiffies; in igc_xdp_xmit_zc()
3017 bi->next_to_watch = tx_desc; in igc_xdp_xmit_zc()
3022 if (ntu == ring->count) in igc_xdp_xmit_zc()
3026 ring->next_to_use = ntu; in igc_xdp_xmit_zc()
3036 * igc_clean_tx_irq - Reclaim resources after transmit completes
3044 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
3046 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
3047 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq()
3048 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
3053 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_clean_tx_irq()
3056 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
3058 i -= tx_ring->count; in igc_clean_tx_irq()
3061 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_irq()
3071 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) in igc_clean_tx_irq()
3077 if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && in igc_clean_tx_irq()
3078 tx_buffer->xsk_pending_ts) in igc_clean_tx_irq()
3082 tx_buffer->next_to_watch = NULL; in igc_clean_tx_irq()
3085 total_bytes += tx_buffer->bytecount; in igc_clean_tx_irq()
3086 total_packets += tx_buffer->gso_segs; in igc_clean_tx_irq()
3088 switch (tx_buffer->type) { in igc_clean_tx_irq()
3093 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_irq()
3094 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3097 napi_consume_skb(tx_buffer->skb, napi_budget); in igc_clean_tx_irq()
3098 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3101 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
3111 i -= tx_ring->count; in igc_clean_tx_irq()
3112 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3118 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3126 i -= tx_ring->count; in igc_clean_tx_irq()
3127 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3135 budget--; in igc_clean_tx_irq()
3141 i += tx_ring->count; in igc_clean_tx_irq()
3142 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
3146 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
3148 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
3149 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
3150 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
3154 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
3155 struct igc_hw *hw = &adapter->hw; in igc_clean_tx_irq()
3160 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
3161 if (tx_buffer->next_to_watch && in igc_clean_tx_irq()
3162 time_after(jiffies, tx_buffer->time_stamp + in igc_clean_tx_irq()
3163 (adapter->tx_timeout_factor * HZ)) && in igc_clean_tx_irq()
3165 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && in igc_clean_tx_irq()
3166 !tx_ring->oper_gate_closed) { in igc_clean_tx_irq()
3168 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
3180 tx_ring->queue_index, in igc_clean_tx_irq()
3181 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
3182 readl(tx_ring->tail), in igc_clean_tx_irq()
3183 tx_ring->next_to_use, in igc_clean_tx_irq()
3184 tx_ring->next_to_clean, in igc_clean_tx_irq()
3185 tx_buffer->time_stamp, in igc_clean_tx_irq()
3186 tx_buffer->next_to_watch, in igc_clean_tx_irq()
3188 tx_buffer->next_to_watch->wb.status); in igc_clean_tx_irq()
3189 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3190 tx_ring->queue_index); in igc_clean_tx_irq()
3199 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
3205 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
3206 tx_ring->queue_index) && in igc_clean_tx_irq()
3207 !(test_bit(__IGC_DOWN, &adapter->state))) { in igc_clean_tx_irq()
3208 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3209 tx_ring->queue_index); in igc_clean_tx_irq()
3211 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3212 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
3213 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3223 struct igc_hw *hw = &adapter->hw; in igc_find_mac_filter()
3224 int max_entries = hw->mac.rar_entry_count; in igc_find_mac_filter()
3245 return -1; in igc_find_mac_filter()
3250 struct igc_hw *hw = &adapter->hw; in igc_get_avail_mac_filter_slot()
3251 int max_entries = hw->mac.rar_entry_count; in igc_get_avail_mac_filter_slot()
3262 return -1; in igc_get_avail_mac_filter_slot()
3266 * igc_add_mac_filter() - Add MAC address filter
3270 * @queue: If non-negative, queue assignment feature is enabled and frames
3280 struct net_device *dev = adapter->netdev; in igc_add_mac_filter()
3289 return -ENOSPC; in igc_add_mac_filter()
3301 * igc_del_mac_filter() - Delete MAC address filter
3309 struct net_device *dev = adapter->netdev; in igc_del_mac_filter()
3323 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3335 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3345 struct net_device *dev = adapter->netdev; in igc_add_vlan_prio_filter()
3346 struct igc_hw *hw = &adapter->hw; in igc_add_vlan_prio_filter()
3353 return -EEXIST; in igc_add_vlan_prio_filter()
3367 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3373 struct igc_hw *hw = &adapter->hw; in igc_del_vlan_prio_filter()
3383 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", in igc_del_vlan_prio_filter()
3389 struct igc_hw *hw = &adapter->hw; in igc_get_avail_etype_filter_slot()
3399 return -1; in igc_get_avail_etype_filter_slot()
3403 * igc_add_etype_filter() - Add ethertype filter
3406 * @queue: If non-negative, queue assignment feature is enabled and frames
3415 struct igc_hw *hw = &adapter->hw; in igc_add_etype_filter()
3421 return -ENOSPC; in igc_add_etype_filter()
3438 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", in igc_add_etype_filter()
3445 struct igc_hw *hw = &adapter->hw; in igc_find_etype_filter()
3455 return -1; in igc_find_etype_filter()
3459 * igc_del_etype_filter() - Delete ethertype filter
3465 struct igc_hw *hw = &adapter->hw; in igc_del_etype_filter()
3474 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", in igc_del_etype_filter()
3482 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_select()
3486 if (input->index >= MAX_FLEX_FILTER) { in igc_flex_filter_select()
3487 netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); in igc_flex_filter_select()
3488 return -EINVAL; in igc_flex_filter_select()
3494 switch (input->index) { in igc_flex_filter_select()
3511 fhft_index = input->index % 8; in igc_flex_filter_select()
3514 IGC_FHFT_EXT(fhft_index - 4); in igc_flex_filter_select()
3522 struct igc_hw *hw = &adapter->hw; in igc_write_flex_filter_ll()
3523 u8 *data = input->data; in igc_write_flex_filter_ll()
3524 u8 *mask = input->mask; in igc_write_flex_filter_ll()
3534 if (input->length % 8 != 0) { in igc_write_flex_filter_ll()
3535 netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); in igc_write_flex_filter_ll()
3536 return -EINVAL; in igc_write_flex_filter_ll()
3552 queuing = input->length & IGC_FHFT_LENGTH_MASK; in igc_write_flex_filter_ll()
3553 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); in igc_write_flex_filter_ll()
3554 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); in igc_write_flex_filter_ll()
3556 if (input->immediate_irq) in igc_write_flex_filter_ll()
3559 if (input->drop) in igc_write_flex_filter_ll()
3593 if (input->index > 8) { in igc_write_flex_filter_ll()
3594 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ in igc_write_flex_filter_ll()
3597 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); in igc_write_flex_filter_ll()
3601 wufc |= (IGC_WUFC_FLX0 << input->index); in igc_write_flex_filter_ll()
3605 netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", in igc_write_flex_filter_ll()
3606 input->index); in igc_write_flex_filter_ll()
3618 memcpy(&flex->data[offset], src, len); in igc_flex_filter_add_field()
3627 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3632 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3638 struct igc_hw *hw = &adapter->hw; in igc_find_avail_flex_filter_slot()
3650 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) in igc_find_avail_flex_filter_slot()
3655 return -ENOSPC; in igc_find_avail_flex_filter_slot()
3660 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_in_use()
3678 struct igc_nfc_filter *filter = &rule->filter; in igc_add_flex_filter()
3686 return -ENOSPC; in igc_add_flex_filter()
3689 * -> dest_mac [6] in igc_add_flex_filter()
3690 * -> src_mac [6] in igc_add_flex_filter()
3691 * -> tpid [2] in igc_add_flex_filter()
3692 * -> vlan tci [2] in igc_add_flex_filter()
3693 * -> ether type [2] in igc_add_flex_filter()
3694 * -> user data [8] in igc_add_flex_filter()
3695 * -> = 26 bytes => 32 length in igc_add_flex_filter()
3699 flex.rx_queue = rule->action; in igc_add_flex_filter()
3701 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; in igc_add_flex_filter()
3706 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_add_flex_filter()
3707 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, in igc_add_flex_filter()
3711 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_add_flex_filter()
3712 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, in igc_add_flex_filter()
3716 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { in igc_add_flex_filter()
3717 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); in igc_add_flex_filter()
3724 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) in igc_add_flex_filter()
3725 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, in igc_add_flex_filter()
3726 sizeof(filter->vlan_tci), NULL); in igc_add_flex_filter()
3729 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_add_flex_filter()
3730 __be16 etype = cpu_to_be16(filter->etype); in igc_add_flex_filter()
3737 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) in igc_add_flex_filter()
3738 igc_flex_filter_add_field(&flex, &filter->user_data, in igc_add_flex_filter()
3740 sizeof(filter->user_data), in igc_add_flex_filter()
3741 filter->user_mask); in igc_add_flex_filter()
3748 filter->flex_index = index; in igc_add_flex_filter()
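The layout comment in igc_add_flex_filter() adds up to 26 bytes of matched fields, while igc_write_flex_filter_ll() (line 3534 above) rejects any length that is not a multiple of 8, which is how the comment's 26 bytes become a programmed length of 32. A standalone sketch of that rounding:

#include <stdio.h>

int main(void)
{
	/* dest mac + src mac + tpid + vlan tci + ether type + user data */
	unsigned int used = 6 + 6 + 2 + 2 + 2 + 8;   /* 26 bytes */
	unsigned int length = (used + 7) & ~7u;      /* next multiple of 8 -> 32 */

	printf("%u bytes of fields -> flex filter length %u\n", used, length);
	return 0;
}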
3756 struct igc_hw *hw = &adapter->hw; in igc_del_flex_filter()
3766 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); in igc_del_flex_filter()
3789 if (rule->flex) { in igc_enable_nfc_rule()
3793 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_enable_nfc_rule()
3794 err = igc_add_etype_filter(adapter, rule->filter.etype, in igc_enable_nfc_rule()
3795 rule->action); in igc_enable_nfc_rule()
3800 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { in igc_enable_nfc_rule()
3802 rule->filter.src_addr, rule->action); in igc_enable_nfc_rule()
3807 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { in igc_enable_nfc_rule()
3809 rule->filter.dst_addr, rule->action); in igc_enable_nfc_rule()
3814 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_enable_nfc_rule()
3815 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_enable_nfc_rule()
3817 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); in igc_enable_nfc_rule()
3828 if (rule->flex) { in igc_disable_nfc_rule()
3829 igc_del_flex_filter(adapter, rule->filter.flex_index); in igc_disable_nfc_rule()
3833 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) in igc_disable_nfc_rule()
3834 igc_del_etype_filter(adapter, rule->filter.etype); in igc_disable_nfc_rule()
3836 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_disable_nfc_rule()
3837 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_disable_nfc_rule()
3842 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_disable_nfc_rule()
3844 rule->filter.src_addr); in igc_disable_nfc_rule()
3846 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_disable_nfc_rule()
3848 rule->filter.dst_addr); in igc_disable_nfc_rule()
3852 * igc_get_nfc_rule() - Get NFC rule
3856 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3865 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { in igc_get_nfc_rule()
3866 if (rule->location == location) in igc_get_nfc_rule()
3868 if (rule->location > location) in igc_get_nfc_rule()
3876 * igc_del_nfc_rule() - Delete NFC rule
3882 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3888 list_del(&rule->list); in igc_del_nfc_rule()
3889 adapter->nfc_rule_count--; in igc_del_nfc_rule()
3898 mutex_lock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3900 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) in igc_flush_nfc_rules()
3903 mutex_unlock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3907 * igc_add_nfc_rule() - Add NFC rule
3913 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3927 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { in igc_add_nfc_rule()
3928 if (cur->location >= rule->location) in igc_add_nfc_rule()
3933 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); in igc_add_nfc_rule()
3934 adapter->nfc_rule_count++; in igc_add_nfc_rule()
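/* The rule list is kept sorted by location: igc_add_nfc_rule() links the
 * new rule after the last entry with a smaller location, which is what
 * lets igc_get_nfc_rule() stop scanning once it reaches a larger location.
 */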
3942 mutex_lock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3944 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) in igc_restore_nfc_rules()
3947 mutex_unlock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3954 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
3966 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3972 * promiscuous mode, and all-multi behavior.
3977 struct igc_hw *hw = &adapter->hw; in igc_set_rx_mode()
3982 if (netdev->flags & IFF_PROMISC) { in igc_set_rx_mode()
3985 if (netdev->flags & IFF_ALLMULTI) { in igc_set_rx_mode()
4010 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) in igc_set_rx_mode()
4017 * igc_configure - configure the hardware for RX and TX
4022 struct net_device *netdev = adapter->netdev; in igc_configure()
4040 igc_rx_fifo_flush_base(&adapter->hw); in igc_configure()
4046 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_configure()
4047 struct igc_ring *ring = adapter->rx_ring[i]; in igc_configure()
4049 if (ring->xsk_pool) in igc_configure()
4057 * igc_write_ivar - configure ivar for given MSI-X vector
4083 struct igc_adapter *adapter = q_vector->adapter; in igc_assign_vector()
4084 struct igc_hw *hw = &adapter->hw; in igc_assign_vector()
4088 if (q_vector->rx.ring) in igc_assign_vector()
4089 rx_queue = q_vector->rx.ring->reg_idx; in igc_assign_vector()
4090 if (q_vector->tx.ring) in igc_assign_vector()
4091 tx_queue = q_vector->tx.ring->reg_idx; in igc_assign_vector()
4093 switch (hw->mac.type) { in igc_assign_vector()
4103 q_vector->eims_value = BIT(msix_vector); in igc_assign_vector()
4106 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); in igc_assign_vector()
4111 adapter->eims_enable_mask |= q_vector->eims_value; in igc_assign_vector()
4114 q_vector->set_itr = 1; in igc_assign_vector()
4118 * igc_configure_msix - Configure MSI-X hardware
4122 * generate MSI-X interrupts.
4126 struct igc_hw *hw = &adapter->hw; in igc_configure_msix()
4130 adapter->eims_enable_mask = 0; in igc_configure_msix()
4133 switch (hw->mac.type) { in igc_configure_msix()
4135 /* Turn on MSI-X capability first, or our settings in igc_configure_msix()
4143 adapter->eims_other = BIT(vector); in igc_configure_msix()
4149 /* do nothing, since nothing else supports MSI-X */ in igc_configure_msix()
4151 } /* switch (hw->mac.type) */ in igc_configure_msix()
4153 adapter->eims_enable_mask |= adapter->eims_other; in igc_configure_msix()
4155 for (i = 0; i < adapter->num_q_vectors; i++) in igc_configure_msix()
4156 igc_assign_vector(adapter->q_vector[i], vector++); in igc_configure_msix()
4162 * igc_irq_enable - Enable default interrupt generation settings
4167 struct igc_hw *hw = &adapter->hw; in igc_irq_enable()
4169 if (adapter->msix_entries) { in igc_irq_enable()
4173 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); in igc_irq_enable()
4175 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); in igc_irq_enable()
4176 wr32(IGC_EIMS, adapter->eims_enable_mask); in igc_irq_enable()
4185 * igc_irq_disable - Mask off interrupt generation on the NIC
4190 struct igc_hw *hw = &adapter->hw; in igc_irq_disable()
4192 if (adapter->msix_entries) { in igc_irq_disable()
4195 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4196 wr32(IGC_EIMC, adapter->eims_enable_mask); in igc_irq_disable()
4198 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4205 if (adapter->msix_entries) { in igc_irq_disable()
4208 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4210 for (i = 0; i < adapter->num_q_vectors; i++) in igc_irq_disable()
4211 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4213 synchronize_irq(adapter->pdev->irq); in igc_irq_disable()
4222 * order to conserve interrupts due to limited supply. in igc_set_flag_queue_pairs()
4224 if (adapter->rss_queues > (max_rss_queues / 2)) in igc_set_flag_queue_pairs()
4225 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4227 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
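/* With IGC_FLAG_QUEUE_PAIRS set, the Tx and Rx ring sharing an index also
 * share a single q_vector (and hence one MSI-X vector); this is why
 * igc_set_interrupt_capability() below only adds num_tx_queues to the
 * vector count when the flag is clear.
 */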
4240 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igc_init_queue_configuration()
4246 * igc_reset_q_vector - Reset config for interrupt vector
4255 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_reset_q_vector()
4263 if (q_vector->tx.ring) in igc_reset_q_vector()
4264 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4266 if (q_vector->rx.ring) in igc_reset_q_vector()
4267 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igc_reset_q_vector()
4269 netif_napi_del(&q_vector->napi); in igc_reset_q_vector()
4273 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4281 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_free_q_vector()
4283 adapter->q_vector[v_idx] = NULL; in igc_free_q_vector()
4293 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4302 int v_idx = adapter->num_q_vectors; in igc_free_q_vectors()
4304 adapter->num_tx_queues = 0; in igc_free_q_vectors()
4305 adapter->num_rx_queues = 0; in igc_free_q_vectors()
4306 adapter->num_q_vectors = 0; in igc_free_q_vectors()
4308 while (v_idx--) { in igc_free_q_vectors()
4315 * igc_update_itr - update the dynamic ITR value based on statistics
4326 * NOTE: These calculations are only valid when operating in a single-
4332 unsigned int packets = ring_container->total_packets; in igc_update_itr()
4333 unsigned int bytes = ring_container->total_bytes; in igc_update_itr()
4334 u8 itrval = ring_container->itr; in igc_update_itr()
4374 ring_container->total_bytes = 0; in igc_update_itr()
4375 ring_container->total_packets = 0; in igc_update_itr()
4378 ring_container->itr = itrval; in igc_update_itr()
4383 struct igc_adapter *adapter = q_vector->adapter; in igc_set_itr()
4384 u32 new_itr = q_vector->itr_val; in igc_set_itr()
4387 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igc_set_itr()
4388 switch (adapter->link_speed) { in igc_set_itr()
4398 igc_update_itr(q_vector, &q_vector->tx); in igc_set_itr()
4399 igc_update_itr(q_vector, &q_vector->rx); in igc_set_itr()
4401 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igc_set_itr()
4405 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_set_itr()
4406 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_set_itr()
4425 if (new_itr != q_vector->itr_val) { in igc_set_itr()
4430 new_itr = new_itr > q_vector->itr_val ? in igc_set_itr()
4431 max((new_itr * q_vector->itr_val) / in igc_set_itr()
4432 (new_itr + (q_vector->itr_val >> 2)), in igc_set_itr()
4440 q_vector->itr_val = new_itr; in igc_set_itr()
4441 q_vector->set_itr = 1; in igc_set_itr()
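/* When the computed value is larger than the current itr_val (i.e. the
 * interrupt rate would drop), the expression above appears to blend the
 * new value with the old one so that the moderation interval ramps up
 * gradually instead of jumping; smaller values are taken as-is.
 */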
4447 int v_idx = adapter->num_q_vectors; in igc_reset_interrupt_capability()
4449 if (adapter->msix_entries) { in igc_reset_interrupt_capability()
4450 pci_disable_msix(adapter->pdev); in igc_reset_interrupt_capability()
4451 kfree(adapter->msix_entries); in igc_reset_interrupt_capability()
4452 adapter->msix_entries = NULL; in igc_reset_interrupt_capability()
4453 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_reset_interrupt_capability()
4454 pci_disable_msi(adapter->pdev); in igc_reset_interrupt_capability()
4457 while (v_idx--) in igc_reset_interrupt_capability()
4462 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4464 * @msix: boolean value for MSI-X capability
4477 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4480 adapter->num_rx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4482 adapter->num_tx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4485 numvecs = adapter->num_rx_queues; in igc_set_interrupt_capability()
4488 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) in igc_set_interrupt_capability()
4489 numvecs += adapter->num_tx_queues; in igc_set_interrupt_capability()
4492 adapter->num_q_vectors = numvecs; in igc_set_interrupt_capability()
4497 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), in igc_set_interrupt_capability()
4500 if (!adapter->msix_entries) in igc_set_interrupt_capability()
4505 adapter->msix_entries[i].entry = i; in igc_set_interrupt_capability()
4507 err = pci_enable_msix_range(adapter->pdev, in igc_set_interrupt_capability()
4508 adapter->msix_entries, in igc_set_interrupt_capability()
4514 kfree(adapter->msix_entries); in igc_set_interrupt_capability()
4515 adapter->msix_entries = NULL; in igc_set_interrupt_capability()
4520 adapter->flags &= ~IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4522 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4523 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_interrupt_capability()
4524 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4525 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4526 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
4527 if (!pci_enable_msi(adapter->pdev)) in igc_set_interrupt_capability()
4528 adapter->flags |= IGC_FLAG_HAS_MSI; in igc_set_interrupt_capability()
4532 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4547 struct igc_adapter *adapter = q_vector->adapter; in igc_update_ring_itr()
4548 int new_val = q_vector->itr_val; in igc_update_ring_itr()
4552 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igc_update_ring_itr()
4553 * ints/sec - ITR timer value of 120 ticks. in igc_update_ring_itr()
4555 switch (adapter->link_speed) { in igc_update_ring_itr()
4564 packets = q_vector->rx.total_packets; in igc_update_ring_itr()
4566 avg_wire_size = q_vector->rx.total_bytes / packets; in igc_update_ring_itr()
4568 packets = q_vector->tx.total_packets; in igc_update_ring_itr()
4571 q_vector->tx.total_bytes / packets); in igc_update_ring_itr()
4583 /* Give a little boost to mid-size frames */ in igc_update_ring_itr()
4591 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_update_ring_itr()
4592 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_update_ring_itr()
4596 if (new_val != q_vector->itr_val) { in igc_update_ring_itr()
4597 q_vector->itr_val = new_val; in igc_update_ring_itr()
4598 q_vector->set_itr = 1; in igc_update_ring_itr()
4601 q_vector->rx.total_bytes = 0; in igc_update_ring_itr()
4602 q_vector->rx.total_packets = 0; in igc_update_ring_itr()
4603 q_vector->tx.total_bytes = 0; in igc_update_ring_itr()
4604 q_vector->tx.total_packets = 0; in igc_update_ring_itr()
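/* The average wire size computed above appears to drive the new ITR value
 * (bigger average frames generally get a longer moderation interval, with
 * the mid-size-frame boost noted above) before both directions' byte and
 * packet counters are cleared for the next sampling period.
 */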
4609 struct igc_adapter *adapter = q_vector->adapter; in igc_ring_irq_enable()
4610 struct igc_hw *hw = &adapter->hw; in igc_ring_irq_enable()
4612 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igc_ring_irq_enable()
4613 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igc_ring_irq_enable()
4614 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4620 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_ring_irq_enable()
4621 if (adapter->msix_entries) in igc_ring_irq_enable()
4622 wr32(IGC_EIMS, q_vector->eims_value); in igc_ring_irq_enable()
4631 head->ring = ring; in igc_add_ring()
4632 head->count++; in igc_add_ring()
4636 * igc_cache_ring_register - Descriptor ring to register mapping
4639 * Once we know the feature-set enabled for the device, we'll cache
4646 switch (adapter->hw.mac.type) { in igc_cache_ring_register()
4649 for (; i < adapter->num_rx_queues; i++) in igc_cache_ring_register()
4650 adapter->rx_ring[i]->reg_idx = i; in igc_cache_ring_register()
4651 for (; j < adapter->num_tx_queues; j++) in igc_cache_ring_register()
4652 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4658 * igc_poll - NAPI Rx polling callback
4667 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_poll()
4671 if (q_vector->tx.ring) in igc_poll()
4675 int cleaned = rx_ring->xsk_pool ? in igc_poll()
4688 /* Exit the polling mode, but don't re-enable interrupts if stack might in igc_poll()
4689 * poll us due to busy-polling in igc_poll()
4694 return min(work_done, budget - 1); in igc_poll()
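/* NAPI convention: a poll with work still pending returns the full budget
 * to stay in polling mode, while this completion path returns at most
 * budget - 1 so the core knows the queue was fully serviced.
 */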
4698 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4707 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4720 return -ENOMEM; in igc_alloc_q_vector()
4725 q_vector = adapter->q_vector[v_idx]; in igc_alloc_q_vector()
4732 return -ENOMEM; in igc_alloc_q_vector()
4735 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); in igc_alloc_q_vector()
4738 adapter->q_vector[v_idx] = q_vector; in igc_alloc_q_vector()
4739 q_vector->adapter = adapter; in igc_alloc_q_vector()
4742 q_vector->tx.work_limit = adapter->tx_work_limit; in igc_alloc_q_vector()
4745 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); in igc_alloc_q_vector()
4746 q_vector->itr_val = IGC_START_ITR; in igc_alloc_q_vector()
4749 ring = q_vector->ring; in igc_alloc_q_vector()
4754 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igc_alloc_q_vector()
4755 q_vector->itr_val = adapter->rx_itr_setting; in igc_alloc_q_vector()
4758 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igc_alloc_q_vector()
4759 q_vector->itr_val = adapter->tx_itr_setting; in igc_alloc_q_vector()
4764 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4765 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4768 ring->q_vector = q_vector; in igc_alloc_q_vector()
4771 igc_add_ring(ring, &q_vector->tx); in igc_alloc_q_vector()
4774 ring->count = adapter->tx_ring_count; in igc_alloc_q_vector()
4775 ring->queue_index = txr_idx; in igc_alloc_q_vector()
4778 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4786 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4787 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4790 ring->q_vector = q_vector; in igc_alloc_q_vector()
4793 igc_add_ring(ring, &q_vector->rx); in igc_alloc_q_vector()
4796 ring->count = adapter->rx_ring_count; in igc_alloc_q_vector()
4797 ring->queue_index = rxr_idx; in igc_alloc_q_vector()
4800 adapter->rx_ring[rxr_idx] = ring; in igc_alloc_q_vector()
4807 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4811 * return -ENOMEM.
4815 int rxr_remaining = adapter->num_rx_queues; in igc_alloc_q_vectors()
4816 int txr_remaining = adapter->num_tx_queues; in igc_alloc_q_vectors()
4818 int q_vectors = adapter->num_q_vectors; in igc_alloc_q_vectors()
4830 rxr_remaining--; in igc_alloc_q_vectors()
4836 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4837 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4846 rxr_remaining -= rqpv; in igc_alloc_q_vectors()
4847 txr_remaining -= tqpv; in igc_alloc_q_vectors()
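/* Example of the DIV_ROUND_UP split above: with 4 Rx and 4 Tx queues left
 * and 4 vectors remaining, every vector gets 1 Rx and 1 Tx ring; with 3 Rx
 * queues left and 2 vectors remaining, the first vector takes 2 Rx rings
 * and the last vector takes the remaining one.
 */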
4855 adapter->num_tx_queues = 0; in igc_alloc_q_vectors()
4856 adapter->num_rx_queues = 0; in igc_alloc_q_vectors()
4857 adapter->num_q_vectors = 0; in igc_alloc_q_vectors()
4859 while (v_idx--) in igc_alloc_q_vectors()
4862 return -ENOMEM; in igc_alloc_q_vectors()
4866 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4868 * @msix: boolean for MSI-X capability
4874 struct net_device *dev = adapter->netdev; in igc_init_interrupt_scheme()
4895 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4904 struct net_device *netdev = adapter->netdev; in igc_sw_init()
4905 struct pci_dev *pdev = adapter->pdev; in igc_sw_init()
4906 struct igc_hw *hw = &adapter->hw; in igc_sw_init()
4908 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igc_sw_init()
4911 adapter->tx_ring_count = IGC_DEFAULT_TXD; in igc_sw_init()
4912 adapter->rx_ring_count = IGC_DEFAULT_RXD; in igc_sw_init()
4915 adapter->rx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4916 adapter->tx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4919 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; in igc_sw_init()
4922 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igc_sw_init()
4924 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igc_sw_init()
4926 mutex_init(&adapter->nfc_rule_lock); in igc_sw_init()
4927 INIT_LIST_HEAD(&adapter->nfc_rule_list); in igc_sw_init()
4928 adapter->nfc_rule_count = 0; in igc_sw_init()
4930 spin_lock_init(&adapter->stats64_lock); in igc_sw_init()
4931 spin_lock_init(&adapter->qbv_tx_lock); in igc_sw_init()
4932 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igc_sw_init()
4933 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_sw_init()
4940 return -ENOMEM; in igc_sw_init()
4946 set_bit(__IGC_DOWN, &adapter->state); in igc_sw_init()
4952 * igc_up - Open the interface and prepare it to handle traffic
4957 struct igc_hw *hw = &adapter->hw; in igc_up()
4963 clear_bit(__IGC_DOWN, &adapter->state); in igc_up()
4965 for (i = 0; i < adapter->num_q_vectors; i++) in igc_up()
4966 napi_enable(&adapter->q_vector[i]->napi); in igc_up()
4968 if (adapter->msix_entries) in igc_up()
4971 igc_assign_vector(adapter->q_vector[0], 0); in igc_up()
4977 netif_tx_start_all_queues(adapter->netdev); in igc_up()
4980 hw->mac.get_link_status = true; in igc_up()
4981 schedule_work(&adapter->watchdog_task); in igc_up()
4985 * igc_update_stats - Update the board statistics counters
4990 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igc_update_stats()
4991 struct pci_dev *pdev = adapter->pdev; in igc_update_stats()
4992 struct igc_hw *hw = &adapter->hw; in igc_update_stats()
5002 if (adapter->link_speed == 0) in igc_update_stats()
5011 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_update_stats()
5012 struct igc_ring *ring = adapter->rx_ring[i]; in igc_update_stats()
5015 if (hw->mac.type >= igc_i225) in igc_update_stats()
5019 ring->rx_stats.drops += rqdpc; in igc_update_stats()
5020 net_stats->rx_fifo_errors += rqdpc; in igc_update_stats()
5024 start = u64_stats_fetch_begin(&ring->rx_syncp); in igc_update_stats()
5025 _bytes = ring->rx_stats.bytes; in igc_update_stats()
5026 _packets = ring->rx_stats.packets; in igc_update_stats()
5027 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igc_update_stats()
5032 net_stats->rx_bytes = bytes; in igc_update_stats()
5033 net_stats->rx_packets = packets; in igc_update_stats()
5037 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_update_stats()
5038 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
5041 start = u64_stats_fetch_begin(&ring->tx_syncp); in igc_update_stats()
5042 _bytes = ring->tx_stats.bytes; in igc_update_stats()
5043 _packets = ring->tx_stats.packets; in igc_update_stats()
5044 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igc_update_stats()
5048 net_stats->tx_bytes = bytes; in igc_update_stats()
5049 net_stats->tx_packets = packets; in igc_update_stats()
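/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops above allow
 * the per-ring 64-bit byte and packet counters to be read consistently on
 * 32-bit hosts, where the writer may update them in two halves.
 */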
5053 adapter->stats.crcerrs += rd32(IGC_CRCERRS); in igc_update_stats()
5054 adapter->stats.gprc += rd32(IGC_GPRC); in igc_update_stats()
5055 adapter->stats.gorc += rd32(IGC_GORCL); in igc_update_stats()
5057 adapter->stats.bprc += rd32(IGC_BPRC); in igc_update_stats()
5058 adapter->stats.mprc += rd32(IGC_MPRC); in igc_update_stats()
5059 adapter->stats.roc += rd32(IGC_ROC); in igc_update_stats()
5061 adapter->stats.prc64 += rd32(IGC_PRC64); in igc_update_stats()
5062 adapter->stats.prc127 += rd32(IGC_PRC127); in igc_update_stats()
5063 adapter->stats.prc255 += rd32(IGC_PRC255); in igc_update_stats()
5064 adapter->stats.prc511 += rd32(IGC_PRC511); in igc_update_stats()
5065 adapter->stats.prc1023 += rd32(IGC_PRC1023); in igc_update_stats()
5066 adapter->stats.prc1522 += rd32(IGC_PRC1522); in igc_update_stats()
5067 adapter->stats.tlpic += rd32(IGC_TLPIC); in igc_update_stats()
5068 adapter->stats.rlpic += rd32(IGC_RLPIC); in igc_update_stats()
5069 adapter->stats.hgptc += rd32(IGC_HGPTC); in igc_update_stats()
5072 adapter->stats.mpc += mpc; in igc_update_stats()
5073 net_stats->rx_fifo_errors += mpc; in igc_update_stats()
5074 adapter->stats.scc += rd32(IGC_SCC); in igc_update_stats()
5075 adapter->stats.ecol += rd32(IGC_ECOL); in igc_update_stats()
5076 adapter->stats.mcc += rd32(IGC_MCC); in igc_update_stats()
5077 adapter->stats.latecol += rd32(IGC_LATECOL); in igc_update_stats()
5078 adapter->stats.dc += rd32(IGC_DC); in igc_update_stats()
5079 adapter->stats.rlec += rd32(IGC_RLEC); in igc_update_stats()
5080 adapter->stats.xonrxc += rd32(IGC_XONRXC); in igc_update_stats()
5081 adapter->stats.xontxc += rd32(IGC_XONTXC); in igc_update_stats()
5082 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); in igc_update_stats()
5083 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); in igc_update_stats()
5084 adapter->stats.fcruc += rd32(IGC_FCRUC); in igc_update_stats()
5085 adapter->stats.gptc += rd32(IGC_GPTC); in igc_update_stats()
5086 adapter->stats.gotc += rd32(IGC_GOTCL); in igc_update_stats()
5088 adapter->stats.rnbc += rd32(IGC_RNBC); in igc_update_stats()
5089 adapter->stats.ruc += rd32(IGC_RUC); in igc_update_stats()
5090 adapter->stats.rfc += rd32(IGC_RFC); in igc_update_stats()
5091 adapter->stats.rjc += rd32(IGC_RJC); in igc_update_stats()
5092 adapter->stats.tor += rd32(IGC_TORH); in igc_update_stats()
5093 adapter->stats.tot += rd32(IGC_TOTH); in igc_update_stats()
5094 adapter->stats.tpr += rd32(IGC_TPR); in igc_update_stats()
5096 adapter->stats.ptc64 += rd32(IGC_PTC64); in igc_update_stats()
5097 adapter->stats.ptc127 += rd32(IGC_PTC127); in igc_update_stats()
5098 adapter->stats.ptc255 += rd32(IGC_PTC255); in igc_update_stats()
5099 adapter->stats.ptc511 += rd32(IGC_PTC511); in igc_update_stats()
5100 adapter->stats.ptc1023 += rd32(IGC_PTC1023); in igc_update_stats()
5101 adapter->stats.ptc1522 += rd32(IGC_PTC1522); in igc_update_stats()
5103 adapter->stats.mptc += rd32(IGC_MPTC); in igc_update_stats()
5104 adapter->stats.bptc += rd32(IGC_BPTC); in igc_update_stats()
5106 adapter->stats.tpt += rd32(IGC_TPT); in igc_update_stats()
5107 adapter->stats.colc += rd32(IGC_COLC); in igc_update_stats()
5108 adapter->stats.colc += rd32(IGC_RERC); in igc_update_stats()
5110 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); in igc_update_stats()
5112 adapter->stats.tsctc += rd32(IGC_TSCTC); in igc_update_stats()
5114 adapter->stats.iac += rd32(IGC_IAC); in igc_update_stats()
5117 net_stats->multicast = adapter->stats.mprc; in igc_update_stats()
5118 net_stats->collisions = adapter->stats.colc; in igc_update_stats()
5125 net_stats->rx_errors = adapter->stats.rxerrc + in igc_update_stats()
5126 adapter->stats.crcerrs + adapter->stats.algnerrc + in igc_update_stats()
5127 adapter->stats.ruc + adapter->stats.roc + in igc_update_stats()
5128 adapter->stats.cexterr; in igc_update_stats()
5129 net_stats->rx_length_errors = adapter->stats.ruc + in igc_update_stats()
5130 adapter->stats.roc; in igc_update_stats()
5131 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igc_update_stats()
5132 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igc_update_stats()
5133 net_stats->rx_missed_errors = adapter->stats.mpc; in igc_update_stats()
5136 net_stats->tx_errors = adapter->stats.ecol + in igc_update_stats()
5137 adapter->stats.latecol; in igc_update_stats()
5138 net_stats->tx_aborted_errors = adapter->stats.ecol; in igc_update_stats()
5139 net_stats->tx_window_errors = adapter->stats.latecol; in igc_update_stats()
5140 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igc_update_stats()
5143 net_stats->tx_dropped = adapter->stats.txdrop; in igc_update_stats()
5146 adapter->stats.mgptc += rd32(IGC_MGTPTC); in igc_update_stats()
5147 adapter->stats.mgprc += rd32(IGC_MGTPRC); in igc_update_stats()
5148 adapter->stats.mgpdc += rd32(IGC_MGTPDC); in igc_update_stats()
5152 * igc_down - Close the interface
5157 struct net_device *netdev = adapter->netdev; in igc_down()
5158 struct igc_hw *hw = &adapter->hw; in igc_down()
5162 set_bit(__IGC_DOWN, &adapter->state); in igc_down()
5166 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5178 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5190 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_down()
5192 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_down()
5193 if (adapter->q_vector[i]) { in igc_down()
5194 napi_synchronize(&adapter->q_vector[i]->napi); in igc_down()
5195 napi_disable(&adapter->q_vector[i]->napi); in igc_down()
5199 del_timer_sync(&adapter->watchdog_timer); in igc_down()
5200 del_timer_sync(&adapter->phy_info_timer); in igc_down()
5203 spin_lock(&adapter->stats64_lock); in igc_down()
5205 spin_unlock(&adapter->stats64_lock); in igc_down()
5207 adapter->link_speed = 0; in igc_down()
5208 adapter->link_duplex = 0; in igc_down()
5210 if (!pci_channel_offline(adapter->pdev)) in igc_down()
5214 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; in igc_down()
5223 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_reinit_locked()
5227 clear_bit(__IGC_RESETTING, &adapter->state); in igc_reinit_locked()
5238 if (test_bit(__IGC_DOWN, &adapter->state) || in igc_reset_task()
5239 test_bit(__IGC_RESETTING, &adapter->state)) { in igc_reset_task()
5246 netdev_err(adapter->netdev, "Reset adapter\n"); in igc_reset_task()
5252 * igc_change_mtu - Change the Maximum Transfer Unit
5265 return -EINVAL; in igc_change_mtu()
5272 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_change_mtu()
5276 adapter->max_frame_size = max_frame; in igc_change_mtu()
5281 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); in igc_change_mtu()
5282 WRITE_ONCE(netdev->mtu, new_mtu); in igc_change_mtu()
5289 clear_bit(__IGC_RESETTING, &adapter->state); in igc_change_mtu()
5295 * igc_tx_timeout - Respond to a Tx Hang
5303 struct igc_hw *hw = &adapter->hw; in igc_tx_timeout()
5306 adapter->tx_timeout_count++; in igc_tx_timeout()
5307 schedule_work(&adapter->reset_task); in igc_tx_timeout()
5309 (adapter->eims_enable_mask & ~adapter->eims_other)); in igc_tx_timeout()
5313 * igc_get_stats64 - Get System Network Statistics
5325 spin_lock(&adapter->stats64_lock); in igc_get_stats64()
5326 if (!test_bit(__IGC_RESETTING, &adapter->state)) in igc_get_stats64()
5328 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igc_get_stats64()
5329 spin_unlock(&adapter->stats64_lock); in igc_get_stats64()
5349 netdev_features_t changed = netdev->features ^ features; in igc_set_features()
5362 netdev->features = features; in igc_set_features()
5387 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igc_features_check()
5397 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igc_features_check()
5405 struct igc_hw *hw = &adapter->hw; in igc_tsync_interrupt()
5414 if (adapter->ptp_caps.pps) in igc_tsync_interrupt()
5415 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5424 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5425 ts = timespec64_add(adapter->perout[0].start, in igc_tsync_interrupt()
5426 adapter->perout[0].period); in igc_tsync_interrupt()
5432 adapter->perout[0].start = ts; in igc_tsync_interrupt()
5433 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5437 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5438 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5439 adapter->perout[1].period); in igc_tsync_interrupt()
5445 adapter->perout[1].start = ts; in igc_tsync_interrupt()
5446 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5455 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5464 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5469 * igc_msix_other - msix other interrupt handler
5476 struct igc_hw *hw = &adapter->hw; in igc_msix_other()
5481 schedule_work(&adapter->reset_task); in igc_msix_other()
5485 adapter->stats.doosync++; in igc_msix_other()
5489 hw->mac.get_link_status = true; in igc_msix_other()
5491 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_msix_other()
5492 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5498 wr32(IGC_EIMS, adapter->eims_other); in igc_msix_other()
5505 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; in igc_write_itr()
5507 if (!q_vector->set_itr) in igc_write_itr()
5515 writel(itr_val, q_vector->itr_register); in igc_write_itr()
5516 q_vector->set_itr = 0; in igc_write_itr()
5526 napi_schedule(&q_vector->napi); in igc_msix_ring()
5532 * igc_request_msix - Initialize MSI-X interrupts
5535 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5540 unsigned int num_q_vectors = adapter->num_q_vectors; in igc_request_msix()
5542 struct net_device *netdev = adapter->netdev; in igc_request_msix()
5544 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5545 &igc_msix_other, 0, netdev->name, adapter); in igc_request_msix()
5551 dev_warn(&adapter->pdev->dev, in igc_request_msix()
5553 adapter->num_q_vectors, MAX_Q_VECTORS); in igc_request_msix()
5556 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_request_msix()
5560 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); in igc_request_msix()
5562 if (q_vector->rx.ring && q_vector->tx.ring) in igc_request_msix()
5563 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igc_request_msix()
5564 q_vector->rx.ring->queue_index); in igc_request_msix()
5565 else if (q_vector->tx.ring) in igc_request_msix()
5566 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igc_request_msix()
5567 q_vector->tx.ring->queue_index); in igc_request_msix()
5568 else if (q_vector->rx.ring) in igc_request_msix()
5569 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igc_request_msix()
5570 q_vector->rx.ring->queue_index); in igc_request_msix()
5572 sprintf(q_vector->name, "%s-unused", netdev->name); in igc_request_msix()
5574 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5575 igc_msix_ring, 0, q_vector->name, in igc_request_msix()
5586 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igc_request_msix()
5588 vector--; in igc_request_msix()
5590 free_irq(adapter->msix_entries[free_vector++].vector, in igc_request_msix()
5591 adapter->q_vector[i]); in igc_request_msix()
5598 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5602 * MSI-X interrupts allocated.
5617 igc_get_phy_info(&adapter->hw); in igc_update_phy_info()
5621 * igc_has_link - check shared code for link and determine up/down
5626 struct igc_hw *hw = &adapter->hw; in igc_has_link()
5634 if (!hw->mac.get_link_status) in igc_has_link()
5636 hw->mac.ops.check_for_link(hw); in igc_has_link()
5637 link_active = !hw->mac.get_link_status; in igc_has_link()
5639 if (hw->mac.type == igc_i225) { in igc_has_link()
5640 if (!netif_carrier_ok(adapter->netdev)) { in igc_has_link()
5641 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5642 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { in igc_has_link()
5643 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5644 adapter->link_check_timeout = jiffies; in igc_has_link()
5652 * igc_watchdog - Timer Call-back
5659 schedule_work(&adapter->watchdog_task); in igc_watchdog()
5667 struct net_device *netdev = adapter->netdev; in igc_watchdog_task()
5668 struct igc_hw *hw = &adapter->hw; in igc_watchdog_task()
5669 struct igc_phy_info *phy = &hw->phy; in igc_watchdog_task()
5676 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { in igc_watchdog_task()
5677 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igc_watchdog_task()
5678 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_watchdog_task()
5685 pm_runtime_resume(netdev->dev.parent); in igc_watchdog_task()
5690 hw->mac.ops.get_speed_and_duplex(hw, in igc_watchdog_task()
5691 &adapter->link_speed, in igc_watchdog_task()
5692 &adapter->link_duplex); in igc_watchdog_task()
5698 adapter->link_speed, in igc_watchdog_task()
5699 adapter->link_duplex == FULL_DUPLEX ? in igc_watchdog_task()
5707 if ((adapter->flags & IGC_FLAG_EEE) && in igc_watchdog_task()
5708 adapter->link_duplex == HALF_DUPLEX) { in igc_watchdog_task()
5710 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); in igc_watchdog_task()
5711 adapter->hw.dev_spec._base.eee_enable = false; in igc_watchdog_task()
5712 adapter->flags &= ~IGC_FLAG_EEE; in igc_watchdog_task()
5717 if (phy->speed_downgraded) in igc_watchdog_task()
5721 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5722 switch (adapter->link_speed) { in igc_watchdog_task()
5724 adapter->tx_timeout_factor = 14; in igc_watchdog_task()
5729 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5735 * based on link-up activity. Write into the register in igc_watchdog_task()
5740 if (adapter->link_speed != SPEED_1000) in igc_watchdog_task()
5750 retry_count--; in igc_watchdog_task()
5756 netdev_err(netdev, "read 1000Base-T Status Reg\n"); in igc_watchdog_task()
5762 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5763 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5768 adapter->link_speed = 0; in igc_watchdog_task()
5769 adapter->link_duplex = 0; in igc_watchdog_task()
5776 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5777 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5780 pm_schedule_suspend(netdev->dev.parent, in igc_watchdog_task()
5785 spin_lock(&adapter->stats64_lock); in igc_watchdog_task()
5787 spin_unlock(&adapter->stats64_lock); in igc_watchdog_task()
5789 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_watchdog_task()
5790 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task()
5798 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5799 adapter->tx_timeout_count++; in igc_watchdog_task()
5800 schedule_work(&adapter->reset_task); in igc_watchdog_task()
5807 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
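/* Setting IGC_RING_FLAG_TX_DETECT_HANG every watchdog period presumably
 * arms the per-ring hang check that the Tx completion path evaluates; a
 * ring still holding unreclaimed descriptors by then is treated as hung.
 */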
5811 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_watchdog_task()
5814 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_watchdog_task()
5815 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_watchdog_task()
5818 if (!q_vector->rx.ring) in igc_watchdog_task()
5821 rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; in igc_watchdog_task()
5823 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5824 eics |= q_vector->eims_value; in igc_watchdog_task()
5825 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5831 struct igc_ring *rx_ring = adapter->rx_ring[0]; in igc_watchdog_task()
5833 if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { in igc_watchdog_task()
5834 clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); in igc_watchdog_task()
5842 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_watchdog_task()
5843 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) in igc_watchdog_task()
5844 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5847 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5853 * igc_intr_msi - Interrupt Handler
5860 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr_msi()
5861 struct igc_hw *hw = &adapter->hw; in igc_intr_msi()
5868 schedule_work(&adapter->reset_task); in igc_intr_msi()
5872 adapter->stats.doosync++; in igc_intr_msi()
5876 hw->mac.get_link_status = true; in igc_intr_msi()
5877 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr_msi()
5878 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
5884 napi_schedule(&q_vector->napi); in igc_intr_msi()
5890 * igc_intr - Legacy Interrupt Handler
5897 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr()
5898 struct igc_hw *hw = &adapter->hw; in igc_intr()
5899 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igc_intr()
5904 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igc_intr()
5913 schedule_work(&adapter->reset_task); in igc_intr()
5917 adapter->stats.doosync++; in igc_intr()
5921 hw->mac.get_link_status = true; in igc_intr()
5923 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr()
5924 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
5930 napi_schedule(&q_vector->napi); in igc_intr()
5937 if (adapter->msix_entries) { in igc_free_irq()
5940 free_irq(adapter->msix_entries[vector++].vector, adapter); in igc_free_irq()
5942 for (i = 0; i < adapter->num_q_vectors; i++) in igc_free_irq()
5943 free_irq(adapter->msix_entries[vector++].vector, in igc_free_irq()
5944 adapter->q_vector[i]); in igc_free_irq()
5946 free_irq(adapter->pdev->irq, adapter); in igc_free_irq()
5951 * igc_request_irq - initialize interrupts
5959 struct net_device *netdev = adapter->netdev; in igc_request_irq()
5960 struct pci_dev *pdev = adapter->pdev; in igc_request_irq()
5963 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_request_irq()
5980 igc_assign_vector(adapter->q_vector[0], 0); in igc_request_irq()
5982 if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_request_irq()
5983 err = request_irq(pdev->irq, &igc_intr_msi, 0, in igc_request_irq()
5984 netdev->name, adapter); in igc_request_irq()
5990 adapter->flags &= ~IGC_FLAG_HAS_MSI; in igc_request_irq()
5993 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, in igc_request_irq()
5994 netdev->name, adapter); in igc_request_irq()
6004 * __igc_open - Called when a network interface is made active
6019 struct pci_dev *pdev = adapter->pdev; in __igc_open()
6020 struct igc_hw *hw = &adapter->hw; in __igc_open()
6026 if (test_bit(__IGC_TESTING, &adapter->state)) { in __igc_open()
6028 return -EBUSY; in __igc_open()
6032 pm_runtime_get_sync(&pdev->dev); in __igc_open()
6054 clear_bit(__IGC_DOWN, &adapter->state); in __igc_open()
6056 for (i = 0; i < adapter->num_q_vectors; i++) in __igc_open()
6057 napi_enable(&adapter->q_vector[i]->napi); in __igc_open()
6064 pm_runtime_put(&pdev->dev); in __igc_open()
6069 hw->mac.get_link_status = true; in __igc_open()
6070 schedule_work(&adapter->watchdog_task); in __igc_open()
6076 igc_power_down_phy_copper_base(&adapter->hw); in __igc_open()
6083 pm_runtime_put(&pdev->dev); in __igc_open()
6094 err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, in igc_open()
6095 adapter->num_rx_queues); in igc_open()
6105 * __igc_close - Disables a network interface
6111 * The close entry point is called when an interface is de-activated
6119 struct pci_dev *pdev = adapter->pdev; in __igc_close()
6121 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); in __igc_close()
6124 pm_runtime_get_sync(&pdev->dev); in __igc_close()
6136 pm_runtime_put_sync(&pdev->dev); in __igc_close()
6143 if (netif_device_present(netdev) || netdev->dismantle) in igc_close()
6149 * igc_ioctl - Access the hwtstamp interface
6162 return -EOPNOTSUPP; in igc_ioctl()
6171 if (queue < 0 || queue >= adapter->num_tx_queues) in igc_save_launchtime_params()
6172 return -EINVAL; in igc_save_launchtime_params()
6174 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
6175 ring->launchtime_enable = enable; in igc_save_launchtime_params()
6193 struct igc_hw *hw = &adapter->hw; in validate_schedule()
6197 if (qopt->cycle_time_extension) in validate_schedule()
6208 if (!is_base_time_past(qopt->base_time, &now) && in validate_schedule()
6212 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
6216 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6217 e = &qopt->entries[n]; in validate_schedule()
6222 if (e->command != TC_TAPRIO_CMD_SET_GATES) in validate_schedule()
6225 for (i = 0; i < adapter->num_tx_queues; i++) in validate_schedule()
6226 if (e->gate_mask & BIT(i)) { in validate_schedule()
6234 !(prev->gate_mask & BIT(i))) in validate_schedule()
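/* The prev->gate_mask test above appears to reject schedules where a queue
 * is opened, closed and re-opened within one cycle; only a contiguous run
 * of entries may keep a queue open, since igc_save_qbv_schedule() programs
 * a single start/end window per ring.
 */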
6245 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_launchtime()
6248 if (hw->mac.type != igc_i225) in igc_tsn_enable_launchtime()
6249 return -EOPNOTSUPP; in igc_tsn_enable_launchtime()
6251 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
6263 adapter->base_time = 0; in igc_qbv_clear_schedule()
6264 adapter->cycle_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6265 adapter->taprio_offload_enable = false; in igc_qbv_clear_schedule()
6266 adapter->qbv_config_change_errors = 0; in igc_qbv_clear_schedule()
6267 adapter->qbv_count = 0; in igc_qbv_clear_schedule()
6269 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6270 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6272 ring->start_time = 0; in igc_qbv_clear_schedule()
6273 ring->end_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6274 ring->max_sdu = 0; in igc_qbv_clear_schedule()
6277 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6279 adapter->qbv_transition = false; in igc_qbv_clear_schedule()
6281 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6282 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6284 ring->oper_gate_closed = false; in igc_qbv_clear_schedule()
6285 ring->admin_gate_closed = false; in igc_qbv_clear_schedule()
6288 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6306 stats->tx_overruns = 0; in igc_taprio_stats()
6312 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; in igc_taprio_queue_stats()
6317 stats->tx_overruns = 0; in igc_taprio_queue_stats()
6324 struct igc_hw *hw = &adapter->hw; in igc_save_qbv_schedule()
6331 if (qopt->base_time < 0) in igc_save_qbv_schedule()
6332 return -ERANGE; in igc_save_qbv_schedule()
6334 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) in igc_save_qbv_schedule()
6335 return -EALREADY; in igc_save_qbv_schedule()
6338 return -EINVAL; in igc_save_qbv_schedule()
6343 is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6344 adapter->qbv_config_change_errors++; in igc_save_qbv_schedule()
6346 adapter->cycle_time = qopt->cycle_time; in igc_save_qbv_schedule()
6347 adapter->base_time = qopt->base_time; in igc_save_qbv_schedule()
6348 adapter->taprio_offload_enable = true; in igc_save_qbv_schedule()
6350 for (n = 0; n < qopt->num_entries; n++) { in igc_save_qbv_schedule()
6351 struct tc_taprio_sched_entry *e = &qopt->entries[n]; in igc_save_qbv_schedule()
6353 end_time += e->interval; in igc_save_qbv_schedule()
6361 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, in igc_save_qbv_schedule()
6366 if (end_time > adapter->cycle_time || in igc_save_qbv_schedule()
6367 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
6368 end_time = adapter->cycle_time; in igc_save_qbv_schedule()
6370 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6371 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6373 if (!(e->gate_mask & BIT(i))) in igc_save_qbv_schedule()
6381 ring->start_time = start_time; in igc_save_qbv_schedule()
6382 ring->end_time = end_time; in igc_save_qbv_schedule()
6384 if (ring->start_time >= adapter->cycle_time) in igc_save_qbv_schedule()
6390 start_time += e->interval; in igc_save_qbv_schedule()
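/* Worked example for the window computation above: with a cycle_time of
 * 1000000 ns and two entries opening queue 0 for 600000 ns and queue 1 for
 * 400000 ns, ring 0 ends up with start_time 0 and end_time 600000, ring 1
 * with start_time 600000 and end_time 1000000 (the last entry's end_time
 * is clamped to the cycle time by the check above).
 */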
6393 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6398 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6399 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6401 if (!is_base_time_past(qopt->base_time, &now)) { in igc_save_qbv_schedule()
6402 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6404 ring->oper_gate_closed = false; in igc_save_qbv_schedule()
6405 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6409 if (!is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6410 ring->admin_gate_closed = true; in igc_save_qbv_schedule()
6412 ring->oper_gate_closed = true; in igc_save_qbv_schedule()
6414 ring->start_time = end_time; in igc_save_qbv_schedule()
6415 ring->end_time = end_time; in igc_save_qbv_schedule()
6419 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6421 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6422 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6423 struct net_device *dev = adapter->netdev; in igc_save_qbv_schedule()
6425 if (qopt->max_sdu[i]) in igc_save_qbv_schedule()
6426 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; in igc_save_qbv_schedule()
6428 ring->max_sdu = 0; in igc_save_qbv_schedule()
6437 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_qbv_scheduling()
6440 if (hw->mac.type != igc_i225) in igc_tsn_enable_qbv_scheduling()
6441 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6443 switch (qopt->cmd) { in igc_tsn_enable_qbv_scheduling()
6451 igc_taprio_stats(adapter->netdev, &qopt->stats); in igc_tsn_enable_qbv_scheduling()
6454 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); in igc_tsn_enable_qbv_scheduling()
6457 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
6471 struct net_device *netdev = adapter->netdev; in igc_save_cbs_params()
6475 /* i225 has two sets of credit-based shaper logic. in igc_save_cbs_params()
6479 return -EINVAL; in igc_save_cbs_params()
6481 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
6484 if (adapter->tx_ring[i]) in igc_save_cbs_params()
6485 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()
6494 return -EINVAL; in igc_save_cbs_params()
6500 return -EINVAL; in igc_save_cbs_params()
6504 ring->cbs_enable = enable; in igc_save_cbs_params()
6505 ring->idleslope = idleslope; in igc_save_cbs_params()
6506 ring->sendslope = sendslope; in igc_save_cbs_params()
6507 ring->hicredit = hicredit; in igc_save_cbs_params()
6508 ring->locredit = locredit; in igc_save_cbs_params()
6516 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_cbs()
6519 if (hw->mac.type != igc_i225) in igc_tsn_enable_cbs()
6520 return -EOPNOTSUPP; in igc_tsn_enable_cbs()
6522 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
6523 return -EINVAL; in igc_tsn_enable_cbs()
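/* Only queues 0 and 1 are accepted, matching the two credit-based shaper
 * instances noted in igc_save_cbs_params() above.
 */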
6525 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, in igc_tsn_enable_cbs()
6526 qopt->idleslope, qopt->sendslope, in igc_tsn_enable_cbs()
6527 qopt->hicredit, qopt->locredit); in igc_tsn_enable_cbs()
6537 struct igc_hw *hw = &adapter->hw; in igc_tc_query_caps()
6539 switch (base->type) { in igc_tc_query_caps()
6541 struct tc_mqprio_caps *caps = base->caps; in igc_tc_query_caps()
6543 caps->validate_queue_counts = true; in igc_tc_query_caps()
6548 struct tc_taprio_caps *caps = base->caps; in igc_tc_query_caps()
6550 caps->broken_mqprio = true; in igc_tc_query_caps()
6552 if (hw->mac.type == igc_i225) { in igc_tc_query_caps()
6553 caps->supports_queue_max_sdu = true; in igc_tc_query_caps()
6554 caps->gate_mask_per_txq = true; in igc_tc_query_caps()
6560 return -EOPNOTSUPP; in igc_tc_query_caps()
6569 adapter->strict_priority_enable = true; in igc_save_mqprio_params()
6570 adapter->num_tc = num_tc; in igc_save_mqprio_params()
6573 adapter->queue_per_tc[i] = offset[i]; in igc_save_mqprio_params()
6579 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_mqprio()
6582 if (hw->mac.type != igc_i225) in igc_tsn_enable_mqprio()
6583 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6585 if (!mqprio->qopt.num_tc) { in igc_tsn_enable_mqprio()
6586 adapter->strict_priority_enable = false; in igc_tsn_enable_mqprio()
6591 if (mqprio->qopt.num_tc != adapter->num_tx_queues) { in igc_tsn_enable_mqprio()
6592 NL_SET_ERR_MSG_FMT_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6594 adapter->num_tx_queues); in igc_tsn_enable_mqprio()
6595 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6599 for (i = 0; i < mqprio->qopt.num_tc; i++) { in igc_tsn_enable_mqprio()
6600 if (mqprio->qopt.count[i] != 1) { in igc_tsn_enable_mqprio()
6601 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6603 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6608 if (mqprio->preemptible_tcs) { in igc_tsn_enable_mqprio()
6609 NL_SET_ERR_MSG_MOD(mqprio->extack, in igc_tsn_enable_mqprio()
6611 return -EOPNOTSUPP; in igc_tsn_enable_mqprio()
6614 igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, in igc_tsn_enable_mqprio()
6615 mqprio->qopt.offset); in igc_tsn_enable_mqprio()
6617 mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; in igc_tsn_enable_mqprio()
6628 adapter->tc_setup_type = type; in igc_setup_tc()
6646 return -EOPNOTSUPP; in igc_setup_tc()
6654 switch (bpf->command) { in igc_bpf()
6656 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); in igc_bpf()
6658 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, in igc_bpf()
6659 bpf->xsk.queue_id); in igc_bpf()
6661 return -EOPNOTSUPP; in igc_bpf()
6675 return -ENETDOWN; in igc_xdp_xmit()
6678 return -EINVAL; in igc_xdp_xmit()
6710 struct igc_hw *hw = &adapter->hw; in igc_trigger_rxtxq_interrupt()
6713 eics |= q_vector->eims_value; in igc_trigger_rxtxq_interrupt()
6723 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_xsk_wakeup()
6724 return -ENETDOWN; in igc_xsk_wakeup()
6727 return -ENXIO; in igc_xsk_wakeup()
6729 if (queue_id >= adapter->num_rx_queues) in igc_xsk_wakeup()
6730 return -EINVAL; in igc_xsk_wakeup()
6732 ring = adapter->rx_ring[queue_id]; in igc_xsk_wakeup()
6734 if (!ring->xsk_pool) in igc_xsk_wakeup()
6735 return -ENXIO; in igc_xsk_wakeup()
6737 q_vector = adapter->q_vector[queue_id]; in igc_xsk_wakeup()
6738 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) in igc_xsk_wakeup()
6752 tstamp = hwtstamps->netdev_data; in igc_get_tstamp()
6755 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); in igc_get_tstamp()
6757 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_get_tstamp()
6785 struct igc_adapter *adapter = hw->back; in igc_read_pci_cfg()
6787 pci_read_config_word(adapter->pdev, reg, value); in igc_read_pci_cfg()
6792 struct igc_adapter *adapter = hw->back; in igc_write_pci_cfg()
6794 pci_write_config_word(adapter->pdev, reg, *value); in igc_write_pci_cfg()
6799 struct igc_adapter *adapter = hw->back; in igc_read_pcie_cap_reg()
6801 if (!pci_is_pcie(adapter->pdev)) in igc_read_pcie_cap_reg()
6802 return -IGC_ERR_CONFIG; in igc_read_pcie_cap_reg()
6804 pcie_capability_read_word(adapter->pdev, reg, value); in igc_read_pcie_cap_reg()
6811 struct igc_adapter *adapter = hw->back; in igc_write_pcie_cap_reg()
6813 if (!pci_is_pcie(adapter->pdev)) in igc_write_pcie_cap_reg()
6814 return -IGC_ERR_CONFIG; in igc_write_pcie_cap_reg()
6816 pcie_capability_write_word(adapter->pdev, reg, *value); in igc_write_pcie_cap_reg()
6824 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igc_rd32()
6834 struct net_device *netdev = igc->netdev; in igc_rd32()
6836 hw->hw_addr = NULL; in igc_rd32()
6839 WARN(pci_device_is_present(igc->pdev), in igc_rd32()
6859 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6871 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) in igc_xdp_rx_hash()
6872 return -ENODATA; in igc_xdp_rx_hash()
6874 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); in igc_xdp_rx_hash()
6875 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; in igc_xdp_rx_hash()
6883 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); in igc_xdp_rx_timestamp()
6884 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; in igc_xdp_rx_timestamp()
6886 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { in igc_xdp_rx_timestamp()
6887 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_xdp_rx_timestamp()
6892 return -ENODATA; in igc_xdp_rx_timestamp()
6907 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6909 adapter->qbv_transition = true; in igc_qbv_scheduling_timer()
6910 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_scheduling_timer()
6911 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_qbv_scheduling_timer()
6913 if (tx_ring->admin_gate_closed) { in igc_qbv_scheduling_timer()
6914 tx_ring->admin_gate_closed = false; in igc_qbv_scheduling_timer()
6915 tx_ring->oper_gate_closed = true; in igc_qbv_scheduling_timer()
6917 tx_ring->oper_gate_closed = false; in igc_qbv_scheduling_timer()
6920 adapter->qbv_transition = false; in igc_qbv_scheduling_timer()
6922 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6928 * igc_probe - Device Initialization Routine
6944 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; in igc_probe()
6951 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igc_probe()
6953 dev_err(&pdev->dev, in igc_probe()
6964 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); in igc_probe()
6968 err = -ENOMEM; in igc_probe()
6975 SET_NETDEV_DEV(netdev, &pdev->dev); in igc_probe()
6979 adapter->netdev = netdev; in igc_probe()
6980 adapter->pdev = pdev; in igc_probe()
6981 hw = &adapter->hw; in igc_probe()
6982 hw->back = adapter; in igc_probe()
6983 adapter->port_num = hw->bus.func; in igc_probe()
6984 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igc_probe()
6990 err = -EIO; in igc_probe()
6991 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), in igc_probe()
6993 if (!adapter->io_addr) in igc_probe()
6996 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ in igc_probe()
6997 hw->hw_addr = adapter->io_addr; in igc_probe()
6999 netdev->netdev_ops = &igc_netdev_ops; in igc_probe()
7000 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; in igc_probe()
7001 netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; in igc_probe()
7003 netdev->watchdog_timeo = 5 * HZ; in igc_probe()
7005 netdev->mem_start = pci_resource_start(pdev, 0); in igc_probe()
7006 netdev->mem_end = pci_resource_end(pdev, 0); in igc_probe()
7009 hw->vendor_id = pdev->vendor; in igc_probe()
7010 hw->device_id = pdev->device; in igc_probe()
7011 hw->revision_id = pdev->revision; in igc_probe()
7012 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igc_probe()
7013 hw->subsystem_device_id = pdev->subsystem_device; in igc_probe()
7016 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igc_probe()
7017 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igc_probe()
7019 /* Initialize skew-specific constants */ in igc_probe()
7020 err = ei->get_invariants(hw); in igc_probe()
7025 netdev->features |= NETIF_F_SG; in igc_probe()
7026 netdev->features |= NETIF_F_TSO; in igc_probe()
7027 netdev->features |= NETIF_F_TSO6; in igc_probe()
7028 netdev->features |= NETIF_F_TSO_ECN; in igc_probe()
7029 netdev->features |= NETIF_F_RXHASH; in igc_probe()
7030 netdev->features |= NETIF_F_RXCSUM; in igc_probe()
7031 netdev->features |= NETIF_F_HW_CSUM; in igc_probe()
7032 netdev->features |= NETIF_F_SCTP_CRC; in igc_probe()
7033 netdev->features |= NETIF_F_HW_TC; in igc_probe()
7042 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7043 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; in igc_probe()
7051 netdev->hw_features |= NETIF_F_NTUPLE; in igc_probe()
7052 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; in igc_probe()
7053 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
7054 netdev->hw_features |= netdev->features; in igc_probe()
7056 netdev->features |= NETIF_F_HIGHDMA; in igc_probe()
7058 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igc_probe()
7059 netdev->mpls_features |= NETIF_F_HW_CSUM; in igc_probe()
7060 netdev->hw_enc_features |= netdev->vlan_features; in igc_probe()
7062 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igc_probe()
7065 /* MTU range: 68 - 9216 */ in igc_probe()
7066 netdev->min_mtu = ETH_MIN_MTU; in igc_probe()
7067 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igc_probe()
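/* Editor's sketch: the MTU limits set above bound the user's request, while
 * the on-wire budget the hardware must reserve is usually derived like this
 * (new_mtu and max_frame are illustrative names, not from this listing):
 */
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;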
7072 hw->mac.ops.reset_hw(hw); in igc_probe()
7075 if (hw->nvm.ops.validate(hw) < 0) { in igc_probe()
7076 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igc_probe()
7077 err = -EIO; in igc_probe()
7082 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igc_probe()
7084 if (hw->mac.ops.read_mac_addr(hw)) in igc_probe()
7085 dev_err(&pdev->dev, "NVM Read Error\n"); in igc_probe()
7088 eth_hw_addr_set(netdev, hw->mac.addr); in igc_probe()
7090 if (!is_valid_ether_addr(netdev->dev_addr)) { in igc_probe()
7091 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igc_probe()
7092 err = -EIO; in igc_probe()
7100 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); in igc_probe()
7101 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); in igc_probe()
7103 INIT_WORK(&adapter->reset_task, igc_reset_task); in igc_probe()
7104 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); in igc_probe()
7106 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in igc_probe()
7107 adapter->hrtimer.function = &igc_qbv_scheduling_timer; in igc_probe()
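/* Editor's sketch: the one-shot timer initialized above is armed relatively
 * when a Qbv gate transition is scheduled; the delay variable here is
 * purely illustrative.
 */
        hrtimer_start(&adapter->hrtimer, ns_to_ktime(launch_delay_ns),
                      HRTIMER_MODE_REL);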
7109 /* Initialize link properties that are user-changeable */ in igc_probe()
7110 adapter->fc_autoneg = true; in igc_probe()
7111 hw->mac.autoneg = true; in igc_probe()
7112 hw->phy.autoneg_advertised = 0xaf; /* 10/100 half+full, 1000 full, 2500 full */ in igc_probe()
7114 hw->fc.requested_mode = igc_fc_default; in igc_probe()
7115 hw->fc.current_mode = igc_fc_default; in igc_probe()
7118 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; in igc_probe()
7121 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) in igc_probe()
7122 adapter->wol |= IGC_WUFC_MAG; in igc_probe()
7124 device_set_wakeup_enable(&adapter->pdev->dev, in igc_probe()
7125 adapter->flags & IGC_FLAG_WOL_SUPPORTED); in igc_probe()
7139 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); in igc_probe()
7148 adapter->ei = *ei; in igc_probe()
7152 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); in igc_probe()
7154 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igc_probe()
7156 hw->dev_spec._base.eee_enable = false; in igc_probe()
7157 adapter->flags &= ~IGC_FLAG_EEE; in igc_probe()
7160 pm_runtime_put_noidle(&pdev->dev); in igc_probe()
7177 iounmap(adapter->io_addr); in igc_probe()
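/* Editor's sketch of the usual probe error-unwinding order that the
 * iounmap() above belongs to; the goto label names are assumptions and
 * the real driver's teardown steps may differ slightly.
 */
err_register:
        igc_release_hw_control(adapter);
err_sw_init:
        igc_clear_interrupt_scheme(adapter);
        iounmap(adapter->io_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;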
7189 * igc_remove - Device Removal Routine
7194 * Hot-Plug event, or because the driver is going to be removed from
7202 pm_runtime_get_noresume(&pdev->dev); in igc_remove()
7211 set_bit(__IGC_DOWN, &adapter->state); in igc_remove()
7213 del_timer_sync(&adapter->watchdog_timer); in igc_remove()
7214 del_timer_sync(&adapter->phy_info_timer); in igc_remove()
7216 cancel_work_sync(&adapter->reset_task); in igc_remove()
7217 cancel_work_sync(&adapter->watchdog_task); in igc_remove()
7218 hrtimer_cancel(&adapter->hrtimer); in igc_remove()
7230 pci_iounmap(pdev, adapter->io_addr); in igc_remove()
7243 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; in __igc_shutdown()
7244 struct igc_hw *hw = &adapter->hw; in __igc_shutdown()
7267 /* turn on all-multi mode if wake on multicast is enabled */ in __igc_shutdown()
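/* Editor's sketch of what the comment above describes: enable multicast
 * promiscuous mode so multicast frames can reach the wake-up filters.
 * 'rctl' is a local u32 assumed from context; rd32()/wr32() are the
 * driver's usual register accessors.
 */
        if (wufc & IGC_WUFC_MC) {
                rctl = rd32(IGC_RCTL);
                rctl |= IGC_RCTL_MPE;
                wr32(IGC_RCTL, rctl);
        }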
7288 wake = wufc || adapter->en_mng_pt; in __igc_shutdown()
7290 igc_power_down_phy_copper_base(&adapter->hw); in __igc_shutdown()
7315 struct igc_hw *hw = &adapter->hw; in igc_deliver_wake_packet()
7333 /* Ensure reads are 32-bit aligned */ in igc_deliver_wake_packet()
7336 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); in igc_deliver_wake_packet()
7338 skb->protocol = eth_type_trans(skb, netdev); in igc_deliver_wake_packet()
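/* Editor's note: a minimal sketch of how the delivery ends, with the
 * reconstructed wake packet injected into the normal receive path.
 */
        netif_rx(skb);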
7347 struct igc_hw *hw = &adapter->hw; in igc_resume()
7355 return -ENODEV; in igc_resume()
7368 return -ENOMEM; in igc_resume()
7411 return -EBUSY; in igc_runtime_idle()
7427 * igc_io_error_detected - called when PCI error is detected
7454 * igc_io_slot_reset - called after the PCI bus has been reset.
7457 * Restart the card from scratch, as if from a cold-boot. Implementation
7458 * resembles the first-half of the igc_resume routine.
7464 struct igc_hw *hw = &adapter->hw; in igc_io_slot_reset()
7468 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); in igc_io_slot_reset()
7479 * so we should re-assign it here. in igc_io_slot_reset()
7481 hw->hw_addr = adapter->io_addr; in igc_io_slot_reset()
7492 * igc_io_resume - called when traffic can start to flow again.
7497 * second-half of the igc_resume routine.
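/* Editor's sketch: the three AER callbacks documented above are normally
 * wired to the PCI core through a pci_error_handlers table; the table
 * name used here is an assumption.
 */
static const struct pci_error_handlers igc_err_handler = {
        .error_detected = igc_io_error_detected,
        .slot_reset = igc_io_slot_reset,
        .resume = igc_io_resume,
};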
7543  * igc_reinit_queues - reinitialize the interrupt scheme and Tx/Rx queues
7548 struct net_device *netdev = adapter->netdev; in igc_reinit_queues()
7558 return -ENOMEM; in igc_reinit_queues()
7568 * igc_get_hw_dev - return device
7575 struct igc_adapter *adapter = hw->back; in igc_get_hw_dev()
7577 return adapter->netdev; in igc_get_hw_dev()
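/* Editor's sketch: igc_get_hw_dev() lets the low-level MAC/PHY code log
 * against the owning net_device, typically via a small wrapper like this
 * (the macro name is an assumption, not taken from this listing):
 */
#define hw_dbg(hw, format, arg...) \
        netdev_dbg(igc_get_hw_dev(hw), format, ##arg)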
7582 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_rx_ring_hw()
7583 u8 idx = ring->reg_idx; in igc_disable_rx_ring_hw()
7600 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_rx_ring()
7604 if (ring->xsk_pool) in igc_enable_rx_ring()
7618 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_tx_ring()
7624 * igc_init_module - Driver Registration Routine
7643 * igc_exit_module - Driver Exit Cleanup Routine
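/* Editor's sketch of the usual shape of these two routines for a PCI
 * driver; 'igc_driver' names the driver's struct pci_driver and is an
 * assumption here.
 */
static int __init igc_init_module(void)
{
        return pci_register_driver(&igc_driver);
}
module_init(igc_init_module);

static void __exit igc_exit_module(void)
{
        pci_unregister_driver(&igc_driver);
}
module_exit(igc_exit_module);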