Lines matching +full:free +full:- +full:flowing in drivers/net/ethernet/intel/igbvf/netdev.c (igbvf virtual-function driver)

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2009 - 2018 Intel Corporation. */
31 "Copyright (c) 2009 - 2012 Intel Corporation.";
34 static int debug = -1;
63 * igbvf_desc_unused - calculate if we have unused descriptors
68 if (ring->next_to_clean > ring->next_to_use) in igbvf_desc_unused()
69 return ring->next_to_clean - ring->next_to_use - 1; in igbvf_desc_unused()
71 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in igbvf_desc_unused()
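
Editor's note: the helper above implements the classic one-slot-empty ring convention. A minimal standalone sketch of the same arithmetic (the mini_ring type and function name are illustrative, not driver code):

	struct mini_ring { unsigned int count, next_to_use, next_to_clean; };

	/* One slot is always left empty so next_to_clean == next_to_use
	 * can only mean "ring empty", never "ring full". */
	static unsigned int mini_desc_unused(const struct mini_ring *r)
	{
		if (r->next_to_clean > r->next_to_use)
			return r->next_to_clean - r->next_to_use - 1;
		return r->count + r->next_to_clean - r->next_to_use - 1;
	}

	/* Example: count = 8, next_to_use = 6, next_to_clean = 2
	 * -> 8 + 2 - 6 - 1 = 3 descriptors free. */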
75 * igbvf_receive_skb - helper function to handle Rx indications
91 if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) && in igbvf_receive_skb()
96 if (test_bit(vid, adapter->active_vlans)) in igbvf_receive_skb()
100 napi_gro_receive(&adapter->rx_ring->napi, skb); in igbvf_receive_skb()
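
Editor's note: a sketch of the Rx indication pattern these lines follow; __vlan_hwaccel_put_tag() and napi_gro_receive() are the real kernel APIs, while the helper name and the vlan_tagged flag are illustrative:

	static void example_receive(struct napi_struct *napi,
				    struct sk_buff *skb,
				    u16 vid, bool vlan_tagged)
	{
		/* Attach the hardware-stripped VLAN tag to skb metadata
		 * so the stack sees it without a tag in the payload. */
		if (vlan_tagged)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

		/* Hand the frame to GRO for possible coalescing before
		 * it enters the network stack proper. */
		napi_gro_receive(napi, skb);
	}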
110 (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) in igbvf_rx_checksum_adv()
117 adapter->hw_csum_err++; in igbvf_rx_checksum_adv()
123 skb->ip_summed = CHECKSUM_UNNECESSARY; in igbvf_rx_checksum_adv()
125 adapter->hw_csum_good++; in igbvf_rx_checksum_adv()
129 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
136 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_alloc_rx_buffers()
137 struct net_device *netdev = adapter->netdev; in igbvf_alloc_rx_buffers()
138 struct pci_dev *pdev = adapter->pdev; in igbvf_alloc_rx_buffers()
145 i = rx_ring->next_to_use; in igbvf_alloc_rx_buffers()
146 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
148 if (adapter->rx_ps_hdr_size) in igbvf_alloc_rx_buffers()
149 bufsz = adapter->rx_ps_hdr_size; in igbvf_alloc_rx_buffers()
151 bufsz = adapter->rx_buffer_len; in igbvf_alloc_rx_buffers()
153 while (cleaned_count--) { in igbvf_alloc_rx_buffers()
156 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { in igbvf_alloc_rx_buffers()
157 if (!buffer_info->page) { in igbvf_alloc_rx_buffers()
158 buffer_info->page = alloc_page(GFP_ATOMIC); in igbvf_alloc_rx_buffers()
159 if (!buffer_info->page) { in igbvf_alloc_rx_buffers()
160 adapter->alloc_rx_buff_failed++; in igbvf_alloc_rx_buffers()
163 buffer_info->page_offset = 0; in igbvf_alloc_rx_buffers()
165 buffer_info->page_offset ^= PAGE_SIZE / 2; in igbvf_alloc_rx_buffers()
167 buffer_info->page_dma = in igbvf_alloc_rx_buffers()
168 dma_map_page(&pdev->dev, buffer_info->page, in igbvf_alloc_rx_buffers()
169 buffer_info->page_offset, in igbvf_alloc_rx_buffers()
172 if (dma_mapping_error(&pdev->dev, in igbvf_alloc_rx_buffers()
173 buffer_info->page_dma)) { in igbvf_alloc_rx_buffers()
174 __free_page(buffer_info->page); in igbvf_alloc_rx_buffers()
175 buffer_info->page = NULL; in igbvf_alloc_rx_buffers()
176 dev_err(&pdev->dev, "RX DMA map failed\n"); in igbvf_alloc_rx_buffers()
181 if (!buffer_info->skb) { in igbvf_alloc_rx_buffers()
184 adapter->alloc_rx_buff_failed++; in igbvf_alloc_rx_buffers()
188 buffer_info->skb = skb; in igbvf_alloc_rx_buffers()
189 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in igbvf_alloc_rx_buffers()
192 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in igbvf_alloc_rx_buffers()
193 dev_kfree_skb(buffer_info->skb); in igbvf_alloc_rx_buffers()
194 buffer_info->skb = NULL; in igbvf_alloc_rx_buffers()
195 dev_err(&pdev->dev, "RX DMA map failed\n"); in igbvf_alloc_rx_buffers()
200 * each write-back erases this info. in igbvf_alloc_rx_buffers()
202 if (adapter->rx_ps_hdr_size) { in igbvf_alloc_rx_buffers()
203 rx_desc->read.pkt_addr = in igbvf_alloc_rx_buffers()
204 cpu_to_le64(buffer_info->page_dma); in igbvf_alloc_rx_buffers()
205 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); in igbvf_alloc_rx_buffers()
207 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); in igbvf_alloc_rx_buffers()
208 rx_desc->read.hdr_addr = 0; in igbvf_alloc_rx_buffers()
212 if (i == rx_ring->count) in igbvf_alloc_rx_buffers()
214 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
218 if (rx_ring->next_to_use != i) { in igbvf_alloc_rx_buffers()
219 rx_ring->next_to_use = i; in igbvf_alloc_rx_buffers()
221 i = (rx_ring->count - 1); in igbvf_alloc_rx_buffers()
223 i--; in igbvf_alloc_rx_buffers()
227 * applicable for weak-ordered memory model archs, in igbvf_alloc_rx_buffers()
228 * such as IA-64). in igbvf_alloc_rx_buffers()
231 writel(i, adapter->hw.hw_addr + rx_ring->tail); in igbvf_alloc_rx_buffers()
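
Editor's note: two patterns worth calling out in the allocator above. The XOR at source line 165 time-shares one page between two half-page packet buffers; a standalone demonstration (the variable is illustrative):

	unsigned int off = 0;

	off ^= PAGE_SIZE / 2;	/* -> PAGE_SIZE/2: second half of the page */
	off ^= PAGE_SIZE / 2;	/* -> 0: back to the first half */

And the wmb() implied by the comment at source lines 227-228 forces all descriptor stores to be visible to the device before the writel() tail bump publishes them, which matters on weakly ordered architectures.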
236 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
247 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_clean_rx_irq()
248 struct net_device *netdev = adapter->netdev; in igbvf_clean_rx_irq()
249 struct pci_dev *pdev = adapter->pdev; in igbvf_clean_rx_irq()
259 i = rx_ring->next_to_clean; in igbvf_clean_rx_irq()
261 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igbvf_clean_rx_irq()
269 buffer_info = &rx_ring->buffer_info[i]; in igbvf_clean_rx_irq()
276 hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info, in igbvf_clean_rx_irq()
278 if (hlen > adapter->rx_ps_hdr_size) in igbvf_clean_rx_irq()
279 hlen = adapter->rx_ps_hdr_size; in igbvf_clean_rx_irq()
281 length = le16_to_cpu(rx_desc->wb.upper.length); in igbvf_clean_rx_irq()
285 skb = buffer_info->skb; in igbvf_clean_rx_irq()
286 prefetch(skb->data - NET_IP_ALIGN); in igbvf_clean_rx_irq()
287 buffer_info->skb = NULL; in igbvf_clean_rx_irq()
288 if (!adapter->rx_ps_hdr_size) { in igbvf_clean_rx_irq()
289 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_irq()
290 adapter->rx_buffer_len, in igbvf_clean_rx_irq()
292 buffer_info->dma = 0; in igbvf_clean_rx_irq()
297 if (!skb_shinfo(skb)->nr_frags) { in igbvf_clean_rx_irq()
298 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_irq()
299 adapter->rx_ps_hdr_size, in igbvf_clean_rx_irq()
301 buffer_info->dma = 0; in igbvf_clean_rx_irq()
306 dma_unmap_page(&pdev->dev, buffer_info->page_dma, in igbvf_clean_rx_irq()
309 buffer_info->page_dma = 0; in igbvf_clean_rx_irq()
311 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, in igbvf_clean_rx_irq()
312 buffer_info->page, in igbvf_clean_rx_irq()
313 buffer_info->page_offset, in igbvf_clean_rx_irq()
316 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || in igbvf_clean_rx_irq()
317 (page_count(buffer_info->page) != 1)) in igbvf_clean_rx_irq()
318 buffer_info->page = NULL; in igbvf_clean_rx_irq()
320 get_page(buffer_info->page); in igbvf_clean_rx_irq()
322 skb->len += length; in igbvf_clean_rx_irq()
323 skb->data_len += length; in igbvf_clean_rx_irq()
324 skb->truesize += PAGE_SIZE / 2; in igbvf_clean_rx_irq()
328 if (i == rx_ring->count) in igbvf_clean_rx_irq()
332 next_buffer = &rx_ring->buffer_info[i]; in igbvf_clean_rx_irq()
335 buffer_info->skb = next_buffer->skb; in igbvf_clean_rx_irq()
336 buffer_info->dma = next_buffer->dma; in igbvf_clean_rx_irq()
337 next_buffer->skb = skb; in igbvf_clean_rx_irq()
338 next_buffer->dma = 0; in igbvf_clean_rx_irq()
347 total_bytes += skb->len; in igbvf_clean_rx_irq()
352 skb->protocol = eth_type_trans(skb, netdev); in igbvf_clean_rx_irq()
355 rx_desc->wb.upper.vlan); in igbvf_clean_rx_irq()
358 rx_desc->wb.upper.status_error = 0; in igbvf_clean_rx_irq()
370 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in igbvf_clean_rx_irq()
373 rx_ring->next_to_clean = i; in igbvf_clean_rx_irq()
379 adapter->total_rx_packets += total_packets; in igbvf_clean_rx_irq()
380 adapter->total_rx_bytes += total_bytes; in igbvf_clean_rx_irq()
381 netdev->stats.rx_bytes += total_bytes; in igbvf_clean_rx_irq()
382 netdev->stats.rx_packets += total_packets; in igbvf_clean_rx_irq()
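
Editor's note: the page-recycling decision at source lines 316-320 can be read as a predicate: the driver keeps the page for a future buffer only when it still owns the page exclusively and buffers really are half pages. A sketch (function name illustrative; page_count() is the real API):

	static bool can_recycle_rx_page(struct page *page,
					unsigned int rx_buffer_len)
	{
		/* A buffer larger than half a page means the page is not
		 * being split, and page_count() != 1 means the stack still
		 * holds a reference taken via get_page(). */
		return rx_buffer_len <= PAGE_SIZE / 2 && page_count(page) == 1;
	}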
389 if (buffer_info->dma) { in igbvf_put_txbuf()
390 if (buffer_info->mapped_as_page) in igbvf_put_txbuf()
391 dma_unmap_page(&adapter->pdev->dev, in igbvf_put_txbuf()
392 buffer_info->dma, in igbvf_put_txbuf()
393 buffer_info->length, in igbvf_put_txbuf()
396 dma_unmap_single(&adapter->pdev->dev, in igbvf_put_txbuf()
397 buffer_info->dma, in igbvf_put_txbuf()
398 buffer_info->length, in igbvf_put_txbuf()
400 buffer_info->dma = 0; in igbvf_put_txbuf()
402 if (buffer_info->skb) { in igbvf_put_txbuf()
403 dev_kfree_skb_any(buffer_info->skb); in igbvf_put_txbuf()
404 buffer_info->skb = NULL; in igbvf_put_txbuf()
406 buffer_info->time_stamp = 0; in igbvf_put_txbuf()
410 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
419 struct pci_dev *pdev = adapter->pdev; in igbvf_setup_tx_resources()
422 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_setup_tx_resources()
423 tx_ring->buffer_info = vzalloc(size); in igbvf_setup_tx_resources()
424 if (!tx_ring->buffer_info) in igbvf_setup_tx_resources()
428 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igbvf_setup_tx_resources()
429 tx_ring->size = ALIGN(tx_ring->size, 4096); in igbvf_setup_tx_resources()
431 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, in igbvf_setup_tx_resources()
432 &tx_ring->dma, GFP_KERNEL); in igbvf_setup_tx_resources()
433 if (!tx_ring->desc) in igbvf_setup_tx_resources()
436 tx_ring->adapter = adapter; in igbvf_setup_tx_resources()
437 tx_ring->next_to_use = 0; in igbvf_setup_tx_resources()
438 tx_ring->next_to_clean = 0; in igbvf_setup_tx_resources()
442 vfree(tx_ring->buffer_info); in igbvf_setup_tx_resources()
443 dev_err(&adapter->pdev->dev, in igbvf_setup_tx_resources()
445 return -ENOMEM; in igbvf_setup_tx_resources()
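
Editor's note, worked example of the sizing at source lines 428-429: sizeof(union e1000_adv_tx_desc) is 16 bytes, so a 1024-descriptor ring needs 16384 bytes and ALIGN(16384, 4096) leaves it unchanged; a 100-descriptor ring would need 1600 bytes, which ALIGN rounds up to the next 4096-byte boundary.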
449 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
458 struct pci_dev *pdev = adapter->pdev; in igbvf_setup_rx_resources()
461 size = sizeof(struct igbvf_buffer) * rx_ring->count; in igbvf_setup_rx_resources()
462 rx_ring->buffer_info = vzalloc(size); in igbvf_setup_rx_resources()
463 if (!rx_ring->buffer_info) in igbvf_setup_rx_resources()
469 rx_ring->size = rx_ring->count * desc_len; in igbvf_setup_rx_resources()
470 rx_ring->size = ALIGN(rx_ring->size, 4096); in igbvf_setup_rx_resources()
472 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, in igbvf_setup_rx_resources()
473 &rx_ring->dma, GFP_KERNEL); in igbvf_setup_rx_resources()
474 if (!rx_ring->desc) in igbvf_setup_rx_resources()
477 rx_ring->next_to_clean = 0; in igbvf_setup_rx_resources()
478 rx_ring->next_to_use = 0; in igbvf_setup_rx_resources()
480 rx_ring->adapter = adapter; in igbvf_setup_rx_resources()
485 vfree(rx_ring->buffer_info); in igbvf_setup_rx_resources()
486 rx_ring->buffer_info = NULL; in igbvf_setup_rx_resources()
487 dev_err(&adapter->pdev->dev, in igbvf_setup_rx_resources()
489 return -ENOMEM; in igbvf_setup_rx_resources()
493 * igbvf_clean_tx_ring - Free Tx Buffers
498 struct igbvf_adapter *adapter = tx_ring->adapter; in igbvf_clean_tx_ring()
503 if (!tx_ring->buffer_info) in igbvf_clean_tx_ring()
506 /* Free all the Tx ring sk_buffs */ in igbvf_clean_tx_ring()
507 for (i = 0; i < tx_ring->count; i++) { in igbvf_clean_tx_ring()
508 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_ring()
512 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_clean_tx_ring()
513 memset(tx_ring->buffer_info, 0, size); in igbvf_clean_tx_ring()
516 memset(tx_ring->desc, 0, tx_ring->size); in igbvf_clean_tx_ring()
518 tx_ring->next_to_use = 0; in igbvf_clean_tx_ring()
519 tx_ring->next_to_clean = 0; in igbvf_clean_tx_ring()
521 writel(0, adapter->hw.hw_addr + tx_ring->head); in igbvf_clean_tx_ring()
522 writel(0, adapter->hw.hw_addr + tx_ring->tail); in igbvf_clean_tx_ring()
526 * igbvf_free_tx_resources - Free Tx Resources per Queue
527 * @tx_ring: ring to free resources from
529 * Free all transmit software resources
533 struct pci_dev *pdev = tx_ring->adapter->pdev; in igbvf_free_tx_resources()
537 vfree(tx_ring->buffer_info); in igbvf_free_tx_resources()
538 tx_ring->buffer_info = NULL; in igbvf_free_tx_resources()
540 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in igbvf_free_tx_resources()
541 tx_ring->dma); in igbvf_free_tx_resources()
543 tx_ring->desc = NULL; in igbvf_free_tx_resources()
547 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
548 * @rx_ring: ring structure pointer to free buffers from
552 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_clean_rx_ring()
554 struct pci_dev *pdev = adapter->pdev; in igbvf_clean_rx_ring()
558 if (!rx_ring->buffer_info) in igbvf_clean_rx_ring()
561 /* Free all the Rx ring sk_buffs */ in igbvf_clean_rx_ring()
562 for (i = 0; i < rx_ring->count; i++) { in igbvf_clean_rx_ring()
563 buffer_info = &rx_ring->buffer_info[i]; in igbvf_clean_rx_ring()
564 if (buffer_info->dma) { in igbvf_clean_rx_ring()
565 if (adapter->rx_ps_hdr_size) { in igbvf_clean_rx_ring()
566 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_ring()
567 adapter->rx_ps_hdr_size, in igbvf_clean_rx_ring()
570 dma_unmap_single(&pdev->dev, buffer_info->dma, in igbvf_clean_rx_ring()
571 adapter->rx_buffer_len, in igbvf_clean_rx_ring()
574 buffer_info->dma = 0; in igbvf_clean_rx_ring()
577 if (buffer_info->skb) { in igbvf_clean_rx_ring()
578 dev_kfree_skb(buffer_info->skb); in igbvf_clean_rx_ring()
579 buffer_info->skb = NULL; in igbvf_clean_rx_ring()
582 if (buffer_info->page) { in igbvf_clean_rx_ring()
583 if (buffer_info->page_dma) in igbvf_clean_rx_ring()
584 dma_unmap_page(&pdev->dev, in igbvf_clean_rx_ring()
585 buffer_info->page_dma, in igbvf_clean_rx_ring()
588 put_page(buffer_info->page); in igbvf_clean_rx_ring()
589 buffer_info->page = NULL; in igbvf_clean_rx_ring()
590 buffer_info->page_dma = 0; in igbvf_clean_rx_ring()
591 buffer_info->page_offset = 0; in igbvf_clean_rx_ring()
595 size = sizeof(struct igbvf_buffer) * rx_ring->count; in igbvf_clean_rx_ring()
596 memset(rx_ring->buffer_info, 0, size); in igbvf_clean_rx_ring()
599 memset(rx_ring->desc, 0, rx_ring->size); in igbvf_clean_rx_ring()
601 rx_ring->next_to_clean = 0; in igbvf_clean_rx_ring()
602 rx_ring->next_to_use = 0; in igbvf_clean_rx_ring()
604 writel(0, adapter->hw.hw_addr + rx_ring->head); in igbvf_clean_rx_ring()
605 writel(0, adapter->hw.hw_addr + rx_ring->tail); in igbvf_clean_rx_ring()
609 * igbvf_free_rx_resources - Free Rx Resources
612 * Free all receive software resources
617 struct pci_dev *pdev = rx_ring->adapter->pdev; in igbvf_free_rx_resources()
621 vfree(rx_ring->buffer_info); in igbvf_free_rx_resources()
622 rx_ring->buffer_info = NULL; in igbvf_free_rx_resources()
624 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in igbvf_free_rx_resources()
625 rx_ring->dma); in igbvf_free_rx_resources()
626 rx_ring->desc = NULL; in igbvf_free_rx_resources()
630 * igbvf_update_itr - update the dynamic ITR value based on statistics
632 * @itr_setting: current adapter->itr
717 adapter->tx_ring->itr_range = in igbvf_set_itr()
719 adapter->tx_ring->itr_val, in igbvf_set_itr()
720 adapter->total_tx_packets, in igbvf_set_itr()
721 adapter->total_tx_bytes); in igbvf_set_itr()
724 if (adapter->requested_itr == 3 && in igbvf_set_itr()
725 adapter->tx_ring->itr_range == lowest_latency) in igbvf_set_itr()
726 adapter->tx_ring->itr_range = low_latency; in igbvf_set_itr()
728 new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); in igbvf_set_itr()
730 if (new_itr != adapter->tx_ring->itr_val) { in igbvf_set_itr()
731 u32 current_itr = adapter->tx_ring->itr_val; in igbvf_set_itr()
739 adapter->tx_ring->itr_val = new_itr; in igbvf_set_itr()
741 adapter->tx_ring->set_itr = 1; in igbvf_set_itr()
744 adapter->rx_ring->itr_range = in igbvf_set_itr()
745 igbvf_update_itr(adapter, adapter->rx_ring->itr_val, in igbvf_set_itr()
746 adapter->total_rx_packets, in igbvf_set_itr()
747 adapter->total_rx_bytes); in igbvf_set_itr()
748 if (adapter->requested_itr == 3 && in igbvf_set_itr()
749 adapter->rx_ring->itr_range == lowest_latency) in igbvf_set_itr()
750 adapter->rx_ring->itr_range = low_latency; in igbvf_set_itr()
752 new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range); in igbvf_set_itr()
754 if (new_itr != adapter->rx_ring->itr_val) { in igbvf_set_itr()
755 u32 current_itr = adapter->rx_ring->itr_val; in igbvf_set_itr()
760 adapter->rx_ring->itr_val = new_itr; in igbvf_set_itr()
762 adapter->rx_ring->set_itr = 1; in igbvf_set_itr()
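
Editor's note: the itr_range values fed into igbvf_range_to_itr() come from a traffic classifier. A hedged sketch of that classification (thresholds below are illustrative, not the driver's exact numbers; the real logic lives in igbvf_update_itr()):

	enum latency_range { lowest_latency, low_latency, bulk_latency };

	static enum latency_range classify_traffic(u32 packets, u32 bytes)
	{
		if (!packets)
			return low_latency;	/* idle: keep middle setting */
		if (bytes / packets > 1200)
			return bulk_latency;	/* large frames: batch IRQs */
		if (packets < 5)
			return lowest_latency;	/* sparse: minimize latency */
		return low_latency;
	}

Lower latency ranges map to higher interrupt rates; source lines 724-726 and 748-750 then clamp lowest_latency back to low_latency when the user requested conservative dynamic mode (requested_itr == 3).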
767 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
774 struct igbvf_adapter *adapter = tx_ring->adapter; in igbvf_clean_tx_irq()
775 struct net_device *netdev = adapter->netdev; in igbvf_clean_tx_irq()
783 i = tx_ring->next_to_clean; in igbvf_clean_tx_irq()
784 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_irq()
785 eop_desc = buffer_info->next_to_watch; in igbvf_clean_tx_irq()
796 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) in igbvf_clean_tx_irq()
800 buffer_info->next_to_watch = NULL; in igbvf_clean_tx_irq()
805 skb = buffer_info->skb; in igbvf_clean_tx_irq()
811 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
813 bytecount = ((segs - 1) * skb_headlen(skb)) + in igbvf_clean_tx_irq()
814 skb->len; in igbvf_clean_tx_irq()
820 tx_desc->wb.status = 0; in igbvf_clean_tx_irq()
823 if (i == tx_ring->count) in igbvf_clean_tx_irq()
826 buffer_info = &tx_ring->buffer_info[i]; in igbvf_clean_tx_irq()
829 eop_desc = buffer_info->next_to_watch; in igbvf_clean_tx_irq()
830 } while (count < tx_ring->count); in igbvf_clean_tx_irq()
832 tx_ring->next_to_clean = i; in igbvf_clean_tx_irq()
841 !(test_bit(__IGBVF_DOWN, &adapter->state))) { in igbvf_clean_tx_irq()
843 ++adapter->restart_queue; in igbvf_clean_tx_irq()
847 netdev->stats.tx_bytes += total_bytes; in igbvf_clean_tx_irq()
848 netdev->stats.tx_packets += total_packets; in igbvf_clean_tx_irq()
849 return count < tx_ring->count; in igbvf_clean_tx_irq()
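
Editor's note, worked example of the GSO byte accounting at source lines 811-814: for a TSO skb with gso_segs = 4, skb_headlen() = 54 and skb->len = 4054, each of the four wire segments repeats the 54-byte header, so bytecount = (4 - 1) * 54 + 4054 = 4216 bytes actually sent on the wire.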
856 struct e1000_hw *hw = &adapter->hw; in igbvf_msix_other()
858 adapter->int_counter1++; in igbvf_msix_other()
860 hw->mac.get_link_status = 1; in igbvf_msix_other()
861 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_msix_other()
862 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_msix_other()
864 ew32(EIMS, adapter->eims_other); in igbvf_msix_other()
873 struct e1000_hw *hw = &adapter->hw; in igbvf_intr_msix_tx()
874 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_intr_msix_tx()
876 if (tx_ring->set_itr) { in igbvf_intr_msix_tx()
877 writel(tx_ring->itr_val, in igbvf_intr_msix_tx()
878 adapter->hw.hw_addr + tx_ring->itr_register); in igbvf_intr_msix_tx()
879 adapter->tx_ring->set_itr = 0; in igbvf_intr_msix_tx()
882 adapter->total_tx_bytes = 0; in igbvf_intr_msix_tx()
883 adapter->total_tx_packets = 0; in igbvf_intr_msix_tx()
885 /* auto mask will automatically re-enable the interrupt when we write in igbvf_intr_msix_tx()
890 ew32(EICS, tx_ring->eims_value); in igbvf_intr_msix_tx()
892 ew32(EIMS, tx_ring->eims_value); in igbvf_intr_msix_tx()
902 adapter->int_counter0++; in igbvf_intr_msix_rx()
907 if (adapter->rx_ring->set_itr) { in igbvf_intr_msix_rx()
908 writel(adapter->rx_ring->itr_val, in igbvf_intr_msix_rx()
909 adapter->hw.hw_addr + adapter->rx_ring->itr_register); in igbvf_intr_msix_rx()
910 adapter->rx_ring->set_itr = 0; in igbvf_intr_msix_rx()
913 if (napi_schedule_prep(&adapter->rx_ring->napi)) { in igbvf_intr_msix_rx()
914 adapter->total_rx_bytes = 0; in igbvf_intr_msix_rx()
915 adapter->total_rx_packets = 0; in igbvf_intr_msix_rx()
916 __napi_schedule(&adapter->rx_ring->napi); in igbvf_intr_msix_rx()
922 #define IGBVF_NO_QUEUE -1
927 struct e1000_hw *hw = &adapter->hw; in igbvf_assign_vector()
930 /* 82576 uses a table-based method for assigning vectors. in igbvf_assign_vector()
947 adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); in igbvf_assign_vector()
962 adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); in igbvf_assign_vector()
968 * igbvf_configure_msix - Configure MSI-X hardware
972 * generate MSI-X interrupts.
977 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_msix()
978 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_configure_msix()
979 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_configure_msix()
982 adapter->eims_enable_mask = 0; in igbvf_configure_msix()
985 adapter->eims_enable_mask |= tx_ring->eims_value; in igbvf_configure_msix()
986 writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); in igbvf_configure_msix()
988 adapter->eims_enable_mask |= rx_ring->eims_value; in igbvf_configure_msix()
989 writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); in igbvf_configure_msix()
997 adapter->eims_enable_mask = GENMASK(vector - 1, 0); in igbvf_configure_msix()
998 adapter->eims_other = BIT(vector - 1); in igbvf_configure_msix()
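
Editor's note, worked example of the mask math above with vector == 3 (tx, rx, mailbox): GENMASK(2, 0) = 0b111 enables all three interrupt causes in eims_enable_mask, and BIT(2) = 0b100 isolates the last, "other"/mailbox vector in eims_other.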
1004 if (adapter->msix_entries) { in igbvf_reset_interrupt_capability()
1005 pci_disable_msix(adapter->pdev); in igbvf_reset_interrupt_capability()
1006 kfree(adapter->msix_entries); in igbvf_reset_interrupt_capability()
1007 adapter->msix_entries = NULL; in igbvf_reset_interrupt_capability()
1012 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1020 int err = -ENOMEM; in igbvf_set_interrupt_capability()
1024 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), in igbvf_set_interrupt_capability()
1026 if (adapter->msix_entries) { in igbvf_set_interrupt_capability()
1028 adapter->msix_entries[i].entry = i; in igbvf_set_interrupt_capability()
1030 err = pci_enable_msix_range(adapter->pdev, in igbvf_set_interrupt_capability()
1031 adapter->msix_entries, 3, 3); in igbvf_set_interrupt_capability()
1035 /* MSI-X failed */ in igbvf_set_interrupt_capability()
1036 dev_err(&adapter->pdev->dev, in igbvf_set_interrupt_capability()
1037 "Failed to initialize MSI-X interrupts.\n"); in igbvf_set_interrupt_capability()
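
Editor's note: pci_enable_msix_range(pdev, entries, minvec, maxvec) returns the number of vectors actually granted (somewhere in [minvec, maxvec]) or a negative errno. Requesting exactly (3, 3) as at source lines 1030-1031 turns any partial grant into failure, so a single negative-return check is enough to detect that MSI-X is unusable.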
1043 * igbvf_request_msix - Initialize MSI-X interrupts
1046 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1051 struct net_device *netdev = adapter->netdev; in igbvf_request_msix()
1054 if (strlen(netdev->name) < (IFNAMSIZ - 5)) { in igbvf_request_msix()
1055 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); in igbvf_request_msix()
1056 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); in igbvf_request_msix()
1058 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in igbvf_request_msix()
1059 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in igbvf_request_msix()
1062 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1063 igbvf_intr_msix_tx, 0, adapter->tx_ring->name, in igbvf_request_msix()
1068 adapter->tx_ring->itr_register = E1000_EITR(vector); in igbvf_request_msix()
1069 adapter->tx_ring->itr_val = adapter->current_itr; in igbvf_request_msix()
1072 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1073 igbvf_intr_msix_rx, 0, adapter->rx_ring->name, in igbvf_request_msix()
1078 adapter->rx_ring->itr_register = E1000_EITR(vector); in igbvf_request_msix()
1079 adapter->rx_ring->itr_val = adapter->current_itr; in igbvf_request_msix()
1082 err = request_irq(adapter->msix_entries[vector].vector, in igbvf_request_msix()
1083 igbvf_msix_other, 0, netdev->name, netdev); in igbvf_request_msix()
1090 free_irq(adapter->msix_entries[--vector].vector, netdev); in igbvf_request_msix()
1092 free_irq(adapter->msix_entries[--vector].vector, netdev); in igbvf_request_msix()
1098 * igbvf_alloc_queues - Allocate memory for all rings
1103 struct net_device *netdev = adapter->netdev; in igbvf_alloc_queues()
1105 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); in igbvf_alloc_queues()
1106 if (!adapter->tx_ring) in igbvf_alloc_queues()
1107 return -ENOMEM; in igbvf_alloc_queues()
1109 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); in igbvf_alloc_queues()
1110 if (!adapter->rx_ring) { in igbvf_alloc_queues()
1111 kfree(adapter->tx_ring); in igbvf_alloc_queues()
1112 return -ENOMEM; in igbvf_alloc_queues()
1115 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll); in igbvf_alloc_queues()
1121 * igbvf_request_irq - initialize interrupts
1129 int err = -1; in igbvf_request_irq()
1131 /* igbvf supports msi-x only */ in igbvf_request_irq()
1132 if (adapter->msix_entries) in igbvf_request_irq()
1138 dev_err(&adapter->pdev->dev, in igbvf_request_irq()
1146 struct net_device *netdev = adapter->netdev; in igbvf_free_irq()
1149 if (adapter->msix_entries) { in igbvf_free_irq()
1151 free_irq(adapter->msix_entries[vector].vector, netdev); in igbvf_free_irq()
1156 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1161 struct e1000_hw *hw = &adapter->hw; in igbvf_irq_disable()
1165 if (adapter->msix_entries) in igbvf_irq_disable()
1170 * igbvf_irq_enable - Enable default interrupt generation settings
1175 struct e1000_hw *hw = &adapter->hw; in igbvf_irq_enable()
1177 ew32(EIAC, adapter->eims_enable_mask); in igbvf_irq_enable()
1178 ew32(EIAM, adapter->eims_enable_mask); in igbvf_irq_enable()
1179 ew32(EIMS, adapter->eims_enable_mask); in igbvf_irq_enable()
1183 * igbvf_poll - NAPI Rx polling callback
1190 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_poll()
1191 struct e1000_hw *hw = &adapter->hw; in igbvf_poll()
1199 /* Exit the polling mode, but don't re-enable interrupts if stack might in igbvf_poll()
1200 * poll us due to busy-polling in igbvf_poll()
1203 if (adapter->requested_itr & 3) in igbvf_poll()
1206 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_poll()
1207 ew32(EIMS, adapter->rx_ring->eims_value); in igbvf_poll()
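
Editor's note: the poll function follows the canonical NAPI shape. A minimal sketch under that assumption (helper names are illustrative; napi_complete_done() is the real API and returns true when interrupts should be re-enabled):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rx(napi, budget);	/* hypothetical */

		/* Only leave polling mode if we stayed under budget and
		 * the core agrees; it may keep us scheduled because the
		 * stack is busy-polling, as the comment above notes. */
		if (work_done < budget && napi_complete_done(napi, work_done))
			example_enable_rx_irq(napi);	/* hypothetical */

		return work_done;
	}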
1214 * igbvf_set_rlpml - set receive large packet maximum length
1222 struct e1000_hw *hw = &adapter->hw; in igbvf_set_rlpml()
1224 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; in igbvf_set_rlpml()
1226 spin_lock_bh(&hw->mbx_lock); in igbvf_set_rlpml()
1230 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_rlpml()
1237 struct e1000_hw *hw = &adapter->hw; in igbvf_vlan_rx_add_vid()
1239 spin_lock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1241 if (hw->mac.ops.set_vfta(hw, vid, true)) { in igbvf_vlan_rx_add_vid()
1242 dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); in igbvf_vlan_rx_add_vid()
1243 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1244 return -EINVAL; in igbvf_vlan_rx_add_vid()
1247 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_add_vid()
1249 set_bit(vid, adapter->active_vlans); in igbvf_vlan_rx_add_vid()
1257 struct e1000_hw *hw = &adapter->hw; in igbvf_vlan_rx_kill_vid()
1259 spin_lock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1261 if (hw->mac.ops.set_vfta(hw, vid, false)) { in igbvf_vlan_rx_kill_vid()
1262 dev_err(&adapter->pdev->dev, in igbvf_vlan_rx_kill_vid()
1264 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1265 return -EINVAL; in igbvf_vlan_rx_kill_vid()
1268 spin_unlock_bh(&hw->mbx_lock); in igbvf_vlan_rx_kill_vid()
1270 clear_bit(vid, adapter->active_vlans); in igbvf_vlan_rx_kill_vid()
1278 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in igbvf_restore_vlan()
1279 igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igbvf_restore_vlan()
1283 * igbvf_configure_tx - Configure Transmit Unit after Reset
1290 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_tx()
1291 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_configure_tx()
1302 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); in igbvf_configure_tx()
1303 tdba = tx_ring->dma; in igbvf_configure_tx()
1308 tx_ring->head = E1000_TDH(0); in igbvf_configure_tx()
1309 tx_ring->tail = E1000_TDT(0); in igbvf_configure_tx()
1311 /* Turn off Relaxed Ordering on head write-backs. The writebacks in igbvf_configure_tx()
1324 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; in igbvf_configure_tx()
1327 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; in igbvf_configure_tx()
1331 * igbvf_setup_srrctl - configure the receive control registers
1336 struct e1000_hw *hw = &adapter->hw; in igbvf_setup_srrctl()
1347 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> in igbvf_setup_srrctl()
1350 if (adapter->rx_buffer_len < 2048) { in igbvf_setup_srrctl()
1351 adapter->rx_ps_hdr_size = 0; in igbvf_setup_srrctl()
1354 adapter->rx_ps_hdr_size = 128; in igbvf_setup_srrctl()
1355 srrctl |= adapter->rx_ps_hdr_size << in igbvf_setup_srrctl()
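
Editor's note, worked example of the SRRCTL packing above, assuming E1000_SRRCTL_BSIZEPKT_SHIFT == 10 (1 KiB granularity) as in the igb family: rx_buffer_len = 2048 gives ALIGN(2048, 1024) >> 10 = 2, telling the hardware to use 2 KiB packet buffers; the 128-byte rx_ps_hdr_size is packed into the header-size field the same way at its own shift.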
1364 * igbvf_configure_rx - Configure Receive Unit after Reset
1371 struct e1000_hw *hw = &adapter->hw; in igbvf_configure_rx()
1372 struct igbvf_ring *rx_ring = adapter->rx_ring; in igbvf_configure_rx()
1385 rdba = rx_ring->dma; in igbvf_configure_rx()
1388 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); in igbvf_configure_rx()
1389 rx_ring->head = E1000_RDH(0); in igbvf_configure_rx()
1390 rx_ring->tail = E1000_RDT(0); in igbvf_configure_rx()
1407 * igbvf_set_multi - Multicast and Promiscuous mode set
1413 * promiscuous mode, and all-multi behavior.
1418 struct e1000_hw *hw = &adapter->hw; in igbvf_set_multi()
1433 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igbvf_set_multi()
1435 spin_lock_bh(&hw->mbx_lock); in igbvf_set_multi()
1437 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); in igbvf_set_multi()
1439 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_multi()
1444 * igbvf_set_uni - Configure unicast MAC filters
1453 struct e1000_hw *hw = &adapter->hw; in igbvf_set_uni()
1456 pr_err("Too many unicast filters - No Space\n"); in igbvf_set_uni()
1457 return -ENOSPC; in igbvf_set_uni()
1460 spin_lock_bh(&hw->mbx_lock); in igbvf_set_uni()
1463 hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); in igbvf_set_uni()
1465 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_uni()
1472 spin_lock_bh(&hw->mbx_lock); in igbvf_set_uni()
1474 hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, in igbvf_set_uni()
1475 ha->addr); in igbvf_set_uni()
1477 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_uni()
1492 * igbvf_configure - configure the hardware for Rx and Tx
1497 igbvf_set_rx_mode(adapter->netdev); in igbvf_configure()
1504 igbvf_alloc_rx_buffers(adapter->rx_ring, in igbvf_configure()
1505 igbvf_desc_unused(adapter->rx_ring)); in igbvf_configure()
1508 /* igbvf_reset - bring the hardware into a known good state
1512 * require a configuration cycle of the hardware - those cannot be
1518 struct e1000_mac_info *mac = &adapter->hw.mac; in igbvf_reset()
1519 struct net_device *netdev = adapter->netdev; in igbvf_reset()
1520 struct e1000_hw *hw = &adapter->hw; in igbvf_reset()
1522 spin_lock_bh(&hw->mbx_lock); in igbvf_reset()
1525 if (mac->ops.reset_hw(hw)) in igbvf_reset()
1526 dev_info(&adapter->pdev->dev, "PF still resetting\n"); in igbvf_reset()
1528 mac->ops.init_hw(hw); in igbvf_reset()
1530 spin_unlock_bh(&hw->mbx_lock); in igbvf_reset()
1532 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in igbvf_reset()
1533 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in igbvf_reset()
1534 memcpy(netdev->perm_addr, adapter->hw.mac.addr, in igbvf_reset()
1535 netdev->addr_len); in igbvf_reset()
1538 adapter->last_reset = jiffies; in igbvf_reset()
1543 struct e1000_hw *hw = &adapter->hw; in igbvf_up()
1548 clear_bit(__IGBVF_DOWN, &adapter->state); in igbvf_up()
1550 napi_enable(&adapter->rx_ring->napi); in igbvf_up()
1551 if (adapter->msix_entries) in igbvf_up()
1559 hw->mac.get_link_status = 1; in igbvf_up()
1560 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_up()
1567 struct net_device *netdev = adapter->netdev; in igbvf_down()
1568 struct e1000_hw *hw = &adapter->hw; in igbvf_down()
1574 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_down()
1591 napi_disable(&adapter->rx_ring->napi); in igbvf_down()
1595 del_timer_sync(&adapter->watchdog_timer); in igbvf_down()
1600 adapter->link_speed = 0; in igbvf_down()
1601 adapter->link_duplex = 0; in igbvf_down()
1604 igbvf_clean_tx_ring(adapter->tx_ring); in igbvf_down()
1605 igbvf_clean_rx_ring(adapter->rx_ring); in igbvf_down()
1611 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_reinit_locked()
1615 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_reinit_locked()
1619 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1628 struct net_device *netdev = adapter->netdev; in igbvf_sw_init()
1631 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; in igbvf_sw_init()
1632 adapter->rx_ps_hdr_size = 0; in igbvf_sw_init()
1633 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in igbvf_sw_init()
1634 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igbvf_sw_init()
1636 adapter->tx_int_delay = 8; in igbvf_sw_init()
1637 adapter->tx_abs_int_delay = 32; in igbvf_sw_init()
1638 adapter->rx_int_delay = 0; in igbvf_sw_init()
1639 adapter->rx_abs_int_delay = 8; in igbvf_sw_init()
1640 adapter->requested_itr = 3; in igbvf_sw_init()
1641 adapter->current_itr = IGBVF_START_ITR; in igbvf_sw_init()
1644 adapter->ei->init_ops(&adapter->hw); in igbvf_sw_init()
1646 rc = adapter->hw.mac.ops.init_params(&adapter->hw); in igbvf_sw_init()
1650 rc = adapter->hw.mbx.ops.init_params(&adapter->hw); in igbvf_sw_init()
1657 return -ENOMEM; in igbvf_sw_init()
1659 spin_lock_init(&adapter->tx_queue_lock); in igbvf_sw_init()
1664 spin_lock_init(&adapter->stats_lock); in igbvf_sw_init()
1665 spin_lock_init(&adapter->hw.mbx_lock); in igbvf_sw_init()
1667 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_sw_init()
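
Editor's note, worked numbers for the frame sizing at source lines 1631-1634 with MTU 1500: rx_buffer_len = ETH_FRAME_LEN (1514) + VLAN_HLEN (4) + ETH_FCS_LEN (4) = 1522; max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518; min_frame_size = ETH_ZLEN (60) + 4 = 64, the minimum legal Ethernet frame.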
1673 struct e1000_hw *hw = &adapter->hw; in igbvf_initialize_last_counter_stats()
1675 adapter->stats.last_gprc = er32(VFGPRC); in igbvf_initialize_last_counter_stats()
1676 adapter->stats.last_gorc = er32(VFGORC); in igbvf_initialize_last_counter_stats()
1677 adapter->stats.last_gptc = er32(VFGPTC); in igbvf_initialize_last_counter_stats()
1678 adapter->stats.last_gotc = er32(VFGOTC); in igbvf_initialize_last_counter_stats()
1679 adapter->stats.last_mprc = er32(VFMPRC); in igbvf_initialize_last_counter_stats()
1680 adapter->stats.last_gotlbc = er32(VFGOTLBC); in igbvf_initialize_last_counter_stats()
1681 adapter->stats.last_gptlbc = er32(VFGPTLBC); in igbvf_initialize_last_counter_stats()
1682 adapter->stats.last_gorlbc = er32(VFGORLBC); in igbvf_initialize_last_counter_stats()
1683 adapter->stats.last_gprlbc = er32(VFGPRLBC); in igbvf_initialize_last_counter_stats()
1685 adapter->stats.base_gprc = er32(VFGPRC); in igbvf_initialize_last_counter_stats()
1686 adapter->stats.base_gorc = er32(VFGORC); in igbvf_initialize_last_counter_stats()
1687 adapter->stats.base_gptc = er32(VFGPTC); in igbvf_initialize_last_counter_stats()
1688 adapter->stats.base_gotc = er32(VFGOTC); in igbvf_initialize_last_counter_stats()
1689 adapter->stats.base_mprc = er32(VFMPRC); in igbvf_initialize_last_counter_stats()
1690 adapter->stats.base_gotlbc = er32(VFGOTLBC); in igbvf_initialize_last_counter_stats()
1691 adapter->stats.base_gptlbc = er32(VFGPTLBC); in igbvf_initialize_last_counter_stats()
1692 adapter->stats.base_gorlbc = er32(VFGORLBC); in igbvf_initialize_last_counter_stats()
1693 adapter->stats.base_gprlbc = er32(VFGPRLBC); in igbvf_initialize_last_counter_stats()
1697 * igbvf_open - Called when a network interface is made active
1711 struct e1000_hw *hw = &adapter->hw; in igbvf_open()
1715 if (test_bit(__IGBVF_TESTING, &adapter->state)) in igbvf_open()
1716 return -EBUSY; in igbvf_open()
1719 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); in igbvf_open()
1724 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); in igbvf_open()
1740 clear_bit(__IGBVF_DOWN, &adapter->state); in igbvf_open()
1742 napi_enable(&adapter->rx_ring->napi); in igbvf_open()
1750 hw->mac.get_link_status = 1; in igbvf_open()
1751 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igbvf_open()
1756 igbvf_free_rx_resources(adapter->rx_ring); in igbvf_open()
1758 igbvf_free_tx_resources(adapter->tx_ring); in igbvf_open()
1766 * igbvf_close - Disables a network interface
1771 * The close entry point is called when an interface is de-activated
1780 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); in igbvf_close()
1785 igbvf_free_tx_resources(adapter->tx_ring); in igbvf_close()
1786 igbvf_free_rx_resources(adapter->rx_ring); in igbvf_close()
1792 * igbvf_set_mac - Change the Ethernet Address of the NIC
1801 struct e1000_hw *hw = &adapter->hw; in igbvf_set_mac()
1804 if (!is_valid_ether_addr(addr->sa_data)) in igbvf_set_mac()
1805 return -EADDRNOTAVAIL; in igbvf_set_mac()
1807 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igbvf_set_mac()
1809 spin_lock_bh(&hw->mbx_lock); in igbvf_set_mac()
1811 hw->mac.ops.rar_set(hw, hw->mac.addr, 0); in igbvf_set_mac()
1813 spin_unlock_bh(&hw->mbx_lock); in igbvf_set_mac()
1815 if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) in igbvf_set_mac()
1816 return -EADDRNOTAVAIL; in igbvf_set_mac()
1818 eth_hw_addr_set(netdev, addr->sa_data); in igbvf_set_mac()
1826 if (current_counter < adapter->stats.last_##name) \
1827 adapter->stats.name += 0x100000000LL; \
1828 adapter->stats.last_##name = current_counter; \
1829 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1830 adapter->stats.name |= current_counter; \
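
Editor's note, worked example of the 32-bit-to-64-bit counter extension in the UPDATE_VF_COUNTER macro above: suppose stats.name = 0x0FFFFFFF0, last = 0xFFFFFFF0, and the hardware register now reads 0x10 because it wrapped. Since 0x10 < 0xFFFFFFF0, the macro adds 0x100000000 (stats.name becomes 0x1FFFFFFF0), masks off the low 32 bits (leaving 0x100000000), and ORs in the current reading, yielding the true monotonic count 0x100000010.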
1834 * igbvf_update_stats - Update the board statistics counters
1839 struct e1000_hw *hw = &adapter->hw; in igbvf_update_stats()
1840 struct pci_dev *pdev = adapter->pdev; in igbvf_update_stats()
1845 if (adapter->link_speed == 0) in igbvf_update_stats()
1848 if (test_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_update_stats()
1865 adapter->netdev->stats.multicast = adapter->stats.mprc; in igbvf_update_stats()
1870 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", in igbvf_print_link_info()
1871 adapter->link_speed, in igbvf_print_link_info()
1872 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); in igbvf_print_link_info()
1877 struct e1000_hw *hw = &adapter->hw; in igbvf_has_link()
1882 if (test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_has_link()
1885 spin_lock_bh(&hw->mbx_lock); in igbvf_has_link()
1887 ret_val = hw->mac.ops.check_for_link(hw); in igbvf_has_link()
1889 spin_unlock_bh(&hw->mbx_lock); in igbvf_has_link()
1891 link_active = !hw->mac.get_link_status; in igbvf_has_link()
1894 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) in igbvf_has_link()
1895 schedule_work(&adapter->reset_task); in igbvf_has_link()
1901 * igbvf_watchdog - Timer Call-back
1909 schedule_work(&adapter->watchdog_task); in igbvf_watchdog()
1917 struct net_device *netdev = adapter->netdev; in igbvf_watchdog_task()
1918 struct e1000_mac_info *mac = &adapter->hw.mac; in igbvf_watchdog_task()
1919 struct igbvf_ring *tx_ring = adapter->tx_ring; in igbvf_watchdog_task()
1920 struct e1000_hw *hw = &adapter->hw; in igbvf_watchdog_task()
1928 mac->ops.get_link_up_info(&adapter->hw, in igbvf_watchdog_task()
1929 &adapter->link_speed, in igbvf_watchdog_task()
1930 &adapter->link_duplex); in igbvf_watchdog_task()
1938 adapter->link_speed = 0; in igbvf_watchdog_task()
1939 adapter->link_duplex = 0; in igbvf_watchdog_task()
1940 dev_info(&adapter->pdev->dev, "Link is Down\n"); in igbvf_watchdog_task()
1950 tx_ring->count); in igbvf_watchdog_task()
1957 adapter->tx_timeout_count++; in igbvf_watchdog_task()
1958 schedule_work(&adapter->reset_task); in igbvf_watchdog_task()
1963 ew32(EICS, adapter->rx_ring->eims_value); in igbvf_watchdog_task()
1966 if (!test_bit(__IGBVF_DOWN, &adapter->state)) in igbvf_watchdog_task()
1967 mod_timer(&adapter->watchdog_timer, in igbvf_watchdog_task()
1983 u16 i = tx_ring->next_to_use; in igbvf_tx_ctxtdesc()
1986 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_ctxtdesc()
1989 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igbvf_tx_ctxtdesc()
1994 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igbvf_tx_ctxtdesc()
1995 context_desc->seqnum_seed = 0; in igbvf_tx_ctxtdesc()
1996 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igbvf_tx_ctxtdesc()
1997 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igbvf_tx_ctxtdesc()
1999 buffer_info->time_stamp = jiffies; in igbvf_tx_ctxtdesc()
2000 buffer_info->dma = 0; in igbvf_tx_ctxtdesc()
2019 if (skb->ip_summed != CHECKSUM_PARTIAL) in igbvf_tso()
2036 if (ip.v4->version == 4) { in igbvf_tso()
2038 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igbvf_tso()
2043 ip.v4->check = csum_fold(csum_partial(trans_start, in igbvf_tso()
2044 csum_start - trans_start, in igbvf_tso()
2048 ip.v4->tot_len = 0; in igbvf_tso()
2050 ip.v6->payload_len = 0; in igbvf_tso()
2054 l4_offset = l4.hdr - skb->data; in igbvf_tso()
2057 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igbvf_tso()
2060 paylen = skb->len - l4_offset; in igbvf_tso()
2061 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in igbvf_tso()
2064 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; in igbvf_tso()
2065 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; in igbvf_tso()
2068 vlan_macip_lens = l4.hdr - ip.hdr; in igbvf_tso()
2069 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; in igbvf_tso()
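
Editor's note, worked example of the TSO header math above for plain Ethernet + IPv4 + TCP with no options (14 + 20 + 20 bytes): l4_offset = 34 and l4.tcp->doff = 5, so *hdr_len = 5 * 4 + 34 = 54; paylen = skb->len - 34 is folded into the TCP pseudo-header checksum via csum_replace_by_diff(); mss_l4len_idx carries L4LEN = 54 - 34 = 20 alongside gso_size as the MSS; and vlan_macip_lens packs IPLEN = 20 (l4.hdr - ip.hdr) with MACLEN = 14 shifted by E1000_ADVTXD_MACLEN_SHIFT.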
2083 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igbvf_tx_csum()
2090 switch (skb->csum_offset) { in igbvf_tx_csum()
2108 vlan_macip_lens = skb_checksum_start_offset(skb) - in igbvf_tx_csum()
2123 if (igbvf_desc_unused(adapter->tx_ring) >= size) in igbvf_maybe_stop_tx()
2135 if (igbvf_desc_unused(adapter->tx_ring) < size) in igbvf_maybe_stop_tx()
2136 return -EBUSY; in igbvf_maybe_stop_tx()
2140 ++adapter->restart_queue; in igbvf_maybe_stop_tx()
2152 struct pci_dev *pdev = adapter->pdev; in igbvf_tx_map_adv()
2157 i = tx_ring->next_to_use; in igbvf_tx_map_adv()
2159 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2161 buffer_info->length = len; in igbvf_tx_map_adv()
2163 buffer_info->time_stamp = jiffies; in igbvf_tx_map_adv()
2164 buffer_info->mapped_as_page = false; in igbvf_tx_map_adv()
2165 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, in igbvf_tx_map_adv()
2167 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in igbvf_tx_map_adv()
2170 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in igbvf_tx_map_adv()
2175 if (i == tx_ring->count) in igbvf_tx_map_adv()
2178 frag = &skb_shinfo(skb)->frags[f]; in igbvf_tx_map_adv()
2181 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2183 buffer_info->length = len; in igbvf_tx_map_adv()
2184 buffer_info->time_stamp = jiffies; in igbvf_tx_map_adv()
2185 buffer_info->mapped_as_page = true; in igbvf_tx_map_adv()
2186 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, in igbvf_tx_map_adv()
2188 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in igbvf_tx_map_adv()
2192 tx_ring->buffer_info[i].skb = skb; in igbvf_tx_map_adv()
2197 dev_err(&pdev->dev, "TX DMA map failed\n"); in igbvf_tx_map_adv()
2200 buffer_info->dma = 0; in igbvf_tx_map_adv()
2201 buffer_info->time_stamp = 0; in igbvf_tx_map_adv()
2202 buffer_info->length = 0; in igbvf_tx_map_adv()
2203 buffer_info->mapped_as_page = false; in igbvf_tx_map_adv()
2205 count--; in igbvf_tx_map_adv()
2208 while (count--) { in igbvf_tx_map_adv()
2210 i += tx_ring->count; in igbvf_tx_map_adv()
2211 i--; in igbvf_tx_map_adv()
2212 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_map_adv()
2250 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); in igbvf_tx_queue_adv()
2252 i = tx_ring->next_to_use; in igbvf_tx_queue_adv()
2253 while (count--) { in igbvf_tx_queue_adv()
2254 buffer_info = &tx_ring->buffer_info[i]; in igbvf_tx_queue_adv()
2256 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in igbvf_tx_queue_adv()
2257 tx_desc->read.cmd_type_len = in igbvf_tx_queue_adv()
2258 cpu_to_le32(cmd_type_len | buffer_info->length); in igbvf_tx_queue_adv()
2259 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igbvf_tx_queue_adv()
2261 if (i == tx_ring->count) in igbvf_tx_queue_adv()
2265 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); in igbvf_tx_queue_adv()
2268 * applicable for weak-ordered memory model archs, in igbvf_tx_queue_adv()
2269 * such as IA-64). in igbvf_tx_queue_adv()
2273 tx_ring->buffer_info[first].next_to_watch = tx_desc; in igbvf_tx_queue_adv()
2274 tx_ring->next_to_use = i; in igbvf_tx_queue_adv()
2275 writel(i, adapter->hw.hw_addr + tx_ring->tail); in igbvf_tx_queue_adv()
2289 if (test_bit(__IGBVF_DOWN, &adapter->state)) { in igbvf_xmit_frame_ring_adv()
2294 if (skb->len <= 0) { in igbvf_xmit_frame_ring_adv()
2301 * + 1 desc for skb->data, in igbvf_xmit_frame_ring_adv()
2305 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { in igbvf_xmit_frame_ring_adv()
2319 first = tx_ring->next_to_use; in igbvf_xmit_frame_ring_adv()
2330 (skb->ip_summed == CHECKSUM_PARTIAL)) in igbvf_xmit_frame_ring_adv()
2340 first, skb->len, hdr_len); in igbvf_xmit_frame_ring_adv()
2345 tx_ring->buffer_info[first].time_stamp = 0; in igbvf_xmit_frame_ring_adv()
2346 tx_ring->next_to_use = first; in igbvf_xmit_frame_ring_adv()
2358 if (test_bit(__IGBVF_DOWN, &adapter->state)) { in igbvf_xmit_frame()
2363 tx_ring = &adapter->tx_ring[0]; in igbvf_xmit_frame()
2369 * igbvf_tx_timeout - Respond to a Tx Hang
2378 adapter->tx_timeout_count++; in igbvf_tx_timeout()
2379 schedule_work(&adapter->reset_task); in igbvf_tx_timeout()
2392 * igbvf_change_mtu - Change the Maximum Transfer Unit
2403 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_change_mtu()
2406 adapter->max_frame_size = max_frame; in igbvf_change_mtu()
2413 * i.e. RXBUFFER_2048 --> size-4096 slab in igbvf_change_mtu()
2419 adapter->rx_buffer_len = 1024; in igbvf_change_mtu()
2421 adapter->rx_buffer_len = 2048; in igbvf_change_mtu()
2424 adapter->rx_buffer_len = 16384; in igbvf_change_mtu()
2426 adapter->rx_buffer_len = PAGE_SIZE / 2; in igbvf_change_mtu()
2432 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + in igbvf_change_mtu()
2436 netdev->mtu, new_mtu); in igbvf_change_mtu()
2437 WRITE_ONCE(netdev->mtu, new_mtu); in igbvf_change_mtu()
2444 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_change_mtu()
2453 return -EOPNOTSUPP; in igbvf_ioctl()
2465 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); in igbvf_suspend()
2500 igbvf_suspend(&pdev->dev); in igbvf_shutdown()
2504 /* Polling 'interrupt' - used by things like netconsole to send skbs
2505 * without having to re-enable interrupts. It's not called while
2512 disable_irq(adapter->pdev->irq); in igbvf_netpoll()
2514 igbvf_clean_tx_irq(adapter->tx_ring); in igbvf_netpoll()
2516 enable_irq(adapter->pdev->irq); in igbvf_netpoll()
2521 * igbvf_io_error_detected - called when PCI error is detected
2548 * igbvf_io_slot_reset - called after the pci bus has been reset.
2551 * Restart the card from scratch, as if from a cold-boot. Implementation
2552 * resembles the first-half of the igbvf_resume routine.
2560 dev_err(&pdev->dev, in igbvf_io_slot_reset()
2561 "Cannot re-enable PCI device after reset.\n"); in igbvf_io_slot_reset()
2572 * igbvf_io_resume - called when traffic can start flowing again.
2577 * second-half of the igbvf_resume routine.
2586 dev_err(&pdev->dev, in igbvf_io_resume()
2596 * igbvf_io_prepare - prepare device driver for PCI reset
2604 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) in igbvf_io_prepare()
2610 * igbvf_io_reset_done - PCI reset done, device driver reset can begin
2619 clear_bit(__IGBVF_RESETTING, &adapter->state); in igbvf_io_reset_done()
2624 struct e1000_hw *hw = &adapter->hw; in igbvf_print_device_info()
2625 struct net_device *netdev = adapter->netdev; in igbvf_print_device_info()
2626 struct pci_dev *pdev = adapter->pdev; in igbvf_print_device_info()
2628 if (hw->mac.type == e1000_vfadapt_i350) in igbvf_print_device_info()
2629 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); in igbvf_print_device_info()
2631 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); in igbvf_print_device_info()
2632 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); in igbvf_print_device_info()
2641 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; in igbvf_set_features()
2643 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; in igbvf_set_features()
2666 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igbvf_features_check()
2676 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igbvf_features_check()
2701 * igbvf_probe - Device Initialization Routine
2716 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; in igbvf_probe()
2724 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igbvf_probe()
2726 dev_err(&pdev->dev, in igbvf_probe()
2737 err = -ENOMEM; in igbvf_probe()
2742 SET_NETDEV_DEV(netdev, &pdev->dev); in igbvf_probe()
2746 hw = &adapter->hw; in igbvf_probe()
2747 adapter->netdev = netdev; in igbvf_probe()
2748 adapter->pdev = pdev; in igbvf_probe()
2749 adapter->ei = ei; in igbvf_probe()
2750 adapter->pba = ei->pba; in igbvf_probe()
2751 adapter->flags = ei->flags; in igbvf_probe()
2752 adapter->hw.back = adapter; in igbvf_probe()
2753 adapter->hw.mac.type = ei->mac; in igbvf_probe()
2754 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igbvf_probe()
2758 hw->vendor_id = pdev->vendor; in igbvf_probe()
2759 hw->device_id = pdev->device; in igbvf_probe()
2760 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igbvf_probe()
2761 hw->subsystem_device_id = pdev->subsystem_device; in igbvf_probe()
2762 hw->revision_id = pdev->revision; in igbvf_probe()
2764 err = -EIO; in igbvf_probe()
2765 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), in igbvf_probe()
2768 if (!adapter->hw.hw_addr) in igbvf_probe()
2771 if (ei->get_variants) { in igbvf_probe()
2772 err = ei->get_variants(adapter); in igbvf_probe()
2783 netdev->netdev_ops = &igbvf_netdev_ops; in igbvf_probe()
2786 netdev->watchdog_timeo = 5 * HZ; in igbvf_probe()
2787 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); in igbvf_probe()
2789 adapter->bd_number = cards_found++; in igbvf_probe()
2791 netdev->hw_features = NETIF_F_SG | in igbvf_probe()
2805 netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; in igbvf_probe()
2806 netdev->hw_features |= NETIF_F_GSO_PARTIAL | in igbvf_probe()
2809 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in igbvf_probe()
2811 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igbvf_probe()
2812 netdev->mpls_features |= NETIF_F_HW_CSUM; in igbvf_probe()
2813 netdev->hw_enc_features |= netdev->vlan_features; in igbvf_probe()
2816 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in igbvf_probe()
2820 /* MTU range: 68 - 9216 */ in igbvf_probe()
2821 netdev->min_mtu = ETH_MIN_MTU; in igbvf_probe()
2822 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igbvf_probe()
2824 spin_lock_bh(&hw->mbx_lock); in igbvf_probe()
2827 err = hw->mac.ops.reset_hw(hw); in igbvf_probe()
2829 dev_info(&pdev->dev, in igbvf_probe()
2832 err = hw->mac.ops.read_mac_addr(hw); in igbvf_probe()
2834 dev_info(&pdev->dev, "Error reading MAC address.\n"); in igbvf_probe()
2835 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in igbvf_probe()
2836 dev_info(&pdev->dev, in igbvf_probe()
2838 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in igbvf_probe()
2841 spin_unlock_bh(&hw->mbx_lock); in igbvf_probe()
2843 if (!is_valid_ether_addr(netdev->dev_addr)) { in igbvf_probe()
2844 dev_info(&pdev->dev, "Assigning random MAC address.\n"); in igbvf_probe()
2846 memcpy(adapter->hw.mac.addr, netdev->dev_addr, in igbvf_probe()
2847 netdev->addr_len); in igbvf_probe()
2850 timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0); in igbvf_probe()
2852 INIT_WORK(&adapter->reset_task, igbvf_reset_task); in igbvf_probe()
2853 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); in igbvf_probe()
2856 adapter->rx_ring->count = 1024; in igbvf_probe()
2857 adapter->tx_ring->count = 1024; in igbvf_probe()
2862 /* set hardware-specific flags */ in igbvf_probe()
2863 if (adapter->hw.mac.type == e1000_vfadapt_i350) in igbvf_probe()
2864 adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP; in igbvf_probe()
2866 strcpy(netdev->name, "eth%d"); in igbvf_probe()
2882 netif_napi_del(&adapter->rx_ring->napi); in igbvf_probe()
2883 kfree(adapter->tx_ring); in igbvf_probe()
2884 kfree(adapter->rx_ring); in igbvf_probe()
2888 iounmap(adapter->hw.hw_addr); in igbvf_probe()
2900 * igbvf_remove - Device Removal Routine
2905 * Hot-Plug event, or because the driver is going to be removed from
2912 struct e1000_hw *hw = &adapter->hw; in igbvf_remove()
2917 set_bit(__IGBVF_DOWN, &adapter->state); in igbvf_remove()
2918 del_timer_sync(&adapter->watchdog_timer); in igbvf_remove()
2920 cancel_work_sync(&adapter->reset_task); in igbvf_remove()
2921 cancel_work_sync(&adapter->watchdog_task); in igbvf_remove()
2930 netif_napi_del(&adapter->rx_ring->napi); in igbvf_remove()
2931 kfree(adapter->tx_ring); in igbvf_remove()
2932 kfree(adapter->rx_ring); in igbvf_remove()
2934 iounmap(hw->hw_addr); in igbvf_remove()
2935 if (hw->flash_address) in igbvf_remove()
2936 iounmap(hw->flash_address); in igbvf_remove()
2974 * igbvf_init_module - Driver Registration Routine
2993 * igbvf_exit_module - Driver Exit Cleanup Routine