Lines matching "pch-msi-1"

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
36 static int debug = -1;
112 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
127 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) in __ew32_prepare()
133 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in __ew32()
136 writel(val, hw->hw_addr + reg); in __ew32()
140 * e1000_regdump - register printout routine
150 switch (reginfo->ofs) { in e1000_regdump()
164 pr_info("%-15s %08x\n", in e1000_regdump()
165 reginfo->name, __er32(hw, reginfo->ofs)); in e1000_regdump()
169 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
170 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
179 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
180 ps_page = &bi->ps_pages[i]; in e1000e_dump_ps_pages()
182 if (ps_page->page) { in e1000e_dump_ps_pages()
185 16, 1, page_address(ps_page->page), in e1000e_dump_ps_pages()
192 * e1000e_dump - Print registers, Tx-ring and Rx-ring
197 struct net_device *netdev = adapter->netdev; in e1000e_dump()
198 struct e1000_hw *hw = &adapter->hw; in e1000e_dump()
200 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump()
207 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump()
224 dev_info(&adapter->pdev->dev, "Net device Info\n"); in e1000e_dump()
226 pr_info("%-15s %016lX %016lX\n", netdev->name, in e1000e_dump()
227 netdev->state, dev_trans_start(netdev)); in e1000e_dump()
231 dev_info(&adapter->pdev->dev, "Register Dump\n"); in e1000e_dump()
234 reginfo->name; reginfo++) { in e1000e_dump()
242 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); in e1000e_dump()
243 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in e1000e_dump()
244 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
246 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
247 (unsigned long long)buffer_info->dma, in e1000e_dump()
248 buffer_info->length, in e1000e_dump()
249 buffer_info->next_to_watch, in e1000e_dump()
250 (unsigned long long)buffer_info->time_stamp); in e1000e_dump()
256 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); in e1000e_dump()
258 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) in e1000e_dump()
261 * +--------------------------------------------------------------+ in e1000e_dump()
263 * +--------------------------------------------------------------+ in e1000e_dump()
265 * +--------------------------------------------------------------+ in e1000e_dump()
270 * +----------------------------------------------------------------+ in e1000e_dump()
272 * +----------------------------------------------------------------+ in e1000e_dump()
274 * +----------------------------------------------------------------+ in e1000e_dump()
278 * +----------------------------------------------------------------+ in e1000e_dump()
280 * +----------------------------------------------------------------+ in e1000e_dump()
282 * +----------------------------------------------------------------+ in e1000e_dump()
285 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); in e1000e_dump()
286 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); in e1000e_dump()
287 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); in e1000e_dump()
288 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
291 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
293 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
295 else if (i == tx_ring->next_to_use) in e1000e_dump()
297 else if (i == tx_ring->next_to_clean) in e1000e_dump()
302 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : in e1000e_dump()
303 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), in e1000e_dump()
305 (unsigned long long)le64_to_cpu(u0->a), in e1000e_dump()
306 (unsigned long long)le64_to_cpu(u0->b), in e1000e_dump()
307 (unsigned long long)buffer_info->dma, in e1000e_dump()
308 buffer_info->length, buffer_info->next_to_watch, in e1000e_dump()
309 (unsigned long long)buffer_info->time_stamp, in e1000e_dump()
310 buffer_info->skb, next_desc); in e1000e_dump()
312 if (netif_msg_pktdata(adapter) && buffer_info->skb) in e1000e_dump()
314 16, 1, buffer_info->skb->data, in e1000e_dump()
315 buffer_info->skb->len, true); in e1000e_dump()
320 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); in e1000e_dump()
323 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
329 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); in e1000e_dump()
330 switch (adapter->rx_ps_pages) { in e1000e_dump()
331 case 1: in e1000e_dump()
336 * +-----------------------------------------------------+ in e1000e_dump()
338 * +-----------------------------------------------------+ in e1000e_dump()
339 * 8 | Buffer Address 1 [63:0] | in e1000e_dump()
340 * +-----------------------------------------------------+ in e1000e_dump()
342 * +-----------------------------------------------------+ in e1000e_dump()
344 * +-----------------------------------------------------+ in e1000e_dump()
346 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); in e1000e_dump()
347 /* [Extended] Receive Descriptor (Write-Back) Format in e1000e_dump()
350 * +------------------------------------------------------+ in e1000e_dump()
353 * +------------------------------------------------------+ in e1000e_dump()
355 * +------------------------------------------------------+ in e1000e_dump()
358 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); in e1000e_dump()
359 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
361 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
365 le32_to_cpu(rx_desc_ps->wb.middle.status_error); in e1000e_dump()
367 if (i == rx_ring->next_to_use) in e1000e_dump()
369 else if (i == rx_ring->next_to_clean) in e1000e_dump()
376 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
378 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
379 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
380 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
381 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
382 buffer_info->skb, next_desc); in e1000e_dump()
386 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
387 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
388 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
389 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
390 (unsigned long long)buffer_info->dma, in e1000e_dump()
391 buffer_info->skb, next_desc); in e1000e_dump()
403 * +-----------------------------------------------------+ in e1000e_dump()
405 * +-----------------------------------------------------+ in e1000e_dump()
407 * +-----------------------------------------------------+ in e1000e_dump()
409 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); in e1000e_dump()
410 /* Extended Receive Descriptor (Write-Back) Format in e1000e_dump()
413 * +------------------------------------------------------+ in e1000e_dump()
415 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | in e1000e_dump()
418 * +------------------------------------------------------+ in e1000e_dump()
420 * +------------------------------------------------------+ in e1000e_dump()
423 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); in e1000e_dump()
425 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
428 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
431 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000e_dump()
433 if (i == rx_ring->next_to_use) in e1000e_dump()
435 else if (i == rx_ring->next_to_clean) in e1000e_dump()
442 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
444 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
445 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
446 buffer_info->skb, next_desc); in e1000e_dump()
450 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
451 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
452 (unsigned long long)buffer_info->dma, in e1000e_dump()
453 buffer_info->skb, next_desc); in e1000e_dump()
456 buffer_info->skb) in e1000e_dump()
459 1, in e1000e_dump()
460 buffer_info->skb->data, in e1000e_dump()
461 adapter->rx_buffer_len, in e1000e_dump()
469 * e1000_desc_unused - calculate the number of unused descriptors
474 if (ring->next_to_clean > ring->next_to_use) in e1000_desc_unused()
475 return ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
477 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
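
The two branches above are the classic "keep one slot open" ring computation; a standalone worked sketch (illustrative values, not driver code):

#include <stdio.h>

/* Mirror of the e1000_desc_unused() arithmetic shown above. */
static unsigned int desc_unused(unsigned int count,
				unsigned int next_to_clean,
				unsigned int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* 256-entry ring, producer wrapped past the consumer:
	 * 250 - 10 - 1 = 239; the "- 1" keeps one slot open so a
	 * full ring is distinguishable from an empty one.
	 */
	printf("%u\n", desc_unused(256, 250, 10));	/* 239 */
	printf("%u\n", desc_unused(256, 0, 0));		/* 255 (empty) */
	return 0;
}
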
481 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
501 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
502 ns = timecounter_cyc2time(&adapter->tc, systim); in e1000e_systim_to_hwtstamp()
503 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
506 hwtstamps->hwtstamp = ns_to_ktime(ns); in e1000e_systim_to_hwtstamp()
510 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
522 struct e1000_hw *hw = &adapter->hw; in e1000e_rx_hwtstamp()
525 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || in e1000e_rx_hwtstamp()
541 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; in e1000e_rx_hwtstamp()
545 * e1000_receive_skb - helper function to handle Rx indications
560 skb->protocol = eth_type_trans(skb, netdev); in e1000_receive_skb()
565 napi_gro_receive(&adapter->napi, skb); in e1000_receive_skb()
569 * e1000_rx_checksum - Receive Checksum Offload
583 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) in e1000_rx_checksum()
593 adapter->hw_csum_err++; in e1000_rx_checksum()
602 skb->ip_summed = CHECKSUM_UNNECESSARY; in e1000_rx_checksum()
603 adapter->hw_csum_good++; in e1000_rx_checksum()
608 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_update_rdt_wa()
609 struct e1000_hw *hw = &adapter->hw; in e1000e_update_rdt_wa()
612 writel(i, rx_ring->tail); in e1000e_update_rdt_wa()
614 if (unlikely(i != readl(rx_ring->tail))) { in e1000e_update_rdt_wa()
618 e_err("ME firmware caused invalid RDT - resetting\n"); in e1000e_update_rdt_wa()
619 schedule_work(&adapter->reset_task); in e1000e_update_rdt_wa()
625 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_update_tdt_wa()
626 struct e1000_hw *hw = &adapter->hw; in e1000e_update_tdt_wa()
629 writel(i, tx_ring->tail); in e1000e_update_tdt_wa()
631 if (unlikely(i != readl(tx_ring->tail))) { in e1000e_update_tdt_wa()
635 e_err("ME firmware caused invalid TDT - resetting\n"); in e1000e_update_tdt_wa()
636 schedule_work(&adapter->reset_task); in e1000e_update_tdt_wa()
641 * e1000_alloc_rx_buffers - Replace used receive buffers
649 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers()
650 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers()
651 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers()
656 unsigned int bufsz = adapter->rx_buffer_len; in e1000_alloc_rx_buffers()
658 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers()
659 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
661 while (cleaned_count--) { in e1000_alloc_rx_buffers()
662 skb = buffer_info->skb; in e1000_alloc_rx_buffers()
671 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers()
675 buffer_info->skb = skb; in e1000_alloc_rx_buffers()
677 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers()
678 adapter->rx_buffer_len, in e1000_alloc_rx_buffers()
680 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers()
681 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers()
682 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers()
687 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers()
689 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers()
692 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers()
693 * such as IA-64). in e1000_alloc_rx_buffers()
696 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers()
699 writel(i, rx_ring->tail); in e1000_alloc_rx_buffers()
702 if (i == rx_ring->count) in e1000_alloc_rx_buffers()
704 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
707 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers()
711 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
719 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers_ps()
720 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers_ps()
721 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers_ps()
728 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers_ps()
729 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
731 while (cleaned_count--) { in e1000_alloc_rx_buffers_ps()
735 ps_page = &buffer_info->ps_pages[j]; in e1000_alloc_rx_buffers_ps()
736 if (j >= adapter->rx_ps_pages) { in e1000_alloc_rx_buffers_ps()
738 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
742 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
743 ps_page->page = alloc_page(gfp); in e1000_alloc_rx_buffers_ps()
744 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
745 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
748 ps_page->dma = dma_map_page(&pdev->dev, in e1000_alloc_rx_buffers_ps()
749 ps_page->page, in e1000_alloc_rx_buffers_ps()
752 if (dma_mapping_error(&pdev->dev, in e1000_alloc_rx_buffers_ps()
753 ps_page->dma)) { in e1000_alloc_rx_buffers_ps()
754 dev_err(&adapter->pdev->dev, in e1000_alloc_rx_buffers_ps()
756 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
761 * didn't change because each write-back in e1000_alloc_rx_buffers_ps()
764 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
765 cpu_to_le64(ps_page->dma); in e1000_alloc_rx_buffers_ps()
768 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
772 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
776 buffer_info->skb = skb; in e1000_alloc_rx_buffers_ps()
777 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers_ps()
778 adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
780 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers_ps()
781 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers_ps()
782 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
785 buffer_info->skb = NULL; in e1000_alloc_rx_buffers_ps()
789 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
791 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers_ps()
794 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers_ps()
795 * such as IA-64). in e1000_alloc_rx_buffers_ps()
798 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers_ps()
799 e1000e_update_rdt_wa(rx_ring, i << 1); in e1000_alloc_rx_buffers_ps()
801 writel(i << 1, rx_ring->tail); in e1000_alloc_rx_buffers_ps()
805 if (i == rx_ring->count) in e1000_alloc_rx_buffers_ps()
807 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
811 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers_ps()
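
A note on the i << 1 tail writes a few lines up: a plausible reading (an assumption here, not stated in this excerpt) is that RDT counts 16-byte descriptor slots while a packet-split descriptor occupies 32 bytes, so each logical ring index spans two slots. A minimal sketch of that relationship:

/* Sketch under the assumption above; names are illustrative. */
enum { SLOT_SIZE = 16, PS_DESC_SIZE = 32 };

unsigned int ps_tail_from_index(unsigned int i)
{
	return i * (PS_DESC_SIZE / SLOT_SIZE);	/* equivalent to i << 1 */
}
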
815 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
824 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_jumbo_rx_buffers()
825 struct net_device *netdev = adapter->netdev; in e1000_alloc_jumbo_rx_buffers()
826 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_jumbo_rx_buffers()
831 unsigned int bufsz = 256 - 16; /* for skb_reserve */ in e1000_alloc_jumbo_rx_buffers()
833 i = rx_ring->next_to_use; in e1000_alloc_jumbo_rx_buffers()
834 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
836 while (cleaned_count--) { in e1000_alloc_jumbo_rx_buffers()
837 skb = buffer_info->skb; in e1000_alloc_jumbo_rx_buffers()
846 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
850 buffer_info->skb = skb; in e1000_alloc_jumbo_rx_buffers()
853 if (!buffer_info->page) { in e1000_alloc_jumbo_rx_buffers()
854 buffer_info->page = alloc_page(gfp); in e1000_alloc_jumbo_rx_buffers()
855 if (unlikely(!buffer_info->page)) { in e1000_alloc_jumbo_rx_buffers()
856 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
861 if (!buffer_info->dma) { in e1000_alloc_jumbo_rx_buffers()
862 buffer_info->dma = dma_map_page(&pdev->dev, in e1000_alloc_jumbo_rx_buffers()
863 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
866 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_jumbo_rx_buffers()
867 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
873 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_jumbo_rx_buffers()
875 if (unlikely(++i == rx_ring->count)) in e1000_alloc_jumbo_rx_buffers()
877 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
880 if (likely(rx_ring->next_to_use != i)) { in e1000_alloc_jumbo_rx_buffers()
881 rx_ring->next_to_use = i; in e1000_alloc_jumbo_rx_buffers()
882 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
883 i = (rx_ring->count - 1); in e1000_alloc_jumbo_rx_buffers()
887 * applicable for weak-ordered memory model archs, in e1000_alloc_jumbo_rx_buffers()
888 * such as IA-64). in e1000_alloc_jumbo_rx_buffers()
891 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_jumbo_rx_buffers()
894 writel(i, rx_ring->tail); in e1000_alloc_jumbo_rx_buffers()
901 if (netdev->features & NETIF_F_RXHASH) in e1000_rx_hash()
906 * e1000_clean_rx_irq - Send received data up the network stack
917 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq()
918 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq()
919 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq()
920 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq()
929 i = rx_ring->next_to_clean; in e1000_clean_rx_irq()
931 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
932 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
942 skb = buffer_info->skb; in e1000_clean_rx_irq()
943 buffer_info->skb = NULL; in e1000_clean_rx_irq()
945 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq()
948 if (i == rx_ring->count) in e1000_clean_rx_irq()
953 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
957 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq()
958 adapter->rx_buffer_len, DMA_FROM_DEVICE); in e1000_clean_rx_irq()
959 buffer_info->dma = 0; in e1000_clean_rx_irq()
961 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_rx_irq()
970 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
972 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq()
976 buffer_info->skb = skb; in e1000_clean_rx_irq()
978 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
983 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq()
985 buffer_info->skb = skb; in e1000_clean_rx_irq()
990 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq()
995 if (netdev->features & NETIF_F_RXFCS) in e1000_clean_rx_irq()
996 total_rx_bytes -= 4; in e1000_clean_rx_irq()
998 length -= 4; in e1000_clean_rx_irq()
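
A concrete trace of the FCS handling above, as a hedged sketch (hypothetical 64-byte frame; the helper name is invented for illustration):

#include <stdio.h>

/* With CRC stripping disabled, the 4-byte FCS arrives attached. */
static unsigned int delivered_len(unsigned int frame_len, int rxfcs)
{
	return rxfcs ? frame_len : frame_len - 4;  /* keep or trim FCS */
}

int main(void)
{
	/* 64-byte frame on the wire, FCS included; byte statistics
	 * exclude the FCS in both cases (hence total_rx_bytes -= 4).
	 */
	printf("RXFCS on:  deliver %u, count %u\n", delivered_len(64, 1), 64 - 4);
	printf("RXFCS off: deliver %u, count %u\n", delivered_len(64, 0), 64 - 4);
	return 0;
}
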
1010 napi_alloc_skb(&adapter->napi, length); in e1000_clean_rx_irq()
1013 -NET_IP_ALIGN, in e1000_clean_rx_irq()
1014 (skb->data - in e1000_clean_rx_irq()
1019 buffer_info->skb = skb; in e1000_clean_rx_irq()
1030 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq()
1033 rx_desc->wb.upper.vlan); in e1000_clean_rx_irq()
1036 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1040 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq()
1049 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
1051 rx_ring->next_to_clean = i; in e1000_clean_rx_irq()
1055 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq()
1057 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq()
1058 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq()
1066 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_put_txbuf()
1068 if (buffer_info->dma) { in e1000_put_txbuf()
1069 if (buffer_info->mapped_as_page) in e1000_put_txbuf()
1070 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1071 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1073 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1074 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1075 buffer_info->dma = 0; in e1000_put_txbuf()
1077 if (buffer_info->skb) { in e1000_put_txbuf()
1079 dev_kfree_skb_any(buffer_info->skb); in e1000_put_txbuf()
1081 dev_consume_skb_any(buffer_info->skb); in e1000_put_txbuf()
1082 buffer_info->skb = NULL; in e1000_put_txbuf()
1084 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1092 struct net_device *netdev = adapter->netdev; in e1000_print_hw_hang()
1093 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_print_hw_hang()
1094 unsigned int i = tx_ring->next_to_clean; in e1000_print_hw_hang()
1095 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; in e1000_print_hw_hang()
1097 struct e1000_hw *hw = &adapter->hw; in e1000_print_hw_hang()
1101 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_print_hw_hang()
1104 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { in e1000_print_hw_hang()
1105 /* May be blocked on write-back; flush and detect again in e1000_print_hw_hang()
1108 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1114 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1117 adapter->tx_hang_recheck = true; in e1000_print_hw_hang()
1120 adapter->tx_hang_recheck = false; in e1000_print_hw_hang()
1134 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); in e1000_print_hw_hang()
1149 "PHY 1000BASE-T Status <%x>\n" in e1000_print_hw_hang()
1152 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, in e1000_print_hw_hang()
1153 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, in e1000_print_hw_hang()
1154 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), in e1000_print_hw_hang()
1160 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) in e1000_print_hw_hang()
1165 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1176 struct e1000_hw *hw = &adapter->hw; in e1000e_tx_hwtstamp_work()
1179 struct sk_buff *skb = adapter->tx_hwtstamp_skb; in e1000e_tx_hwtstamp_work()
1191 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1196 } else if (time_after(jiffies, adapter->tx_hwtstamp_start in e1000e_tx_hwtstamp_work()
1197 + adapter->tx_timeout_factor * HZ)) { in e1000e_tx_hwtstamp_work()
1198 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); in e1000e_tx_hwtstamp_work()
1199 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1200 adapter->tx_hwtstamp_timeouts++; in e1000e_tx_hwtstamp_work()
1204 schedule_work(&adapter->tx_hwtstamp_work); in e1000e_tx_hwtstamp_work()
1209 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1217 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_irq()
1218 struct net_device *netdev = adapter->netdev; in e1000_clean_tx_irq()
1219 struct e1000_hw *hw = &adapter->hw; in e1000_clean_tx_irq()
1227 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
1228 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1231 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && in e1000_clean_tx_irq()
1232 (count < tx_ring->count)) { in e1000_clean_tx_irq()
1238 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
1242 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
1243 total_tx_bytes += buffer_info->bytecount; in e1000_clean_tx_irq()
1244 if (buffer_info->skb) { in e1000_clean_tx_irq()
1245 bytes_compl += buffer_info->skb->len; in e1000_clean_tx_irq()
1251 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1254 if (i == tx_ring->count) in e1000_clean_tx_irq()
1258 if (i == tx_ring->next_to_use) in e1000_clean_tx_irq()
1260 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1264 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
1277 !(test_bit(__E1000_DOWN, &adapter->state))) { in e1000_clean_tx_irq()
1279 ++adapter->restart_queue; in e1000_clean_tx_irq()
1283 if (adapter->detect_tx_hung) { in e1000_clean_tx_irq()
1287 adapter->detect_tx_hung = false; in e1000_clean_tx_irq()
1288 if (tx_ring->buffer_info[i].time_stamp && in e1000_clean_tx_irq()
1289 time_after(jiffies, tx_ring->buffer_info[i].time_stamp in e1000_clean_tx_irq()
1290 + (adapter->tx_timeout_factor * HZ)) && in e1000_clean_tx_irq()
1292 schedule_work(&adapter->print_hang_task); in e1000_clean_tx_irq()
1294 adapter->tx_hang_recheck = false; in e1000_clean_tx_irq()
1296 adapter->total_tx_bytes += total_tx_bytes; in e1000_clean_tx_irq()
1297 adapter->total_tx_packets += total_tx_packets; in e1000_clean_tx_irq()
1298 return count < tx_ring->count; in e1000_clean_tx_irq()
1302 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1313 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq_ps()
1314 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq_ps()
1316 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq_ps()
1317 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq_ps()
1327 i = rx_ring->next_to_clean; in e1000_clean_rx_irq_ps()
1329 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1330 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1336 skb = buffer_info->skb; in e1000_clean_rx_irq_ps()
1340 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq_ps()
1343 if (i == rx_ring->count) in e1000_clean_rx_irq_ps()
1348 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1352 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq_ps()
1353 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); in e1000_clean_rx_irq_ps()
1354 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1358 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1360 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq_ps()
1364 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1369 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq_ps()
1374 length = le16_to_cpu(rx_desc->wb.middle.length0); in e1000_clean_rx_irq_ps()
1389 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1396 ((length + l1) <= adapter->rx_ps_bsize0)) { in e1000_clean_rx_irq_ps()
1397 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1399 dma_sync_single_for_cpu(&pdev->dev, in e1000_clean_rx_irq_ps()
1400 ps_page->dma, in e1000_clean_rx_irq_ps()
1404 page_address(ps_page->page), l1); in e1000_clean_rx_irq_ps()
1405 dma_sync_single_for_device(&pdev->dev, in e1000_clean_rx_irq_ps()
1406 ps_page->dma, in e1000_clean_rx_irq_ps()
1411 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1412 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1413 l1 -= 4; in e1000_clean_rx_irq_ps()
1422 length = le16_to_cpu(rx_desc->wb.upper.length[j]); in e1000_clean_rx_irq_ps()
1426 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_irq_ps()
1427 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_irq_ps()
1429 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1430 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1431 ps_page->page = NULL; in e1000_clean_rx_irq_ps()
1432 skb->len += length; in e1000_clean_rx_irq_ps()
1433 skb->data_len += length; in e1000_clean_rx_irq_ps()
1434 skb->truesize += PAGE_SIZE; in e1000_clean_rx_irq_ps()
1440 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1441 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1442 pskb_trim(skb, skb->len - 4); in e1000_clean_rx_irq_ps()
1446 total_rx_bytes += skb->len; in e1000_clean_rx_irq_ps()
1451 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq_ps()
1453 if (rx_desc->wb.upper.header_status & in e1000_clean_rx_irq_ps()
1455 adapter->rx_hdr_split++; in e1000_clean_rx_irq_ps()
1458 rx_desc->wb.middle.vlan); in e1000_clean_rx_irq_ps()
1461 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1462 buffer_info->skb = NULL; in e1000_clean_rx_irq_ps()
1466 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq_ps()
1475 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1477 rx_ring->next_to_clean = i; in e1000_clean_rx_irq_ps()
1481 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq_ps()
1483 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq_ps()
1484 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq_ps()
1491 bi->page = NULL; in e1000_consume_page()
1492 skb->len += length; in e1000_consume_page()
1493 skb->data_len += length; in e1000_consume_page()
1494 skb->truesize += PAGE_SIZE; in e1000_consume_page()
1498 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1509 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_jumbo_rx_irq()
1510 struct net_device *netdev = adapter->netdev; in e1000_clean_jumbo_rx_irq()
1511 struct pci_dev *pdev = adapter->pdev; in e1000_clean_jumbo_rx_irq()
1521 i = rx_ring->next_to_clean; in e1000_clean_jumbo_rx_irq()
1523 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1524 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1534 skb = buffer_info->skb; in e1000_clean_jumbo_rx_irq()
1535 buffer_info->skb = NULL; in e1000_clean_jumbo_rx_irq()
1538 if (i == rx_ring->count) in e1000_clean_jumbo_rx_irq()
1543 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1547 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, in e1000_clean_jumbo_rx_irq()
1549 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1551 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_jumbo_rx_irq()
1556 !(netdev->features & NETIF_F_RXALL)))) { in e1000_clean_jumbo_rx_irq()
1558 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1560 if (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1561 dev_kfree_skb_irq(rx_ring->rx_skb_top); in e1000_clean_jumbo_rx_irq()
1562 rx_ring->rx_skb_top = NULL; in e1000_clean_jumbo_rx_irq()
1565 #define rxtop (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1571 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1576 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1577 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1579 /* re-use the skb, only consumed the page */ in e1000_clean_jumbo_rx_irq()
1580 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1588 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1589 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1591 /* re-use the current skb, we only consumed the in e1000_clean_jumbo_rx_irq()
1594 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1605 page_address(buffer_info->page), in e1000_clean_jumbo_rx_irq()
1607 /* re-use the page, so don't erase in e1000_clean_jumbo_rx_irq()
1608 * buffer_info->page in e1000_clean_jumbo_rx_irq()
1613 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1624 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_jumbo_rx_irq()
1627 total_rx_bytes += skb->len; in e1000_clean_jumbo_rx_irq()
1630 /* eth type trans needs skb->data to point to something */ in e1000_clean_jumbo_rx_irq()
1638 rx_desc->wb.upper.vlan); in e1000_clean_jumbo_rx_irq()
1641 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1645 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_jumbo_rx_irq()
1654 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1656 rx_ring->next_to_clean = i; in e1000_clean_jumbo_rx_irq()
1660 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_jumbo_rx_irq()
1662 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_jumbo_rx_irq()
1663 adapter->total_rx_packets += total_rx_packets; in e1000_clean_jumbo_rx_irq()
1668 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1673 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_ring()
1676 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_ring()
1680 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1681 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_ring()
1682 if (buffer_info->dma) { in e1000_clean_rx_ring()
1683 if (adapter->clean_rx == e1000_clean_rx_irq) in e1000_clean_rx_ring()
1684 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1685 adapter->rx_buffer_len, in e1000_clean_rx_ring()
1687 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) in e1000_clean_rx_ring()
1688 dma_unmap_page(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1690 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) in e1000_clean_rx_ring()
1691 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1692 adapter->rx_ps_bsize0, in e1000_clean_rx_ring()
1694 buffer_info->dma = 0; in e1000_clean_rx_ring()
1697 if (buffer_info->page) { in e1000_clean_rx_ring()
1698 put_page(buffer_info->page); in e1000_clean_rx_ring()
1699 buffer_info->page = NULL; in e1000_clean_rx_ring()
1702 if (buffer_info->skb) { in e1000_clean_rx_ring()
1703 dev_kfree_skb(buffer_info->skb); in e1000_clean_rx_ring()
1704 buffer_info->skb = NULL; in e1000_clean_rx_ring()
1708 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_ring()
1709 if (!ps_page->page) in e1000_clean_rx_ring()
1711 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_ring()
1713 ps_page->dma = 0; in e1000_clean_rx_ring()
1714 put_page(ps_page->page); in e1000_clean_rx_ring()
1715 ps_page->page = NULL; in e1000_clean_rx_ring()
1720 if (rx_ring->rx_skb_top) { in e1000_clean_rx_ring()
1721 dev_kfree_skb(rx_ring->rx_skb_top); in e1000_clean_rx_ring()
1722 rx_ring->rx_skb_top = NULL; in e1000_clean_rx_ring()
1726 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1728 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1729 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1730 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_ring()
1739 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_downshift_workaround()
1742 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); in e1000e_downshift_workaround()
1746 * e1000_intr_msi - Interrupt Handler
1754 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi()
1759 hw->mac.get_link_status = true; in e1000_intr_msi()
1760 /* ICH8 workaround: call gig speed drop workaround on cable in e1000_intr_msi()
1763 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr_msi()
1765 schedule_work(&adapter->downshift_task); in e1000_intr_msi()
1767 /* 80003ES2LAN workaround: for the packet buffer work-around on in e1000_intr_msi()
1772 adapter->flags & FLAG_RX_NEEDS_RESTART) { in e1000_intr_msi()
1777 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr_msi()
1780 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msi()
1781 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr_msi()
1785 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr_msi()
1788 adapter->corr_errors += in e1000_intr_msi()
1790 adapter->uncorr_errors += in e1000_intr_msi()
1794 schedule_work(&adapter->reset_task); in e1000_intr_msi()
1800 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msi()
1801 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1802 adapter->total_tx_packets = 0; in e1000_intr_msi()
1803 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1804 adapter->total_rx_packets = 0; in e1000_intr_msi()
1805 __napi_schedule(&adapter->napi); in e1000_intr_msi()
1812 * e1000_intr - Interrupt Handler
1820 struct e1000_hw *hw = &adapter->hw; in e1000_intr()
1823 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1826 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in e1000_intr()
1832 /* Interrupt Auto-Mask...upon reading ICR, in e1000_intr()
1838 hw->mac.get_link_status = true; in e1000_intr()
1839 /* ICH8 workaround: call gig speed drop workaround on cable in e1000_intr()
1842 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr()
1844 schedule_work(&adapter->downshift_task); in e1000_intr()
1846 /* 80003ES2LAN workaround: in e1000_intr()
1847 * for the packet buffer work-around on a link-down event; in e1000_intr()
1852 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { in e1000_intr()
1856 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr()
1859 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1860 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr()
1864 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr()
1867 adapter->corr_errors += in e1000_intr()
1869 adapter->uncorr_errors += in e1000_intr()
1873 schedule_work(&adapter->reset_task); in e1000_intr()
1879 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr()
1880 adapter->total_tx_bytes = 0; in e1000_intr()
1881 adapter->total_tx_packets = 0; in e1000_intr()
1882 adapter->total_rx_bytes = 0; in e1000_intr()
1883 adapter->total_rx_packets = 0; in e1000_intr()
1884 __napi_schedule(&adapter->napi); in e1000_intr()
1894 struct e1000_hw *hw = &adapter->hw; in e1000_msix_other()
1897 if (icr & adapter->eiac_mask) in e1000_msix_other()
1898 ew32(ICS, (icr & adapter->eiac_mask)); in e1000_msix_other()
1901 hw->mac.get_link_status = true; in e1000_msix_other()
1903 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1904 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_msix_other()
1907 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1917 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msix_tx()
1918 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_intr_msix_tx()
1920 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1921 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1925 ew32(ICS, tx_ring->ims_val); in e1000_intr_msix_tx()
1927 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msix_tx()
1928 ew32(IMS, adapter->tx_ring->ims_val); in e1000_intr_msix_tx()
1937 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_intr_msix_rx()
1942 if (rx_ring->set_itr) { in e1000_intr_msix_rx()
1943 u32 itr = rx_ring->itr_val ? in e1000_intr_msix_rx()
1944 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1946 writel(itr, rx_ring->itr_register); in e1000_intr_msix_rx()
1947 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
1950 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msix_rx()
1951 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1952 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1953 __napi_schedule(&adapter->napi); in e1000_intr_msix_rx()
1959 * e1000_configure_msix - Configure MSI-X hardware
1963 * generate MSI-X interrupts.
1967 struct e1000_hw *hw = &adapter->hw; in e1000_configure_msix()
1968 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_msix()
1969 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_msix()
1973 adapter->eiac_mask = 0; in e1000_configure_msix()
1975 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ in e1000_configure_msix()
1976 if (hw->mac.type == e1000_82574) { in e1000_configure_msix()
1984 rx_ring->ims_val = E1000_IMS_RXQ0; in e1000_configure_msix()
1985 adapter->eiac_mask |= rx_ring->ims_val; in e1000_configure_msix()
1986 if (rx_ring->itr_val) in e1000_configure_msix()
1987 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
1988 rx_ring->itr_register); in e1000_configure_msix()
1990 writel(1, rx_ring->itr_register); in e1000_configure_msix()
1994 tx_ring->ims_val = E1000_IMS_TXQ0; in e1000_configure_msix()
1996 if (tx_ring->itr_val) in e1000_configure_msix()
1997 writel(1000000000 / (tx_ring->itr_val * 256), in e1000_configure_msix()
1998 tx_ring->itr_register); in e1000_configure_msix()
2000 writel(1, tx_ring->itr_register); in e1000_configure_msix()
2001 adapter->eiac_mask |= tx_ring->ims_val; in e1000_configure_msix()
2007 if (rx_ring->itr_val) in e1000_configure_msix()
2008 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2009 hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2011 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
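
The 1000000000 / (itr_val * 256) expressions convert an interrupts-per-second target into the EITR interval; assuming itr_val is in interrupts/second and the register granularity is 256 ns (inferred from the divisor), a worked example:

#include <stdio.h>

/* interrupts/second -> EITR interval in (assumed) 256 ns units */
static unsigned int itr_to_eitr(unsigned int itr_val)
{
	return 1000000000u / (itr_val * 256u);
}

int main(void)
{
	/* 8000 ints/s -> 125000 ns between interrupts -> 488 units */
	printf("%u\n", itr_to_eitr(8000));
	return 0;
}
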
2018 /* enable MSI-X PBA support */ in e1000_configure_msix()
2027 if (adapter->msix_entries) { in e1000e_reset_interrupt_capability()
2028 pci_disable_msix(adapter->pdev); in e1000e_reset_interrupt_capability()
2029 kfree(adapter->msix_entries); in e1000e_reset_interrupt_capability()
2030 adapter->msix_entries = NULL; in e1000e_reset_interrupt_capability()
2031 } else if (adapter->flags & FLAG_MSI_ENABLED) { in e1000e_reset_interrupt_capability()
2032 pci_disable_msi(adapter->pdev); in e1000e_reset_interrupt_capability()
2033 adapter->flags &= ~FLAG_MSI_ENABLED; in e1000e_reset_interrupt_capability()
2038 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2049 switch (adapter->int_mode) { in e1000e_set_interrupt_capability()
2051 if (adapter->flags & FLAG_HAS_MSIX) { in e1000e_set_interrupt_capability()
2052 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ in e1000e_set_interrupt_capability()
2053 adapter->msix_entries = kcalloc(adapter->num_vectors, in e1000e_set_interrupt_capability()
2057 if (adapter->msix_entries) { in e1000e_set_interrupt_capability()
2060 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2061 adapter->msix_entries[i].entry = i; in e1000e_set_interrupt_capability()
2063 err = pci_enable_msix_range(adapter->pdev, in e1000e_set_interrupt_capability()
2064 adapter->msix_entries, in e1000e_set_interrupt_capability()
2065 adapter->num_vectors, in e1000e_set_interrupt_capability()
2066 adapter->num_vectors); in e1000e_set_interrupt_capability()
2070 /* MSI-X failed, so fall through and try MSI */ in e1000e_set_interrupt_capability()
2071 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); in e1000e_set_interrupt_capability()
2074 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000e_set_interrupt_capability()
2077 if (!pci_enable_msi(adapter->pdev)) { in e1000e_set_interrupt_capability()
2078 adapter->flags |= FLAG_MSI_ENABLED; in e1000e_set_interrupt_capability()
2080 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000e_set_interrupt_capability()
2081 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); in e1000e_set_interrupt_capability()
2090 adapter->num_vectors = 1; in e1000e_set_interrupt_capability()
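
For comparison only: the MSI-X -> MSI -> legacy ladder implemented by hand above is roughly what the modern PCI helper collapses into two calls. A hedged sketch, not what this driver does:

#include <linux/pci.h>

static int sketch_alloc_vectors(struct pci_dev *pdev)
{
	/* exactly three vectors for MSI-X (RxQ0, TxQ0 and other)... */
	int nvec = pci_alloc_irq_vectors(pdev, 3, 3, PCI_IRQ_MSIX);

	/* ...else a single MSI or legacy INTx vector */
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	return nvec;
}
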
2094 * e1000_request_msix - Initialize MSI-X interrupts
2097 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2102 struct net_device *netdev = adapter->netdev; in e1000_request_msix()
2105 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2106 snprintf(adapter->rx_ring->name, in e1000_request_msix()
2107 sizeof(adapter->rx_ring->name) - 1, in e1000_request_msix()
2108 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2110 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2111 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2112 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2116 adapter->rx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2118 adapter->rx_ring->itr_val = adapter->itr; in e1000_request_msix()
2121 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2122 snprintf(adapter->tx_ring->name, in e1000_request_msix()
2123 sizeof(adapter->tx_ring->name) - 1, in e1000_request_msix()
2124 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2126 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2127 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2128 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2132 adapter->tx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2134 adapter->tx_ring->itr_val = adapter->itr; in e1000_request_msix()
2137 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2138 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
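
The strlen(netdev->name) < (IFNAMSIZ - 5) guards above size the per-vector IRQ names; the arithmetic, worked in a standalone sketch (taking IFNAMSIZ as 16, its usual value):

#include <stdio.h>

#define IFNAMSIZ 16	/* as in <linux/if.h> */

int main(void)
{
	char ring_name[IFNAMSIZ];

	/* "-rx-0" is 5 bytes, so a name shorter than 16 - 5 = 11
	 * characters fits together with the suffix and the NUL.
	 */
	snprintf(ring_name, sizeof(ring_name) - 1, "%.14s-rx-0", "eth0");
	printf("%s\n", ring_name);	/* eth0-rx-0 */
	return 0;
}
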
2148 * e1000_request_irq - initialize interrupts
2156 struct net_device *netdev = adapter->netdev; in e1000_request_irq()
2159 if (adapter->msix_entries) { in e1000_request_irq()
2163 /* fall back to MSI */ in e1000_request_irq()
2165 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000_request_irq()
2168 if (adapter->flags & FLAG_MSI_ENABLED) { in e1000_request_irq()
2169 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2170 netdev->name, netdev); in e1000_request_irq()
2176 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_request_irq()
2179 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, in e1000_request_irq()
2180 netdev->name, netdev); in e1000_request_irq()
2189 struct net_device *netdev = adapter->netdev; in e1000_free_irq()
2191 if (adapter->msix_entries) { in e1000_free_irq()
2194 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2197 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2201 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2205 free_irq(adapter->pdev->irq, netdev); in e1000_free_irq()
2209 * e1000_irq_disable - Mask off interrupt generation on the NIC
2214 struct e1000_hw *hw = &adapter->hw; in e1000_irq_disable()
2217 if (adapter->msix_entries) in e1000_irq_disable()
2221 if (adapter->msix_entries) { in e1000_irq_disable()
2224 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2225 synchronize_irq(adapter->msix_entries[i].vector); in e1000_irq_disable()
2227 synchronize_irq(adapter->pdev->irq); in e1000_irq_disable()
2232 * e1000_irq_enable - Enable default interrupt generation settings
2237 struct e1000_hw *hw = &adapter->hw; in e1000_irq_enable()
2239 if (adapter->msix_entries) { in e1000_irq_enable()
2240 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); in e1000_irq_enable()
2241 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | in e1000_irq_enable()
2243 } else if (hw->mac.type >= e1000_pch_lpt) { in e1000_irq_enable()
2252 * e1000e_get_hw_control - get control of the h/w from f/w
2262 struct e1000_hw *hw = &adapter->hw; in e1000e_get_hw_control()
2267 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_get_hw_control()
2270 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_get_hw_control()
2277 * e1000e_release_hw_control - release control of the h/w to f/w
2288 struct e1000_hw *hw = &adapter->hw; in e1000e_release_hw_control()
2293 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_release_hw_control()
2296 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_release_hw_control()
2303 * e1000_alloc_ring_dma - allocate memory for a ring structure
2310 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_ring_dma()
2312 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, in e1000_alloc_ring_dma()
2314 if (!ring->desc) in e1000_alloc_ring_dma()
2315 return -ENOMEM; in e1000_alloc_ring_dma()
2321 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2328 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_setup_tx_resources()
2329 int err = -ENOMEM, size; in e1000e_setup_tx_resources()
2331 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000e_setup_tx_resources()
2332 tx_ring->buffer_info = vzalloc(size); in e1000e_setup_tx_resources()
2333 if (!tx_ring->buffer_info) in e1000e_setup_tx_resources()
2337 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000e_setup_tx_resources()
2338 tx_ring->size = ALIGN(tx_ring->size, 4096); in e1000e_setup_tx_resources()
2344 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2345 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2349 vfree(tx_ring->buffer_info); in e1000e_setup_tx_resources()
2355 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2362 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_setup_rx_resources()
2364 int i, size, desc_len, err = -ENOMEM; in e1000e_setup_rx_resources()
2366 size = sizeof(struct e1000_buffer) * rx_ring->count; in e1000e_setup_rx_resources()
2367 rx_ring->buffer_info = vzalloc(size); in e1000e_setup_rx_resources()
2368 if (!rx_ring->buffer_info) in e1000e_setup_rx_resources()
2371 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2372 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2373 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, in e1000e_setup_rx_resources()
2376 if (!buffer_info->ps_pages) in e1000e_setup_rx_resources()
2383 rx_ring->size = rx_ring->count * desc_len; in e1000e_setup_rx_resources()
2384 rx_ring->size = ALIGN(rx_ring->size, 4096); in e1000e_setup_rx_resources()
2390 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2391 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2392 rx_ring->rx_skb_top = NULL; in e1000e_setup_rx_resources()
2397 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2398 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2399 kfree(buffer_info->ps_pages); in e1000e_setup_rx_resources()
2402 vfree(rx_ring->buffer_info); in e1000e_setup_rx_resources()
2408 * e1000_clean_tx_ring - Free Tx Buffers
2413 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_ring()
2418 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2419 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2423 netdev_reset_queue(adapter->netdev); in e1000_clean_tx_ring()
2424 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2425 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2427 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2429 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2430 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2434 * e1000e_free_tx_resources - Free Tx Resources per Queue
2441 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_free_tx_resources()
2442 struct pci_dev *pdev = adapter->pdev; in e1000e_free_tx_resources()
2446 vfree(tx_ring->buffer_info); in e1000e_free_tx_resources()
2447 tx_ring->buffer_info = NULL; in e1000e_free_tx_resources()
2449 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000e_free_tx_resources()
2450 tx_ring->dma); in e1000e_free_tx_resources()
2451 tx_ring->desc = NULL; in e1000e_free_tx_resources()
2455 * e1000e_free_rx_resources - Free Rx Resources
2462 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_free_rx_resources()
2463 struct pci_dev *pdev = adapter->pdev; in e1000e_free_rx_resources()
2468 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2469 kfree(rx_ring->buffer_info[i].ps_pages); in e1000e_free_rx_resources()
2471 vfree(rx_ring->buffer_info); in e1000e_free_rx_resources()
2472 rx_ring->buffer_info = NULL; in e1000e_free_rx_resources()
2474 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in e1000e_free_rx_resources()
2475 rx_ring->dma); in e1000e_free_rx_resources()
2476 rx_ring->desc = NULL; in e1000e_free_rx_resources()
2480 * e1000_update_itr - update the dynamic ITR value based on statistics
2481 * @itr_setting: current adapter->itr
2540 u32 new_itr = adapter->itr; in e1000_set_itr()
2542 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in e1000_set_itr()
2543 if (adapter->link_speed != SPEED_1000) { in e1000_set_itr()
2548 if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000_set_itr()
2553 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, in e1000_set_itr()
2554 adapter->total_tx_packets, in e1000_set_itr()
2555 adapter->total_tx_bytes); in e1000_set_itr()
2557 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) in e1000_set_itr()
2558 adapter->tx_itr = low_latency; in e1000_set_itr()
2560 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, in e1000_set_itr()
2561 adapter->total_rx_packets, in e1000_set_itr()
2562 adapter->total_rx_bytes); in e1000_set_itr()
2564 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) in e1000_set_itr()
2565 adapter->rx_itr = low_latency; in e1000_set_itr()
2567 current_itr = max(adapter->rx_itr, adapter->tx_itr); in e1000_set_itr()
2585 if (new_itr != adapter->itr) { in e1000_set_itr()
2590 new_itr = new_itr > adapter->itr ? in e1000_set_itr()
2591 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; in e1000_set_itr()
2592 adapter->itr = new_itr; in e1000_set_itr()
2593 adapter->rx_ring->itr_val = new_itr; in e1000_set_itr()
2594 if (adapter->msix_entries) in e1000_set_itr()
2595 adapter->rx_ring->set_itr = 1; in e1000_set_itr()
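
The min(adapter->itr + (new_itr >> 2), new_itr) step means a rising interrupt rate approaches its target in quarter-of-target increments while a falling rate drops immediately; a standalone trace of that ramp (illustrative values, not driver code):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int itr = 4000, target = 20000;

	while (itr != target) {
		itr = target > itr ? min(itr + (target >> 2), target)
				   : target;
		printf("%u\n", itr);	/* 9000, 14000, 19000, 20000 */
	}
	return 0;
}
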
2602 * e1000e_write_itr - write the ITR value to the appropriate registers
2606 * e1000e_write_itr determines if the adapter is in MSI-X mode
2612 struct e1000_hw *hw = &adapter->hw; in e1000e_write_itr()
2615 if (adapter->msix_entries) { in e1000e_write_itr()
2618 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2619 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); in e1000e_write_itr()
2626 * e1000_alloc_queues - Allocate memory for all rings
2633 adapter->tx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2634 if (!adapter->tx_ring) in e1000_alloc_queues()
2636 adapter->tx_ring->count = adapter->tx_ring_count; in e1000_alloc_queues()
2637 adapter->tx_ring->adapter = adapter; in e1000_alloc_queues()
2639 adapter->rx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2640 if (!adapter->rx_ring) in e1000_alloc_queues()
2642 adapter->rx_ring->count = adapter->rx_ring_count; in e1000_alloc_queues()
2643 adapter->rx_ring->adapter = adapter; in e1000_alloc_queues()
2648 kfree(adapter->rx_ring); in e1000_alloc_queues()
2649 kfree(adapter->tx_ring); in e1000_alloc_queues()
2650 return -ENOMEM; in e1000_alloc_queues()
2654 * e1000e_poll - NAPI Rx polling callback
2662 struct e1000_hw *hw = &adapter->hw; in e1000e_poll()
2663 struct net_device *poll_dev = adapter->netdev; in e1000e_poll()
2664 int tx_cleaned = 1, work_done = 0; in e1000e_poll()
2668 if (!adapter->msix_entries || in e1000e_poll()
2669 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) in e1000e_poll()
2670 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); in e1000e_poll()
2672 adapter->clean_rx(adapter->rx_ring, &work_done, budget); in e1000e_poll()
2677 /* Exit the polling mode, but don't re-enable interrupts if stack might in e1000e_poll()
2678 * poll us due to busy-polling in e1000e_poll()
2681 if (adapter->itr_setting & 3) in e1000e_poll()
2683 if (!test_bit(__E1000_DOWN, &adapter->state)) { in e1000e_poll()
2684 if (adapter->msix_entries) in e1000e_poll()
2685 ew32(IMS, adapter->rx_ring->ims_val); in e1000e_poll()
2698 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_add_vid()
2702 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_add_vid()
2704 (vid == adapter->mng_vlan_id)) in e1000_vlan_rx_add_vid()
2708 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_add_vid()
2712 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_add_vid()
2715 set_bit(vid, adapter->active_vlans); in e1000_vlan_rx_add_vid()
2724 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_kill_vid()
2727 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_kill_vid()
2729 (vid == adapter->mng_vlan_id)) { in e1000_vlan_rx_kill_vid()
2736 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_kill_vid()
2740 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_kill_vid()
2743 clear_bit(vid, adapter->active_vlans); in e1000_vlan_rx_kill_vid()
2749 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2754 struct net_device *netdev = adapter->netdev; in e1000e_vlan_filter_disable()
2755 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_disable()
2758 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_disable()
2764 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { in e1000e_vlan_filter_disable()
2766 adapter->mng_vlan_id); in e1000e_vlan_filter_disable()
2767 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_vlan_filter_disable()
2773 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2778 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_enable()
2781 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_enable()
2791 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2796 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_disable()
2806 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2811 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_enable()
2822 struct net_device *netdev = adapter->netdev; in e1000_update_mng_vlan()
2823 u16 vid = adapter->hw.mng_cookie.vlan_id; in e1000_update_mng_vlan()
2824 u16 old_vid = adapter->mng_vlan_id; in e1000_update_mng_vlan()
2826 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in e1000_update_mng_vlan()
2828 adapter->mng_vlan_id = vid; in e1000_update_mng_vlan()
2839 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2841 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in e1000_restore_vlan()
2842 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in e1000_restore_vlan()
2847 struct e1000_hw *hw = &adapter->hw; in e1000_init_manageability_pt()
2850 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) in e1000_init_manageability_pt()
2862 switch (hw->mac.type) { in e1000_init_manageability_pt()
2868 /* Check if IPMI pass-through decision filter already exists; in e1000_init_manageability_pt()
2893 manc2h |= BIT(1); in e1000_init_manageability_pt()
2899 e_warn("Unable to create IPMI pass-through filter\n"); in e1000_init_manageability_pt()
2908 * e1000_configure_tx - Configure Transmit Unit after Reset
2915 struct e1000_hw *hw = &adapter->hw; in e1000_configure_tx()
2916 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_tx()
2921 tdba = tx_ring->dma; in e1000_configure_tx()
2922 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000_configure_tx()
2928 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2929 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2931 writel(0, tx_ring->head); in e1000_configure_tx()
2932 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_tx()
2935 writel(0, tx_ring->tail); in e1000_configure_tx()
2938 ew32(TIDV, adapter->tx_int_delay); in e1000_configure_tx()
2940 ew32(TADV, adapter->tx_abs_int_delay); in e1000_configure_tx()
2942 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_tx()
2950 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls in e1000_configure_tx()
2951 * hthresh = 1 ==> prefetch when one or more available in e1000_configure_tx()
2960 ew32(TXDCTL(1), er32(TXDCTL(0))); in e1000_configure_tx()
2968 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { in e1000_configure_tx()
2979 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { in e1000_configure_tx()
2981 tarc |= 1; in e1000_configure_tx()
2983 tarc = er32(TARC(1)); in e1000_configure_tx()
2984 tarc |= 1; in e1000_configure_tx()
2985 ew32(TARC(1), tarc); in e1000_configure_tx()
2989 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; in e1000_configure_tx()
2992 if (adapter->tx_int_delay) in e1000_configure_tx()
2993 adapter->txd_cmd |= E1000_TXD_CMD_IDE; in e1000_configure_tx()
2996 adapter->txd_cmd |= E1000_TXD_CMD_RS; in e1000_configure_tx()
3000 hw->mac.ops.config_collision_dist(hw); in e1000_configure_tx()
3003 if (hw->mac.type == e1000_pch_spt) { in e1000_configure_tx()
3022 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
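/* [Editor's sketch -- not driver code] PAGE_USE_COUNT() above is a
 * hand-rolled ceiling division: the number of pages needed to hold S
 * bytes. A standalone check that it matches DIV_ROUND_UP(S, PAGE_SIZE),
 * assuming 4 KiB pages:
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096u
#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_USE_COUNT(S) (((S) >> DEMO_PAGE_SHIFT) + \
				(((S) & (DEMO_PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
	for (size_t s = 0; s < 5 * DEMO_PAGE_SIZE; s++)
		assert(DEMO_PAGE_USE_COUNT(s) ==
		       (s + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE);
	printf("PAGE_USE_COUNT == DIV_ROUND_UP for all tested sizes\n");
	return 0;
}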
3025 * e1000_setup_rctl - configure the receive control registers
3030 struct e1000_hw *hw = &adapter->hw; in e1000_setup_rctl()
3034 /* Workaround Si errata on PCHx - configure jumbo frame flow. in e1000_setup_rctl()
3038 if (hw->mac.type >= e1000_pch2lan) { in e1000_setup_rctl()
3041 if (adapter->netdev->mtu > ETH_DATA_LEN) in e1000_setup_rctl()
3055 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in e1000_setup_rctl()
3061 if (adapter->netdev->mtu <= ETH_DATA_LEN) in e1000_setup_rctl()
3070 if (adapter->flags2 & FLAG2_CRC_STRIPPING) in e1000_setup_rctl()
3073 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ in e1000_setup_rctl()
3074 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { in e1000_setup_rctl()
3093 switch (adapter->rx_buffer_len) { in e1000_setup_rctl()
3115 /* 82571 and greater support packet-split where the protocol in e1000_setup_rctl()
3116 * header is placed in skb->data and the packet data is in e1000_setup_rctl()
3117 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. in e1000_setup_rctl()
3118 * In the case of a non-split, skb->data is linearly filled, in e1000_setup_rctl()
3119 * followed by the page buffers. Therefore, skb->data is in e1000_setup_rctl()
3129 pages = PAGE_USE_COUNT(adapter->netdev->mtu); in e1000_setup_rctl()
3131 adapter->rx_ps_pages = pages; in e1000_setup_rctl()
3133 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3135 if (adapter->rx_ps_pages) { in e1000_setup_rctl()
3141 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; in e1000_setup_rctl()
3143 switch (adapter->rx_ps_pages) { in e1000_setup_rctl()
3150 case 1: in e1000_setup_rctl()
3159 if (adapter->netdev->features & NETIF_F_RXALL) { in e1000_setup_rctl()
3177 adapter->flags &= ~FLAG_RESTART_NOW; in e1000_setup_rctl()
3181 * e1000_configure_rx - Configure Receive Unit after Reset
3188 struct e1000_hw *hw = &adapter->hw; in e1000_configure_rx()
3189 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_rx()
3193 if (adapter->rx_ps_pages) { in e1000_configure_rx()
3195 rdlen = rx_ring->count * in e1000_configure_rx()
3197 adapter->clean_rx = e1000_clean_rx_irq_ps; in e1000_configure_rx()
3198 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; in e1000_configure_rx()
3199 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { in e1000_configure_rx()
3200 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3201 adapter->clean_rx = e1000_clean_jumbo_rx_irq; in e1000_configure_rx()
3202 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; in e1000_configure_rx()
3204 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3205 adapter->clean_rx = e1000_clean_rx_irq; in e1000_configure_rx()
3206 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; in e1000_configure_rx()
3211 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000_configure_rx()
3216 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_rx()
3218 * is set). set GRAN=1 and write back up to 0x4 worth, and in e1000_configure_rx()
3226 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); in e1000_configure_rx()
3230 ew32(RDTR, adapter->rx_int_delay); in e1000_configure_rx()
3233 ew32(RADV, adapter->rx_abs_int_delay); in e1000_configure_rx()
3234 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3235 e1000e_write_itr(adapter, adapter->itr); in e1000_configure_rx()
3238 /* Auto-Mask interrupts upon ICR access */ in e1000_configure_rx()
3247 rdba = rx_ring->dma; in e1000_configure_rx()
3253 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3254 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3256 writel(0, rx_ring->head); in e1000_configure_rx()
3257 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_rx()
3260 writel(0, rx_ring->tail); in e1000_configure_rx()
3264 if (adapter->netdev->features & NETIF_F_RXCSUM) in e1000_configure_rx()
3270 /* With jumbo frames, excessive C-state transition latencies result in e1000_configure_rx()
3273 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000_configure_rx()
3275 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - in e1000_configure_rx()
3276 adapter->max_frame_size) * 8 / 1000; in e1000_configure_rx()
3278 if (adapter->flags & FLAG_IS_ICH) { in e1000_configure_rx()
3284 dev_info(&adapter->pdev->dev, in e1000_configure_rx()
3285 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); in e1000_configure_rx()
3286 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); in e1000_configure_rx()
3288 cpu_latency_qos_update_request(&adapter->pm_qos_req, in e1000_configure_rx()
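/* [Editor's sketch -- not driver code] The budget computed above is
 * "time to fill the spare Rx FIFO at line rate": FIFO bytes minus one
 * max frame, in bits, divided by 1000 bits/us (1 Gb/s). C-states with a
 * longer exit latency would overflow the FIFO before the CPU resumes
 * draining it. Standalone version, assuming a 24 KB Rx allocation and a
 * 9018-byte jumbo frame:
 */
#include <stdio.h>

int main(void)
{
	unsigned int rxa_kb = 24;		/* PBA Rx allocation, KB */
	unsigned int max_frame = 9018;		/* jumbo MTU + overhead */
	unsigned int lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;

	printf("max tolerable wakeup latency: %u us\n", lat_us);
	return 0;
}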
3297 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3301 * Returns: -ENOMEM on failure
3308 struct e1000_hw *hw = &adapter->hw; in e1000e_write_mc_addr_list()
3315 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3321 return -ENOMEM; in e1000e_write_mc_addr_list()
3326 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in e1000e_write_mc_addr_list()
3328 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); in e1000e_write_mc_addr_list()
3335 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3339 * Returns: -ENOMEM on failure/insufficient address space
3346 struct e1000_hw *hw = &adapter->hw; in e1000e_write_uc_addr_list()
3350 rar_entries = hw->mac.ops.rar_get_count(hw); in e1000e_write_uc_addr_list()
3353 rar_entries--; in e1000e_write_uc_addr_list()
3356 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) in e1000e_write_uc_addr_list()
3357 rar_entries--; in e1000e_write_uc_addr_list()
3361 return -ENOMEM; in e1000e_write_uc_addr_list()
3374 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); in e1000e_write_uc_addr_list()
3376 return -ENOMEM; in e1000e_write_uc_addr_list()
3382 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
3392  * e1000e_set_rx_mode - secondary unicast, multicast and promiscuous mode set in e1000e_set_rx_mode()
3398 * promiscuous mode, and all-multi behavior.
3403 struct e1000_hw *hw = &adapter->hw; in e1000e_set_rx_mode()
3406 if (pm_runtime_suspended(netdev->dev.parent)) in e1000e_set_rx_mode()
3415 if (netdev->flags & IFF_PROMISC) { in e1000e_set_rx_mode()
3422 if (netdev->flags & IFF_ALLMULTI) { in e1000e_set_rx_mode()
3445 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in e1000e_set_rx_mode()
3453 struct e1000_hw *hw = &adapter->hw; in e1000e_setup_rss_hash()
3484 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3493 struct e1000_hw *hw = &adapter->hw; in e1000e_get_base_timinca()
3499 if ((hw->mac.type >= e1000_pch_lpt) && in e1000e_get_base_timinca()
3510 switch (hw->mac.type) { in e1000e_get_base_timinca()
3516 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3524 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3530 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3538 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3552 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3558 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3567 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3570 return -EINVAL; in e1000e_get_base_timinca()
3580 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3598 struct e1000_hw *hw = &adapter->hw; in e1000e_config_hwtstamp()
3607 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_config_hwtstamp()
3608 return -EINVAL; in e1000e_config_hwtstamp()
3610 switch (config->tx_type) { in e1000e_config_hwtstamp()
3617 return -ERANGE; in e1000e_config_hwtstamp()
3620 switch (config->rx_filter) { in e1000e_config_hwtstamp()
3672 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in e1000e_config_hwtstamp()
3678 * Delay Request messages but not both so fall-through to in e1000e_config_hwtstamp()
3687 config->rx_filter = HWTSTAMP_FILTER_ALL; in e1000e_config_hwtstamp()
3690 return -ERANGE; in e1000e_config_hwtstamp()
3693 adapter->hwtstamp_config = *config; in e1000e_config_hwtstamp()
3703 return -EAGAIN; in e1000e_config_hwtstamp()
3716 return -EAGAIN; in e1000e_config_hwtstamp()
3743 * e1000_configure - configure the hardware for Rx and Tx
3748 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure()
3750 e1000e_set_rx_mode(adapter->netdev); in e1000_configure()
3757 if (adapter->netdev->features & NETIF_F_RXHASH) in e1000_configure()
3761 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); in e1000_configure()
3765 * e1000e_power_up_phy - restore link in case the phy was powered down
3774 if (adapter->hw.phy.ops.power_up) in e1000e_power_up_phy()
3775 adapter->hw.phy.ops.power_up(&adapter->hw); in e1000e_power_up_phy()
3777 adapter->hw.mac.ops.setup_link(&adapter->hw); in e1000e_power_up_phy()
3781 * e1000_power_down_phy - Power down the PHY
3789 if (adapter->hw.phy.ops.power_down) in e1000_power_down_phy()
3790 adapter->hw.phy.ops.power_down(&adapter->hw); in e1000_power_down_phy()
3794 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3804 struct e1000_hw *hw = &adapter->hw; in e1000_flush_tx_ring()
3805 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_flush_tx_ring()
3813 BUG_ON(tdt != tx_ring->next_to_use); in e1000_flush_tx_ring()
3814 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); in e1000_flush_tx_ring()
3815 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); in e1000_flush_tx_ring()
3817 tx_desc->lower.data = cpu_to_le32(txd_lower | size); in e1000_flush_tx_ring()
3818 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3821 tx_ring->next_to_use++; in e1000_flush_tx_ring()
3822 if (tx_ring->next_to_use == tx_ring->count) in e1000_flush_tx_ring()
3823 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3824 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
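/* [Editor's sketch -- not driver code] next_to_use above advances with a
 * compare-and-reset instead of a modulo -- the ring idiom used all
 * through this file, valid for any ring size, not just powers of two:
 */
#include <stdio.h>

static unsigned int ring_advance(unsigned int idx, unsigned int count)
{
	/* ++idx; if (idx == count) idx = 0; */
	return (idx + 1 == count) ? 0 : idx + 1;
}

int main(void)
{
	unsigned int i = 254, count = 256;

	for (int n = 0; n < 4; n++, i = ring_advance(i, count))
		printf("idx=%u\n", i);	/* 254, 255, 0, 1 */
	return 0;
}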
3829 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3837 struct e1000_hw *hw = &adapter->hw; in e1000_flush_rx_ring()
3848 /* update thresholds: prefetch threshold to 31, host threshold to 1 in e1000_flush_rx_ring()
3862 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3877 struct e1000_hw *hw = &adapter->hw; in e1000_flush_desc_rings()
3885 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3891 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3898 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3908 struct ptp_clock_info *info = &adapter->ptp_clock_info; in e1000e_systim_reset()
3909 struct e1000_hw *hw = &adapter->hw; in e1000e_systim_reset()
3914 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_systim_reset()
3917 if (info->adjfine) { in e1000e_systim_reset()
3919 ret_val = info->adjfine(info, adapter->ptp_delta); in e1000e_systim_reset()
3928 dev_warn(&adapter->pdev->dev, in e1000e_systim_reset()
3935 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_reset()
3936 timecounter_init(&adapter->tc, &adapter->cc, in e1000e_systim_reset()
3938 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_reset()
3941 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); in e1000e_systim_reset()
3945 * e1000e_reset - bring the hardware into a known good state
3949 * require a configuration cycle of the hardware - those cannot be
3955 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000e_reset()
3956 struct e1000_fc_info *fc = &adapter->hw.fc; in e1000e_reset()
3957 struct e1000_hw *hw = &adapter->hw; in e1000e_reset()
3959 u32 pba = adapter->pba; in e1000e_reset()
3965 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { in e1000e_reset()
3968 * rounded up to the next 1KB and expressed in KB. Likewise, in e1000e_reset()
3981 min_tx_space = (adapter->max_frame_size + in e1000e_reset()
3982 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; in e1000e_reset()
3986 min_rx_space = adapter->max_frame_size; in e1000e_reset()
3995 ((min_tx_space - tx_space) < pba)) { in e1000e_reset()
3996 pba -= min_tx_space - tx_space; in e1000e_reset()
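/* [Editor's sketch -- not driver code] The packet-buffer rebalance
 * above: reserve room on the Tx side for two full frames (plus a
 * descriptor, minus the FCS), rounded up to whole KB, and steal the
 * shortfall from the Rx allocation when possible. Standalone model,
 * assuming a 16-byte legacy Tx descriptor and KB-granular PBA fields:
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_frame = 9018, tx_desc_sz = 16, fcs = 4;
	unsigned int pba_rx_kb = 26, tx_space_kb = 6;	/* current split */

	unsigned int min_tx_kb =
		((max_frame + tx_desc_sz - fcs) * 2 + 1023) / 1024;

	if (tx_space_kb < min_tx_kb &&
	    (min_tx_kb - tx_space_kb) < pba_rx_kb)
		pba_rx_kb -= min_tx_kb - tx_space_kb;

	printf("rx=%u KB (tx needs %u KB)\n", pba_rx_kb, min_tx_kb);
	return 0;
}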
4013 * - 90% of the Rx FIFO size, and in e1000e_reset()
4014 * - the full Rx FIFO size minus one full frame in e1000e_reset()
4016 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) in e1000e_reset()
4017 fc->pause_time = 0xFFFF; in e1000e_reset()
4019 fc->pause_time = E1000_FC_PAUSE_TIME; in e1000e_reset()
4020 fc->send_xon = true; in e1000e_reset()
4021 fc->current_mode = fc->requested_mode; in e1000e_reset()
4023 switch (hw->mac.type) { in e1000e_reset()
4026 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4029 fc->high_water = 0x2800; in e1000e_reset()
4030 fc->low_water = fc->high_water - 8; in e1000e_reset()
4036 ((pba << 10) - adapter->max_frame_size)); in e1000e_reset()
4038 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ in e1000e_reset()
4039 fc->low_water = fc->high_water - 8; in e1000e_reset()
4042 /* Workaround PCH LOM adapter hangs with certain network in e1000e_reset()
4045 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4046 fc->high_water = 0x3500; in e1000e_reset()
4047 fc->low_water = 0x1500; in e1000e_reset()
4049 fc->high_water = 0x5000; in e1000e_reset()
4050 fc->low_water = 0x3000; in e1000e_reset()
4052 fc->refresh_time = 0x1000; in e1000e_reset()
4064 fc->refresh_time = 0xFFFF; in e1000e_reset()
4065 fc->pause_time = 0xFFFF; in e1000e_reset()
4067 if (adapter->netdev->mtu <= ETH_DATA_LEN) { in e1000e_reset()
4068 fc->high_water = 0x05C20; in e1000e_reset()
4069 fc->low_water = 0x05048; in e1000e_reset()
4075 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; in e1000e_reset()
4076 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; in e1000e_reset()
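/* [Editor's sketch -- not driver code] Default watermarks above: assert
 * XOFF at 90% of the Rx FIFO, send XON again at 80%, both clipped to the
 * registers' 8-byte granularity (the FCRTH/FCRTL masks keep the low
 * three bits clear). Standalone version, assuming a 20 KB Rx FIFO:
 */
#include <stdio.h>

int main(void)
{
	unsigned int pba_kb = 20;
	unsigned int fifo = pba_kb << 10;		/* bytes */
	unsigned int high = (fifo * 9 / 10) & ~7u;	/* 8-byte aligned */
	unsigned int low  = (fifo * 8 / 10) & ~7u;

	printf("XOFF at %u bytes, XON at %u bytes\n", high, low);
	return 0;
}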
4085 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, in e1000e_reset()
4091 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4092 if ((adapter->max_frame_size * 2) > (pba << 10)) { in e1000e_reset()
4093 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { in e1000e_reset()
4094 dev_info(&adapter->pdev->dev, in e1000e_reset()
4096 adapter->flags2 |= FLAG2_DISABLE_AIM; in e1000e_reset()
4099 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000e_reset()
4100 dev_info(&adapter->pdev->dev, in e1000e_reset()
4102 adapter->flags2 &= ~FLAG2_DISABLE_AIM; in e1000e_reset()
4103 adapter->itr = 20000; in e1000e_reset()
4104 e1000e_write_itr(adapter, adapter->itr); in e1000e_reset()
4108 if (hw->mac.type >= e1000_pch_spt) in e1000e_reset()
4111 mac->ops.reset_hw(hw); in e1000e_reset()
4116 if (adapter->flags & FLAG_HAS_AMT) in e1000e_reset()
4121 if (mac->ops.init_hw(hw)) in e1000e_reset()
4135 if (adapter->flags2 & FLAG2_HAS_EEE) { in e1000e_reset()
4139 switch (hw->phy.type) { in e1000e_reset()
4147 dev_err(&adapter->pdev->dev, in e1000e_reset()
4152 ret_val = hw->phy.ops.acquire(hw); in e1000e_reset()
4154 dev_err(&adapter->pdev->dev, in e1000e_reset()
4155 "EEE advertisement - unable to acquire PHY\n"); in e1000e_reset()
4160 hw->dev_spec.ich8lan.eee_disable ? in e1000e_reset()
4161 0 : adapter->eee_advert); in e1000e_reset()
4163 hw->phy.ops.release(hw); in e1000e_reset()
4166 if (!netif_running(adapter->netdev) && in e1000e_reset()
4167 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_reset()
4172 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && in e1000e_reset()
4173 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { in e1000e_reset()
4183 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4186 /* Fextnvm7 @ 0xe4[2] = 1 */ in e1000e_reset()
4200 * e1000e_trigger_lsc - trigger an LSC interrupt
4207 struct e1000_hw *hw = &adapter->hw; in e1000e_trigger_lsc()
4209 if (adapter->msix_entries) in e1000e_trigger_lsc()
4220 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_up()
4222 if (adapter->msix_entries) in e1000e_up()
4233 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_descriptors()
4235 if (!(adapter->flags2 & FLAG2_DMA_BURST)) in e1000e_flush_descriptors()
4239 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4240 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4248 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4249 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4258 * e1000e_down - quiesce the device and optionally reset the hardware
4264 struct net_device *netdev = adapter->netdev; in e1000e_down()
4265 struct e1000_hw *hw = &adapter->hw; in e1000e_down()
4271 set_bit(__E1000_DOWN, &adapter->state); in e1000e_down()
4277 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000e_down()
4294 napi_synchronize(&adapter->napi); in e1000e_down()
4296 del_timer_sync(&adapter->watchdog_timer); in e1000e_down()
4297 del_timer_sync(&adapter->phy_info_timer); in e1000e_down()
4299 spin_lock(&adapter->stats64_lock); in e1000e_down()
4301 spin_unlock(&adapter->stats64_lock); in e1000e_down()
4305 adapter->link_speed = 0; in e1000e_down()
4306 adapter->link_duplex = 0; in e1000e_down()
4309 if ((hw->mac.type >= e1000_pch2lan) && in e1000e_down()
4310 (adapter->netdev->mtu > ETH_DATA_LEN) && in e1000e_down()
4314 if (!pci_channel_offline(adapter->pdev)) { in e1000e_down()
4317 else if (hw->mac.type >= e1000_pch_spt) in e1000e_down()
4320 e1000_clean_tx_ring(adapter->tx_ring); in e1000e_down()
4321 e1000_clean_rx_ring(adapter->rx_ring); in e1000e_down()
4327 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000e_reinit_locked()
4331 clear_bit(__E1000_RESETTING, &adapter->state); in e1000e_reinit_locked()
4335 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4361 time_delta = systim_next - systim; in e1000e_sanitize_systim()
4376 * e1000e_read_systim - read SYSTIM register
4384 struct e1000_hw *hw = &adapter->hw; in e1000e_read_systim()
4391  * To fix that, we test for overflow and, if true, re-read systime. in e1000e_read_systim()
4398 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
4413 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) in e1000e_read_systim()
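/* [Editor's sketch -- not driver code] SYSTIM is a 64-bit count split
 * across two 32-bit registers; if the low half was read close to its
 * wrap point, the high half may have ticked in between, so both are read
 * again. Standalone model with a fake clock that advances on every
 * register access to show the race:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hwclock = 0xffffffd0ull;	/* low half about to wrap */

static uint32_t read_lo(void) { hwclock += 0x20; return (uint32_t)hwclock; }
static uint32_t read_hi(void) { hwclock += 0x20; return (uint32_t)(hwclock >> 32); }

static uint64_t read_split_counter(uint32_t max_step)
{
	uint32_t lo = read_lo();
	uint32_t hi = read_hi();

	if (lo >= UINT32_MAX - max_step) {	/* wrap may have raced */
		lo = read_lo();
		hi = read_hi();
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* a naive combine here would yield 0x1fffffff0 -- off by 2^32 */
	printf("systim=%#llx\n", (unsigned long long)read_split_counter(0xff));
	return 0;
}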
4420 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4432 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4441 struct net_device *netdev = adapter->netdev; in e1000_sw_init()
4443 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_sw_init()
4444 adapter->rx_ps_bsize0 = 128; in e1000_sw_init()
4445 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; in e1000_sw_init()
4446 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in e1000_sw_init()
4447 adapter->tx_ring_count = E1000_DEFAULT_TXD; in e1000_sw_init()
4448 adapter->rx_ring_count = E1000_DEFAULT_RXD; in e1000_sw_init()
4450 spin_lock_init(&adapter->stats64_lock); in e1000_sw_init()
4455 return -ENOMEM; in e1000_sw_init()
4458 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_sw_init()
4459 adapter->cc.read = e1000e_cyclecounter_read; in e1000_sw_init()
4460 adapter->cc.mask = CYCLECOUNTER_MASK(64); in e1000_sw_init()
4461 adapter->cc.mult = 1; in e1000_sw_init()
4464 spin_lock_init(&adapter->systim_lock); in e1000_sw_init()
4465 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); in e1000_sw_init()
4471 set_bit(__E1000_DOWN, &adapter->state); in e1000_sw_init()
4476 * e1000_intr_msi_test - Interrupt Handler
4484 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi_test()
4489 adapter->flags &= ~FLAG_MSI_TEST_FAILED; in e1000_intr_msi_test()
4500 * e1000_test_msi_interrupt - Returns 0 for successful test
4507 struct net_device *netdev = adapter->netdev; in e1000_test_msi_interrupt()
4508 struct e1000_hw *hw = &adapter->hw; in e1000_test_msi_interrupt()
4520 * MSI irq handler will unset this flag in e1000_test_msi_interrupt()
4522 adapter->flags |= FLAG_MSI_TEST_FAILED; in e1000_test_msi_interrupt()
4524 err = pci_enable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4528 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4529 netdev->name, netdev); in e1000_test_msi_interrupt()
4531 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4551 if (adapter->flags & FLAG_MSI_TEST_FAILED) { in e1000_test_msi_interrupt()
4552 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_test_msi_interrupt()
4553 e_info("MSI interrupt test failed, using legacy interrupt.\n"); in e1000_test_msi_interrupt()
4555 e_dbg("MSI interrupt test succeeded!\n"); in e1000_test_msi_interrupt()
4558 free_irq(adapter->pdev->irq, netdev); in e1000_test_msi_interrupt()
4559 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4567 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4577 if (!(adapter->flags & FLAG_MSI_ENABLED)) in e1000_test_msi()
4580 /* disable SERR in case the MSI write causes a master abort */ in e1000_test_msi()
4581 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4583 pci_write_config_word(adapter->pdev, PCI_COMMAND, in e1000_test_msi()
4588 /* re-enable SERR */ in e1000_test_msi()
4590 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4592 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); in e1000_test_msi()
4599 * e1000e_open - Called when a network interface is made active
4613 struct e1000_hw *hw = &adapter->hw; in e1000e_open()
4614 struct pci_dev *pdev = adapter->pdev; in e1000e_open()
4618 if (test_bit(__E1000_TESTING, &adapter->state)) in e1000e_open()
4619 return -EBUSY; in e1000e_open()
4621 pm_runtime_get_sync(&pdev->dev); in e1000e_open()
4627 err = e1000e_setup_tx_resources(adapter->tx_ring); in e1000e_open()
4632 err = e1000e_setup_rx_resources(adapter->rx_ring); in e1000e_open()
4639 if (adapter->flags & FLAG_HAS_AMT) { in e1000e_open()
4646 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_open()
4647 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) in e1000e_open()
4651 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); in e1000e_open()
4664 /* Work around PCIe errata with MSI interrupts causing some chipsets to in e1000e_open()
4665 * ignore e1000e MSI messages, which means we need to test our MSI in e1000e_open()
4668 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { in e1000e_open()
4677 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_open()
4679 napi_enable(&adapter->napi); in e1000e_open()
4683 adapter->tx_hang_recheck = false; in e1000e_open()
4685 hw->mac.get_link_status = true; in e1000e_open()
4686 pm_runtime_put(&pdev->dev); in e1000e_open()
4693 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_open()
4696 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_open()
4698 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_open()
4701 pm_runtime_put_sync(&pdev->dev); in e1000e_open()
4707 * e1000e_close - Disables a network interface
4712  * The close entry point is called when an interface is deactivated in e1000e_close()
4720 struct pci_dev *pdev = adapter->pdev; in e1000e_close()
4723 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_close()
4726 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_close()
4728 pm_runtime_get_sync(&pdev->dev); in e1000e_close()
4738 napi_disable(&adapter->napi); in e1000e_close()
4740 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_close()
4741 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_close()
4746 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) in e1000e_close()
4748 adapter->mng_vlan_id); in e1000e_close()
4753 if ((adapter->flags & FLAG_HAS_AMT) && in e1000e_close()
4754 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_close()
4757 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_close()
4759 pm_runtime_put_sync(&pdev->dev); in e1000e_close()
4765 * e1000_set_mac - Change the Ethernet Address of the NIC
4774 struct e1000_hw *hw = &adapter->hw; in e1000_set_mac()
4777 if (!is_valid_ether_addr(addr->sa_data)) in e1000_set_mac()
4778 return -EADDRNOTAVAIL; in e1000_set_mac()
4780 eth_hw_addr_set(netdev, addr->sa_data); in e1000_set_mac()
4781 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4783 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4785 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { in e1000_set_mac()
4787 e1000e_set_laa_state_82571(&adapter->hw, 1); in e1000_set_mac()
4796 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, in e1000_set_mac()
4797 adapter->hw.mac.rar_entry_count - 1); in e1000_set_mac()
4804 * e1000e_update_phy_task - work thread to update phy
4816 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_task()
4818 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_update_phy_task()
4824 if (hw->phy.type >= e1000_phy_82579) in e1000e_update_phy_task()
4829  * e1000_update_phy_info - timer call-back to update PHY info in e1000_update_phy_info()
4839 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_update_phy_info()
4842 schedule_work(&adapter->update_phy_task); in e1000_update_phy_info()
4846 * e1000e_update_phy_stats - Update the PHY statistics counters
4849 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4853 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_stats()
4857 ret_val = hw->phy.ops.acquire(hw); in e1000e_update_phy_stats()
4864 hw->phy.addr = 1; in e1000e_update_phy_stats()
4870 ret_val = hw->phy.ops.set_page(hw, in e1000e_update_phy_stats()
4877 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4878 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4880 adapter->stats.scc += phy_data; in e1000e_update_phy_stats()
4883 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4884 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4886 adapter->stats.ecol += phy_data; in e1000e_update_phy_stats()
4889 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4890 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4892 adapter->stats.mcc += phy_data; in e1000e_update_phy_stats()
4895 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4896 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4898 adapter->stats.latecol += phy_data; in e1000e_update_phy_stats()
4900 /* Collision Count - also used for adaptive IFS */ in e1000e_update_phy_stats()
4901 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); in e1000e_update_phy_stats()
4902 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); in e1000e_update_phy_stats()
4904 hw->mac.collision_delta = phy_data; in e1000e_update_phy_stats()
4907 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); in e1000e_update_phy_stats()
4908 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); in e1000e_update_phy_stats()
4910 adapter->stats.dc += phy_data; in e1000e_update_phy_stats()
4913 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); in e1000e_update_phy_stats()
4914 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); in e1000e_update_phy_stats()
4916 adapter->stats.tncrs += phy_data; in e1000e_update_phy_stats()
4919 hw->phy.ops.release(hw); in e1000e_update_phy_stats()
4923 * e1000e_update_stats - Update the board statistics counters
4928 struct net_device *netdev = adapter->netdev; in e1000e_update_stats()
4929 struct e1000_hw *hw = &adapter->hw; in e1000e_update_stats()
4930 struct pci_dev *pdev = adapter->pdev; in e1000e_update_stats()
4935 if (adapter->link_speed == 0) in e1000e_update_stats()
4940 adapter->stats.crcerrs += er32(CRCERRS); in e1000e_update_stats()
4941 adapter->stats.gprc += er32(GPRC); in e1000e_update_stats()
4942 adapter->stats.gorc += er32(GORCL); in e1000e_update_stats()
4944 adapter->stats.bprc += er32(BPRC); in e1000e_update_stats()
4945 adapter->stats.mprc += er32(MPRC); in e1000e_update_stats()
4946 adapter->stats.roc += er32(ROC); in e1000e_update_stats()
4948 adapter->stats.mpc += er32(MPC); in e1000e_update_stats()
4950 /* Half-duplex statistics */ in e1000e_update_stats()
4951 if (adapter->link_duplex == HALF_DUPLEX) { in e1000e_update_stats()
4952 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { in e1000e_update_stats()
4955 adapter->stats.scc += er32(SCC); in e1000e_update_stats()
4956 adapter->stats.ecol += er32(ECOL); in e1000e_update_stats()
4957 adapter->stats.mcc += er32(MCC); in e1000e_update_stats()
4958 adapter->stats.latecol += er32(LATECOL); in e1000e_update_stats()
4959 adapter->stats.dc += er32(DC); in e1000e_update_stats()
4961 hw->mac.collision_delta = er32(COLC); in e1000e_update_stats()
4963 if ((hw->mac.type != e1000_82574) && in e1000e_update_stats()
4964 (hw->mac.type != e1000_82583)) in e1000e_update_stats()
4965 adapter->stats.tncrs += er32(TNCRS); in e1000e_update_stats()
4967 adapter->stats.colc += hw->mac.collision_delta; in e1000e_update_stats()
4970 adapter->stats.xonrxc += er32(XONRXC); in e1000e_update_stats()
4971 adapter->stats.xontxc += er32(XONTXC); in e1000e_update_stats()
4972 adapter->stats.xoffrxc += er32(XOFFRXC); in e1000e_update_stats()
4973 adapter->stats.xofftxc += er32(XOFFTXC); in e1000e_update_stats()
4974 adapter->stats.gptc += er32(GPTC); in e1000e_update_stats()
4975 adapter->stats.gotc += er32(GOTCL); in e1000e_update_stats()
4977 adapter->stats.rnbc += er32(RNBC); in e1000e_update_stats()
4978 adapter->stats.ruc += er32(RUC); in e1000e_update_stats()
4980 adapter->stats.mptc += er32(MPTC); in e1000e_update_stats()
4981 adapter->stats.bptc += er32(BPTC); in e1000e_update_stats()
4985 hw->mac.tx_packet_delta = er32(TPT); in e1000e_update_stats()
4986 adapter->stats.tpt += hw->mac.tx_packet_delta; in e1000e_update_stats()
4988 adapter->stats.algnerrc += er32(ALGNERRC); in e1000e_update_stats()
4989 adapter->stats.rxerrc += er32(RXERRC); in e1000e_update_stats()
4990 adapter->stats.cexterr += er32(CEXTERR); in e1000e_update_stats()
4991 adapter->stats.tsctc += er32(TSCTC); in e1000e_update_stats()
4992 adapter->stats.tsctfc += er32(TSCTFC); in e1000e_update_stats()
4995 netdev->stats.multicast = adapter->stats.mprc; in e1000e_update_stats()
4996 netdev->stats.collisions = adapter->stats.colc; in e1000e_update_stats()
5003 netdev->stats.rx_errors = adapter->stats.rxerrc + in e1000e_update_stats()
5004 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_update_stats()
5005 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_update_stats()
5006 netdev->stats.rx_length_errors = adapter->stats.ruc + in e1000e_update_stats()
5007 adapter->stats.roc; in e1000e_update_stats()
5008 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; in e1000e_update_stats()
5009 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; in e1000e_update_stats()
5010 netdev->stats.rx_missed_errors = adapter->stats.mpc; in e1000e_update_stats()
5013 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_update_stats()
5014 netdev->stats.tx_aborted_errors = adapter->stats.ecol; in e1000e_update_stats()
5015 netdev->stats.tx_window_errors = adapter->stats.latecol; in e1000e_update_stats()
5016 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; in e1000e_update_stats()
5021 adapter->stats.mgptc += er32(MGTPTC); in e1000e_update_stats()
5022 adapter->stats.mgprc += er32(MGTPRC); in e1000e_update_stats()
5023 adapter->stats.mgpdc += er32(MGTPDC); in e1000e_update_stats()
5026 if (hw->mac.type >= e1000_pch_lpt) { in e1000e_update_stats()
5029 adapter->corr_errors += in e1000e_update_stats()
5031 adapter->uncorr_errors += in e1000e_update_stats()
5037 * e1000_phy_read_status - Update the PHY register status snapshot
5042 struct e1000_hw *hw = &adapter->hw; in e1000_phy_read_status()
5043 struct e1000_phy_regs *phy = &adapter->phy_regs; in e1000_phy_read_status()
5045 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && in e1000_phy_read_status()
5047 (adapter->hw.phy.media_type == e1000_media_type_copper)) { in e1000_phy_read_status()
5050 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); in e1000_phy_read_status()
5051 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); in e1000_phy_read_status()
5052 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); in e1000_phy_read_status()
5053 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); in e1000_phy_read_status()
5054 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); in e1000_phy_read_status()
5055 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); in e1000_phy_read_status()
5056 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); in e1000_phy_read_status()
5057 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); in e1000_phy_read_status()
5062 * Set values to typical power-on defaults in e1000_phy_read_status()
5064 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); in e1000_phy_read_status()
5065 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | in e1000_phy_read_status()
5068 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | in e1000_phy_read_status()
5070 phy->lpa = 0; in e1000_phy_read_status()
5071 phy->expansion = EXPANSION_ENABLENPAGE; in e1000_phy_read_status()
5072 phy->ctrl1000 = ADVERTISE_1000FULL; in e1000_phy_read_status()
5073 phy->stat1000 = 0; in e1000_phy_read_status()
5074 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); in e1000_phy_read_status()
5080 struct e1000_hw *hw = &adapter->hw; in e1000_print_link_info()
5084 netdev_info(adapter->netdev, in e1000_print_link_info()
5086 adapter->link_speed, in e1000_print_link_info()
5087 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", in e1000_print_link_info()
5095 struct e1000_hw *hw = &adapter->hw; in e1000e_has_link()
5104 switch (hw->phy.media_type) { in e1000e_has_link()
5106 if (hw->mac.get_link_status) { in e1000e_has_link()
5107 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5108 link_active = !hw->mac.get_link_status; in e1000e_has_link()
5114 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5118 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5119 link_active = hw->mac.serdes_has_link; in e1000e_has_link()
5126 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && in e1000e_has_link()
5138 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && in e1000e_enable_receives()
5139 (adapter->flags & FLAG_RESTART_NOW)) { in e1000e_enable_receives()
5140 struct e1000_hw *hw = &adapter->hw; in e1000e_enable_receives()
5144 adapter->flags &= ~FLAG_RESTART_NOW; in e1000e_enable_receives()
5150 struct e1000_hw *hw = &adapter->hw; in e1000e_check_82574_phy_workaround()
5156 adapter->phy_hang_count++; in e1000e_check_82574_phy_workaround()
5158 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5160 if (adapter->phy_hang_count > 1) { in e1000e_check_82574_phy_workaround()
5161 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5162 e_dbg("PHY appears hung - resetting\n"); in e1000e_check_82574_phy_workaround()
5163 schedule_work(&adapter->reset_task); in e1000e_check_82574_phy_workaround()
5168 * e1000_watchdog - Timer Call-back
5176 schedule_work(&adapter->watchdog_task); in e1000_watchdog()
5186 struct net_device *netdev = adapter->netdev; in e1000_watchdog_task()
5187 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000_watchdog_task()
5188 struct e1000_phy_info *phy = &adapter->hw.phy; in e1000_watchdog_task()
5189 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_watchdog_task()
5191 struct e1000_hw *hw = &adapter->hw; in e1000_watchdog_task()
5194 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5200 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5207 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) in e1000_watchdog_task()
5215 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5230 e1000_phy_hw_reset(&adapter->hw); in e1000_watchdog_task()
5236 mac->ops.get_link_up_info(&adapter->hw, in e1000_watchdog_task()
5237 &adapter->link_speed, in e1000_watchdog_task()
5238 &adapter->link_duplex); in e1000_watchdog_task()
5243 if (phy->speed_downgraded) in e1000_watchdog_task()
5250 if ((hw->phy.type == e1000_phy_igp_3 || in e1000_watchdog_task()
5251 hw->phy.type == e1000_phy_bm) && in e1000_watchdog_task()
5252 hw->mac.autoneg && in e1000_watchdog_task()
5253 (adapter->link_speed == SPEED_10 || in e1000_watchdog_task()
5254 adapter->link_speed == SPEED_100) && in e1000_watchdog_task()
5255 (adapter->link_duplex == HALF_DUPLEX)) { in e1000_watchdog_task()
5265 adapter->tx_timeout_factor = 1; in e1000_watchdog_task()
5266 switch (adapter->link_speed) { in e1000_watchdog_task()
5269 adapter->tx_timeout_factor = 16; in e1000_watchdog_task()
5273 adapter->tx_timeout_factor = 10; in e1000_watchdog_task()
5277 /* workaround: re-program speed mode bit after in e1000_watchdog_task()
5278 * link-up event in e1000_watchdog_task()
5280 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && in e1000_watchdog_task()
5296 /* Perform any post-link-up configuration before in e1000_watchdog_task()
5299 if (phy->ops.cfg_on_link_up) in e1000_watchdog_task()
5300 phy->ops.cfg_on_link_up(hw); in e1000_watchdog_task()
5305 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5306 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5311 adapter->link_speed = 0; in e1000_watchdog_task()
5312 adapter->link_duplex = 0; in e1000_watchdog_task()
5317 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5318 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5321 /* 80003ES2LAN requires a Rx packet buffer work-around in e1000_watchdog_task()
5325 if (adapter->flags & FLAG_RX_NEEDS_RESTART) in e1000_watchdog_task()
5326 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5328 pm_schedule_suspend(netdev->dev.parent, in e1000_watchdog_task()
5334 spin_lock(&adapter->stats64_lock); in e1000_watchdog_task()
5337 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; in e1000_watchdog_task()
5338 adapter->tpt_old = adapter->stats.tpt; in e1000_watchdog_task()
5339 mac->collision_delta = adapter->stats.colc - adapter->colc_old; in e1000_watchdog_task()
5340 adapter->colc_old = adapter->stats.colc; in e1000_watchdog_task()
5342 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; in e1000_watchdog_task()
5343 adapter->gorc_old = adapter->stats.gorc; in e1000_watchdog_task()
5344 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; in e1000_watchdog_task()
5345 adapter->gotc_old = adapter->stats.gotc; in e1000_watchdog_task()
5346 spin_unlock(&adapter->stats64_lock); in e1000_watchdog_task()
5353 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) in e1000_watchdog_task()
5354 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5357 if (adapter->flags & FLAG_RESTART_NOW) { in e1000_watchdog_task()
5358 schedule_work(&adapter->reset_task); in e1000_watchdog_task()
5363 e1000e_update_adaptive(&adapter->hw); in e1000_watchdog_task()
5366 if (adapter->itr_setting == 4) { in e1000_watchdog_task()
5369  * everyone else is between 2000 and 8000. in e1000_watchdog_task()
5371 u32 goc = (adapter->gotc + adapter->gorc) / 10000; in e1000_watchdog_task()
5372 u32 dif = (adapter->gotc > adapter->gorc ? in e1000_watchdog_task()
5373 adapter->gotc - adapter->gorc : in e1000_watchdog_task()
5374 adapter->gorc - adapter->gotc) / 10000; in e1000_watchdog_task()
5381 if (adapter->msix_entries) in e1000_watchdog_task()
5382 ew32(ICS, adapter->rx_ring->ims_val); in e1000_watchdog_task()
5390 adapter->detect_tx_hung = true; in e1000_watchdog_task()
5396 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5398 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) in e1000_watchdog_task()
5402 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { in e1000_watchdog_task()
5403 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && in e1000_watchdog_task()
5406 adapter->rx_hwtstamp_cleared++; in e1000_watchdog_task()
5408 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; in e1000_watchdog_task()
5413 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5414 mod_timer(&adapter->watchdog_timer, in e1000_watchdog_task()
5446 mss = skb_shinfo(skb)->gso_size; in e1000_tso()
5449 iph->tot_len = 0; in e1000_tso()
5450 iph->check = 0; in e1000_tso()
5451 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in e1000_tso()
5454 ipcse = skb_transport_offset(skb) - 1; in e1000_tso()
5460 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5462 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5465 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); in e1000_tso()
5467 i = tx_ring->next_to_use; in e1000_tso()
5469 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
5471 context_desc->lower_setup.ip_fields.ipcss = ipcss; in e1000_tso()
5472 context_desc->lower_setup.ip_fields.ipcso = ipcso; in e1000_tso()
5473 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); in e1000_tso()
5474 context_desc->upper_setup.tcp_fields.tucss = tucss; in e1000_tso()
5475 context_desc->upper_setup.tcp_fields.tucso = tucso; in e1000_tso()
5476 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5477 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); in e1000_tso()
5478 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; in e1000_tso()
5479 context_desc->cmd_and_length = cpu_to_le32(cmd_length); in e1000_tso()
5481 buffer_info->time_stamp = jiffies; in e1000_tso()
5482 buffer_info->next_to_watch = i; in e1000_tso()
5485 if (i == tx_ring->count) in e1000_tso()
5487 tx_ring->next_to_use = i; in e1000_tso()
5489 return 1; in e1000_tso()
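/* [Editor's sketch -- not driver code] For TSO the TCP checksum field is
 * pre-seeded with a pseudo-header sum computed over a ZERO length
 * (csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0) above, stored inverted),
 * because the hardware adds each segment's true length itself.
 * Host-order ones-complement sketch, ignoring network byte order:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint64_t sum)
{
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t saddr = 0xc0000201, daddr = 0xc0000202;  /* 192.0.2.1/.2 */
	uint64_t pseudo = (saddr >> 16) + (saddr & 0xffff) +
			  (daddr >> 16) + (daddr & 0xffff) +
			  6 /* IPPROTO_TCP */ + 0 /* length: zero for TSO */;

	/* value placed in tcp_hdr->check before handing off to hardware */
	printf("TSO checksum seed: %#06x\n", csum_fold(pseudo));
	return 0;
}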
5495 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_csum()
5502 if (skb->ip_summed != CHECKSUM_PARTIAL) in e1000_tx_csum()
5507 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in e1000_tx_csum()
5512 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in e1000_tx_csum()
5524 i = tx_ring->next_to_use; in e1000_tx_csum()
5525 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
5528 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5529 context_desc->upper_setup.tcp_fields.tucss = css; in e1000_tx_csum()
5530 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; in e1000_tx_csum()
5531 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5532 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5533 context_desc->cmd_and_length = cpu_to_le32(cmd_len); in e1000_tx_csum()
5535 buffer_info->time_stamp = jiffies; in e1000_tx_csum()
5536 buffer_info->next_to_watch = i; in e1000_tx_csum()
5539 if (i == tx_ring->count) in e1000_tx_csum()
5541 tx_ring->next_to_use = i; in e1000_tx_csum()
5550 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_map()
5551 struct pci_dev *pdev = adapter->pdev; in e1000_tx_map()
5557 i = tx_ring->next_to_use; in e1000_tx_map()
5560 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5563 buffer_info->length = size; in e1000_tx_map()
5564 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5565 buffer_info->next_to_watch = i; in e1000_tx_map()
5566 buffer_info->dma = dma_map_single(&pdev->dev, in e1000_tx_map()
5567 skb->data + offset, in e1000_tx_map()
5569 buffer_info->mapped_as_page = false; in e1000_tx_map()
5570 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5573 len -= size; in e1000_tx_map()
5579 if (i == tx_ring->count) in e1000_tx_map()
5585 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in e1000_tx_map()
5592 if (i == tx_ring->count) in e1000_tx_map()
5595 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5598 buffer_info->length = size; in e1000_tx_map()
5599 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5600 buffer_info->next_to_watch = i; in e1000_tx_map()
5601 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, in e1000_tx_map()
5604 buffer_info->mapped_as_page = true; in e1000_tx_map()
5605 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5608 len -= size; in e1000_tx_map()
5614 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5616 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5618 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
5619 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
5620 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
5621 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
5626 dev_err(&pdev->dev, "Tx DMA map failed\n"); in e1000_tx_map()
5627 buffer_info->dma = 0; in e1000_tx_map()
5629 count--; in e1000_tx_map()
5631 while (count--) { in e1000_tx_map()
5633 i += tx_ring->count; in e1000_tx_map()
5634 i--; in e1000_tx_map()
5635 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
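/* [Editor's sketch -- not driver code] The segs/bytecount math above
 * feeds byte-queue-limit and stats accounting: a GSO skb leaves the wire
 * as 'segs' packets, each re-carrying the linear header, so the on-wire
 * byte count is (segs - 1) * headlen + skb->len. Standalone form:
 */
#include <stdio.h>

int main(void)
{
	unsigned int gso_segs = 44;	/* 64 KB-ish TSO at 1448-byte MSS */
	unsigned int headlen = 66;	/* Ethernet + IP + TCP headers */
	unsigned int skb_len = 63778;	/* headers + payload in the skb */

	unsigned int bytecount = (gso_segs - 1) * headlen + skb_len;

	printf("wire bytes for BQL/stats: %u\n", bytecount);
	return 0;
}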
5644 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_queue()
5677 i = tx_ring->next_to_use; in e1000_tx_queue()
5680 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
5682 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_tx_queue()
5683 tx_desc->lower.data = cpu_to_le32(txd_lower | in e1000_tx_queue()
5684 buffer_info->length); in e1000_tx_queue()
5685 tx_desc->upper.data = cpu_to_le32(txd_upper); in e1000_tx_queue()
5688 if (i == tx_ring->count) in e1000_tx_queue()
5690 } while (--count > 0); in e1000_tx_queue()
5692 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); in e1000_tx_queue()
5694 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ in e1000_tx_queue()
5696 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); in e1000_tx_queue()
5700 * applicable for weak-ordered memory model archs, in e1000_tx_queue()
5701 * such as IA-64). in e1000_tx_queue()
5705 tx_ring->next_to_use = i; in e1000_tx_queue()
5712 struct e1000_hw *hw = &adapter->hw; in e1000_transfer_dhcp_info()
5716 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && in e1000_transfer_dhcp_info()
5717 (adapter->hw.mng_cookie.status & in e1000_transfer_dhcp_info()
5721 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) in e1000_transfer_dhcp_info()
5724 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) in e1000_transfer_dhcp_info()
5728 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); in e1000_transfer_dhcp_info()
5731 if (ip->protocol != IPPROTO_UDP) in e1000_transfer_dhcp_info()
5734 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); in e1000_transfer_dhcp_info()
5735 if (ntohs(udp->dest) != 67) in e1000_transfer_dhcp_info()
5738 offset = (u8 *)udp + 8 - skb->data; in e1000_transfer_dhcp_info()
5739 length = skb->len - offset; in e1000_transfer_dhcp_info()
5748 struct e1000_adapter *adapter = tx_ring->adapter; in __e1000_maybe_stop_tx()
5750 netif_stop_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5761 return -EBUSY; in __e1000_maybe_stop_tx()
5764 netif_start_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5765 ++adapter->restart_queue; in __e1000_maybe_stop_tx()
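/* [Editor's sketch -- not driver code] The stop/re-check above is the
 * standard lock-free producer/consumer handshake: stop the queue, make
 * the store visible (the driver uses smp_mb() on an elided line), then
 * re-count free descriptors in case the cleanup path freed some in the
 * window, restarting if so. Userspace model using C11 atomics, whose
 * seq_cst ordering stands in for the kernel barrier:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq {
	atomic_bool stopped;
	atomic_uint free_desc;
};

static int maybe_stop_tx(struct txq *q, unsigned int needed)
{
	atomic_store(&q->stopped, true);
	if (atomic_load(&q->free_desc) < needed)
		return -1;			/* -EBUSY: stay stopped */
	atomic_store(&q->stopped, false);	/* cleaner raced us */
	return 0;
}

int main(void)
{
	struct txq q = { .stopped = false, .free_desc = 8 };

	printf("needed 4 -> %d (stopped=%d)\n",
	       maybe_stop_tx(&q, 4), (int)atomic_load(&q.stopped));
	return 0;
}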
5771 BUG_ON(size > tx_ring->count); in e1000_maybe_stop_tx()
5782 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_xmit_frame()
5793 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_xmit_frame()
5798 if (skb->len <= 0) { in e1000_xmit_frame()
5809 mss = skb_shinfo(skb)->gso_size; in e1000_xmit_frame()
5813 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data in e1000_xmit_frame()
5815 * frags into skb->data in e1000_xmit_frame()
5818 /* we do this workaround for ES2LAN, but it is unnecessary, in e1000_xmit_frame()
5821 if (skb->data_len && (hdr_len == len)) { in e1000_xmit_frame()
5824 pull_size = min_t(unsigned int, 4, skb->data_len); in e1000_xmit_frame()
5835 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) in e1000_xmit_frame()
5839 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); in e1000_xmit_frame()
5841 nr_frags = skb_shinfo(skb)->nr_frags; in e1000_xmit_frame()
5843 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), in e1000_xmit_frame()
5844 adapter->tx_fifo_limit); in e1000_xmit_frame()
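/* [Editor's sketch -- not driver code] The loop above reserves the worst
 * case before committing: the linear area and every page fragment are
 * split into tx_fifo_limit-sized descriptors. Standalone count, assuming
 * a 4096-byte FIFO limit and one offload context descriptor:
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int fifo_limit = 4096;
	unsigned int headlen = 1514;			/* linear area */
	unsigned int frags[] = { 4096, 32768, 123 };	/* page frags */
	unsigned int count = 1;				/* context desc */

	count += DEMO_DIV_ROUND_UP(headlen, fifo_limit);
	for (unsigned int f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
		count += DEMO_DIV_ROUND_UP(frags[f], fifo_limit);

	printf("descriptors reserved: %u\n", count);	/* 1+1+1+8+1 = 12 */
	return 0;
}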
5846 if (adapter->hw.mac.tx_pkt_filtering) in e1000_xmit_frame()
5861 first = tx_ring->next_to_use; in e1000_xmit_frame()
5881 if (unlikely(skb->no_fcs)) in e1000_xmit_frame()
5885 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, in e1000_xmit_frame()
5888 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in e1000_xmit_frame()
5889 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { in e1000_xmit_frame()
5890 if (!adapter->tx_hwtstamp_skb) { in e1000_xmit_frame()
5891 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in e1000_xmit_frame()
5893 adapter->tx_hwtstamp_skb = skb_get(skb); in e1000_xmit_frame()
5894 adapter->tx_hwtstamp_start = jiffies; in e1000_xmit_frame()
5895 schedule_work(&adapter->tx_hwtstamp_work); in e1000_xmit_frame()
5897 adapter->tx_hwtstamp_skipped++; in e1000_xmit_frame()
5903 netdev_sent_queue(netdev, skb->len); in e1000_xmit_frame()
5907 ((MAX_SKB_FRAGS + 1) * in e1000_xmit_frame()
5909 adapter->tx_fifo_limit) + 4)); in e1000_xmit_frame()
5913 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_xmit_frame()
5915 tx_ring->next_to_use); in e1000_xmit_frame()
5917 writel(tx_ring->next_to_use, tx_ring->tail); in e1000_xmit_frame()
5921 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
5922 tx_ring->next_to_use = first; in e1000_xmit_frame()
5929 * e1000_tx_timeout - Respond to a Tx Hang
5938 adapter->tx_timeout_count++; in e1000_tx_timeout()
5939 schedule_work(&adapter->reset_task); in e1000_tx_timeout()
5949 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_reset_task()
5954 if (!(adapter->flags & FLAG_RESTART_NOW)) { in e1000_reset_task()
5963 * e1000e_get_stats64 - Get System Network Statistics
5974 spin_lock(&adapter->stats64_lock); in e1000e_get_stats64()
5977 stats->rx_bytes = adapter->stats.gorc; in e1000e_get_stats64()
5978 stats->rx_packets = adapter->stats.gprc; in e1000e_get_stats64()
5979 stats->tx_bytes = adapter->stats.gotc; in e1000e_get_stats64()
5980 stats->tx_packets = adapter->stats.gptc; in e1000e_get_stats64()
5981 stats->multicast = adapter->stats.mprc; in e1000e_get_stats64()
5982 stats->collisions = adapter->stats.colc; in e1000e_get_stats64()
5989 stats->rx_errors = adapter->stats.rxerrc + in e1000e_get_stats64()
5990 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_get_stats64()
5991 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_get_stats64()
5992 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; in e1000e_get_stats64()
5993 stats->rx_crc_errors = adapter->stats.crcerrs; in e1000e_get_stats64()
5994 stats->rx_frame_errors = adapter->stats.algnerrc; in e1000e_get_stats64()
5995 stats->rx_missed_errors = adapter->stats.mpc; in e1000e_get_stats64()
5998 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_get_stats64()
5999 stats->tx_aborted_errors = adapter->stats.ecol; in e1000e_get_stats64()
6000 stats->tx_window_errors = adapter->stats.latecol; in e1000e_get_stats64()
6001 stats->tx_carrier_errors = adapter->stats.tncrs; in e1000e_get_stats64()
6005 spin_unlock(&adapter->stats64_lock); in e1000e_get_stats64()
6009 * e1000_change_mtu - Change the Maximum Transfer Unit
6022 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { in e1000_change_mtu()
6024 return -EINVAL; in e1000_change_mtu()
6028 if ((adapter->hw.mac.type >= e1000_pch2lan) && in e1000_change_mtu()
6029 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && in e1000_change_mtu()
6032 return -EINVAL; in e1000_change_mtu()
6035 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000_change_mtu()
6037 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ in e1000_change_mtu()
6038 adapter->max_frame_size = max_frame; in e1000_change_mtu()
6040 netdev->mtu, new_mtu); in e1000_change_mtu()
6041 WRITE_ONCE(netdev->mtu, new_mtu); in e1000_change_mtu()
6043 pm_runtime_get_sync(netdev->dev.parent); in e1000_change_mtu()
6051 * i.e. RXBUFFER_2048 --> size-4096 slab in e1000_change_mtu()
6057 adapter->rx_buffer_len = 2048; in e1000_change_mtu()
6059 adapter->rx_buffer_len = 4096; in e1000_change_mtu()
6063 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_change_mtu()
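/* [Editor's sketch -- not driver code] The binning above picks the Rx
 * buffer size from a small set so allocations land in efficient slab
 * caches (the "size-4096 slab" note above). Standalone shape of the
 * decision, assuming the standard 1522-byte VLAN frame bound:
 */
#include <stdio.h>

static unsigned int rx_buffer_len_for(unsigned int max_frame)
{
	if (max_frame <= 1522)		/* VLAN_ETH_FRAME_LEN + FCS */
		return 1522;
	return max_frame <= 2048 ? 2048 : 4096;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 1600, 4000, 9000 };

	for (int i = 0; i < 4; i++) {
		unsigned int mf = mtus[i] + 18 + 4;	/* hdrs + VLAN + FCS */
		printf("mtu %u -> rx_buffer_len %u\n",
		       mtus[i], rx_buffer_len_for(mf));
	}
	return 0;
}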
6070 pm_runtime_put_sync(netdev->dev.parent); in e1000_change_mtu()
6072 clear_bit(__E1000_RESETTING, &adapter->state); in e1000_change_mtu()
/* e1000_mii_ioctl() */
	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		switch (data->reg_num & 0x1F) {
		case MII_BMCR:      data->val_out = adapter->phy_regs.bmcr; break;
		case MII_BMSR:      data->val_out = adapter->phy_regs.bmsr; break;
		case MII_PHYSID1:   data->val_out = (adapter->hw.phy.id >> 16); break;
		case MII_PHYSID2:   data->val_out = (adapter->hw.phy.id & 0xFFFF); break;
		case MII_ADVERTISE: data->val_out = adapter->phy_regs.advertise; break;
		case MII_LPA:       data->val_out = adapter->phy_regs.lpa; break;
		case MII_EXPANSION: data->val_out = adapter->phy_regs.expansion; break;
		case MII_CTRL1000:  data->val_out = adapter->phy_regs.ctrl1000; break;
		case MII_STAT1000:  data->val_out = adapter->phy_regs.stat1000; break;
		case MII_ESTATUS:   data->val_out = adapter->phy_regs.estatus; break;
		default:
			return -EIO;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
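/* Editor's sketch (not part of the driver): how userspace reaches the
 * handler above. SIOCGMIIPHY/SIOCGMIIREG carry a struct mii_ioctl_data
 * embedded in the ifreq union; "eth0" and the BMSR read below are
 * illustrative choices:
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	/* The MII data lives inside the ifreq union, not behind a pointer. */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* Ask the driver for the PHY address, then read the BMSR. */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x (link %s)\n", mii->val_out,
			       (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
	}
	close(fd);
	return 0;
}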
/**
 * e1000e_hwtstamp_set - control hardware time stamping
 */
	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;

/* e1000e_hwtstamp_get() */
	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
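/* Editor's sketch (not part of the driver): requesting timestamping from
 * userspace with SIOCSHWTSTAMP. "eth0" is a placeholder, and the rx_filter
 * actually granted depends on the hardware; the driver writes the effective
 * config back, which is why the code above copies 'config' out on success:
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
		printf("granted rx_filter = %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}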
/* e1000_ioctl(), excerpt */
		return -EOPNOTSUPP;
/* e1000_init_phy_wakeup(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	retval = hw->phy.ops.acquire(hw);

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,

	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);

	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);

	hw->phy.ops.release(hw);
/* e1000e_flush_lpic(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_sync(netdev->dev.parent);

	ret_val = hw->phy.ops.acquire(hw);

	hw->phy.ops.release(hw);

	pm_runtime_put_sync(netdev->dev.parent);
/* e1000e_s0ix_entry_flow(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	    hw->mac.type >= e1000_pch_adp) {

	/* ... 772_29[5] = 1 CS_Mode_Stay_In_K1 */

	/* ... Force the SMBus in PHY page769_23[0] = 1
	 * Force the SMBus in MAC CTRL_EXT[11] = 1
	 */

	/* DFT control: PHY bit: page769_20[0] = 1
	 * page769_20[7] - PHY PLL stop
	 * page769_20[8] - PHY go to the electrical idle
	 * page769_20[9] - PHY serdes disable
	 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
	 */
/* e1000e_s0ix_exit_flow(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	    hw->mac.type >= e1000_pch_adp) {

	/* If this takes more than 1 second, show a warning indicating a ... */
/* e1000e_pm_freeze(), excerpt */
	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

	e1000e_disable_pcie_master(&adapter->hw);
/* __e1000_shutdown(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	else if (device_may_wakeup(&pdev->dev))
		wufc = adapter->wol;

	/* turn on all-multi mode if wake on multicast is enabled */

	if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type ==

	if (adapter->flags & FLAG_IS_ICH)
		e1000_suspend_workarounds_ich8lan(&adapter->hw);

	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {

	if (adapter->hw.phy.type == e1000_phy_igp_3) {
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
	} else if (hw->mac.type >= e1000_pch_lpt) {

	if ((hw->phy.type >= e1000_phy_i217) &&
	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {

		retval = hw->phy.ops.acquire(hw);

		if (adapter->eee_advert &
		    hw->dev_spec.ich8lan.eee_lp_ability &

		if (adapter->eee_advert &
		    hw->dev_spec.ich8lan.eee_lp_ability &

		hw->phy.ops.release(hw);

	/* The pci-e switch on some quad port adapters will report a
	 * ... downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
/**
 * __e1000e_disable_aspm - Disable ASPM states
 * @state: bit-mask of ASPM states to disable
 */
	struct pci_dev *parent = pdev->bus->self;

	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",

	/* Double-check ASPM control. If not disabled by the above, the ... */

/**
 * e1000e_disable_aspm - Disable ASPM states.
 * @state: bit-mask of ASPM states to disable
 */

/**
 * e1000e_disable_aspm_locked - Disable ASPM states.
 * @state: bit-mask of ASPM states to disable
 */
	__e1000e_disable_aspm(pdev, state, 1);
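/* Editor's sketch (not part of the driver): inspecting the ASPM control
 * bits the code above clears. This walks the standard PCI capability list
 * in config space (the sysfs "config" file, fully readable as root) to the
 * PCI Express capability (ID 0x10) and decodes Link Control bits [1:0].
 * The device address below is a placeholder:
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:19.0/config";
	uint8_t cfg[256];
	uint8_t pos;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || read(fd, cfg, sizeof(cfg)) != (ssize_t)sizeof(cfg))
		return 1;

	/* The capability list head lives at config offset 0x34. */
	for (pos = cfg[0x34]; pos; pos = cfg[pos + 1]) {
		if (cfg[pos] == 0x10) {	/* PCI_CAP_ID_EXP */
			/* Link Control is at offset 0x10 in the capability. */
			uint16_t lnkctl = cfg[pos + 0x10] |
					  (cfg[pos + 0x11] << 8);
			static const char *aspm[] = {
				"disabled", "L0s", "L1", "L0s+L1"
			};
			printf("ASPM control: %s\n", aspm[lnkctl & 3]);
			break;
		}
	}
	close(fd);
	return 0;
}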
/* __e1000_resume(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)

	if (hw->mac.type >= e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);

			e_info("PHY Wakeup cause - %s\n",

		e1e_wphy(&adapter->hw, BM_WUS, ~0);

		e_info("MAC Wakeup cause - %s\n",

	if (!(adapter->flags & FLAG_HAS_AMT))
/* e1000e_pm_suspend(), excerpt */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)

/* e1000e_pm_resume(), excerpt */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)

/* e1000e_pm_runtime_idle(), excerpt */
	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;

	adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;

	return -EBUSY;

/* e1000e_pm_runtime_resume(), excerpt */
	pdev->pme_poll = true;

	if (netdev->flags & IFF_UP)

/* e1000e_pm_runtime_suspend(), excerpt */
	if (netdev->flags & IFF_UP) {
		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

	return -EBUSY;
/* e1000_shutdown() */
	e1000e_pm_freeze(&pdev->dev);
/* e1000_intr_msix(), excerpt */
	if (adapter->msix_entries) {
		msix_irq = adapter->msix_entries[vector].vector;

		msix_irq = adapter->msix_entries[vector].vector;

		msix_irq = adapter->msix_entries[vector].vector;
/* e1000_netpoll()
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default:	/* legacy interrupts */
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
/**
 * e1000_io_error_detected - called when PCI error is detected
 */
	e1000e_pm_freeze(&pdev->dev);

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000e_pm_resume routine.
 */
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)

		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");

	pdev->state_saved = true;

/**
 * e1000_io_resume - called when traffic can start flowing again.
 *
 * ... resembles the second-half of the e1000e_pm_resume routine.
 */
	e1000e_pm_thaw(&pdev->dev);

	if (!(adapter->flags & FLAG_HAS_AMT))
/* e1000_print_device_info(), excerpt */
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	       netdev->dev_addr);
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");

	       hw->mac.type, hw->phy.type, pba_str);

/* e1000_eeprom_checks(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.type != e1000_82573)

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);

		dev_warn(&adapter->pdev->dev,
/* e1000_fix_features(), excerpt */
	struct e1000_hw *hw = &adapter->hw;

	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))

/* e1000_set_features(), excerpt */
	netdev_features_t changed = features ^ netdev->features;

		adapter->flags |= FLAG_TSO_FORCE;

		adapter->flags2 &= ~FLAG2_CRC_STRIPPING;

		if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
			adapter->flags2 |= FLAG2_CRC_STRIPPING;
		else
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;

	netdev->features = features;

	return 1;
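/* Editor's note: NETIF_F_RXFCS and NETIF_F_RXALL, juggled through this
 * path, correspond to "ethtool -K <ifname> rx-fcs on|off" and
 * "ethtool -K <ifname> rx-all on|off" in userspace. The handler updates
 * netdev->features itself and returns 1, which tells the netdev core not
 * to apply the feature mask a second time.
 */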
/**
 * e1000_probe - Device Initialization Routine
 */
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		dev_err(&pdev->dev,

	err = -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
	    (hw->mac.type < e1000_pch_spt)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)

	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	netdev->netdev_ops = &e1000e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll);
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
	    (hw->mac.type < e1000_pch_spt))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,

	netdev->features = (NETIF_F_SG |

	if (!(adapter->flags & FLAG_TSO_FORCE)) {
		switch (adapter->link_speed) {

			netdev->features &= ~NETIF_F_TSO;
			netdev->features &= ~NETIF_F_TSO6;

			netdev->features |= NETIF_F_TSO;
			netdev->features |= NETIF_F_TSO6;

	if (hw->mac.type == e1000_pch_spt) {
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->vlan_features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - max_hw_frame_size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = adapter->max_hw_frame_size -

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)

	dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
	err = -EIO;

	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,

	eth_hw_addr_set(netdev, adapter->hw.mac.addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;

	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* Initial Wake on LAN setting - If APM wake is enabled in ... */
	if (adapter->flags & FLAG_APME_IN_WUC) {

		if ((hw->mac.type > e1000_ich10lan) &&
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			ret_val = e1000_read_nvm(&adapter->hw,
						 1, &eeprom_data);
		else
			ret_val = e1000_read_nvm(&adapter->hw,
						 1, &eeprom_data);

		adapter->eeprom_wol |= E1000_WUFC_MAG;

	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	adapter->wol = adapter->eeprom_wol;

	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);
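/* Editor's sketch (not part of the driver): adapter->wol, initialized from
 * the EEPROM above, is what ETHTOOL_GWOL/ETHTOOL_SWOL read and modify,
 * i.e. "ethtool -s eth0 wol g". A minimal C equivalent; the interface
 * name is a placeholder:
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("supported=0x%x enabled=0x%x\n",
		       wol.supported, wol.wolopts);
		/* Enable magic-packet wake if the device supports it. */
		if (wol.supported & WAKE_MAGIC) {
			wol.cmd = ETHTOOL_SWOL;
			wol.wolopts |= WAKE_MAGIC;
			ioctl(fd, SIOCETHTOOL, &ifr);
		}
	}
	close(fd);
	return 0;
}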
	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

		adapter->eeprom_vers = 0;

	if (!(adapter->flags & FLAG_HAS_AMT))

	if (hw->mac.type >= e1000_pch_cnp)
		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;

	strscpy(netdev->name, "eth%d", sizeof(netdev->name));

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);

	pm_runtime_put_noidle(&pdev->dev);

	if (!(adapter->flags & FLAG_HAS_AMT))

	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);

	iounmap(adapter->hw.hw_addr);
/**
 * e1000_remove - Device Removal Routine
 *
 * ... Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}

	pm_runtime_get_noresume(&pdev->dev);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
/**
 * e1000_init_module - Driver Registration Routine
 */
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 */