Lines Matching +full:0 +full:xffffff7f

37 module_param(debug, int, 0);
38 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
76 {E1000_RDLEN(0), "RDLEN"},
77 {E1000_RDH(0), "RDH"},
78 {E1000_RDT(0), "RDT"},
80 {E1000_RXDCTL(0), "RXDCTL"},
82 {E1000_RDBAL(0), "RDBAL"},
83 {E1000_RDBAH(0), "RDBAH"},
92 {E1000_TDBAL(0), "TDBAL"},
93 {E1000_TDBAH(0), "TDBAH"},
94 {E1000_TDLEN(0), "TDLEN"},
95 {E1000_TDH(0), "TDH"},
96 {E1000_TDT(0), "TDT"},
98 {E1000_TXDCTL(0), "TXDCTL"},
100 {E1000_TARC(0), "TARC"},
108 {0, NULL}
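The bracketed pairs above come from the register-name table that the dump code walks when printing hardware state; the {0, NULL} entry is the sentinel that ends the walk. A minimal sketch of that pattern, with the struct fields, names, and loop reproduced from memory rather than quoted from the driver:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch only: each entry pairs a register offset with a printable name,
 * and the consumer iterates until it reaches the NULL-name sentinel.
 */
struct reg_info {
	u32 ofs;	/* register offset, e.g. E1000_RDLEN(0) */
	char *name;	/* printable name, e.g. "RDLEN" */
};

static void walk_reg_table(const struct reg_info *tbl)
{
	const struct reg_info *reginfo;

	for (reginfo = tbl; reginfo->name; reginfo++)	/* stops at {0, NULL} */
		pr_info("%-15s at offset 0x%05x\n", reginfo->name, reginfo->ofs);
}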
146 int n = 0; in e1000_regdump()
151 case E1000_RXDCTL(0): in e1000_regdump()
152 for (n = 0; n < 2; n++) in e1000_regdump()
155 case E1000_TXDCTL(0): in e1000_regdump()
156 for (n = 0; n < 2; n++) in e1000_regdump()
159 case E1000_TARC(0): in e1000_regdump()
160 for (n = 0; n < 2; n++) in e1000_regdump()
169 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
170 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
179 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
217 int i = 0; in e1000e_dump()
246 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
258 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) in e1000e_dump()
262 * 0 | Buffer Address [63:0] (Reserved on Write Back) | in e1000e_dump()
266 * 63 48 47 36 35 32 31 24 23 16 15 0 in e1000e_dump()
268 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload in e1000e_dump()
269 * 63 48 47 40 39 32 31 16 15 8 7 0 in e1000e_dump()
271 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | in e1000e_dump()
275 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 in e1000e_dump()
277 * Extended Data Descriptor (DTYP=0x1) in e1000e_dump()
279 * 0 | Buffer Address [63:0] | in e1000e_dump()
283 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 in e1000e_dump()
285 …pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp … in e1000e_dump()
287 …pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp … in e1000e_dump()
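The comment block and legend above describe the three transmit descriptor layouts the dump walks: legacy (DEXT = 0), the extended context descriptor (DTYP = 0x0) used for TSO/checksum setup, and the extended data descriptor (DTYP = 0x1). For orientation, the legacy layout maps roughly to the following 16-byte structure; this is a sketch based on the format comment and the 8257x datasheet, with field names paraphrased rather than quoted from the driver's hw.h:

#include <linux/types.h>

/* Sketch of the 16-byte legacy Tx descriptor described above (DEXT,
 * bit 29 of the second dword, is 0). Field names are a paraphrase of
 * the driver's struct e1000_tx_desc.
 */
struct legacy_tx_desc {
	__le64 buffer_addr;		/* bits 63:0; reserved on write-back */
	union {
		__le32 data;
		struct {
			__le16 length;	/* data buffer length */
			u8 cso;		/* checksum offset */
			u8 cmd;		/* command byte; holds DEXT */
		} flags;
	} lower;
	union {
		__le32 data;
		struct {
			u8 status;	/* e.g. descriptor-done */
			u8 css;		/* checksum start */
			__le16 special;	/* VLAN tag, etc. */
		} fields;
	} upper;
};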
288 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
301 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", in e1000e_dump()
323 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
337 * 0 | Buffer Address 0 [63:0] | in e1000e_dump()
339 * 8 | Buffer Address 1 [63:0] | in e1000e_dump()
341 * 16 | Buffer Address 2 [63:0] | in e1000e_dump()
343 * 24 | Buffer Address 3 [63:0] | in e1000e_dump()
346 …pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->d… in e1000e_dump()
349 * 63 48 47 32 31 13 12 8 7 4 3 0 in e1000e_dump()
351 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | in e1000e_dump()
356 * 63 48 47 32 31 20 19 0 in e1000e_dump()
359 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
376 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
384 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", in e1000e_dump()
400 case 0: in e1000e_dump()
404 * 0 | Buffer Address [63:0] | in e1000e_dump()
409 …pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read… in e1000e_dump()
412 * 63 48 47 32 31 24 23 4 3 0 in e1000e_dump()
415 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | in e1000e_dump()
421 * 63 48 47 32 31 20 19 0 in e1000e_dump()
425 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
442 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
448 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", in e1000e_dump()
505 memset(hwtstamps, 0, sizeof(*hwtstamps)); in e1000e_systim_to_hwtstamp()
664 skb_trim(skb, 0); in e1000_alloc_rx_buffers()
703 i = 0; in e1000_alloc_rx_buffers()
734 for (j = 0; j < PS_PAGE_BUFFERS; j++) { in e1000_alloc_rx_buffers_ps()
739 ~cpu_to_le64(0); in e1000_alloc_rx_buffers_ps()
750 0, PAGE_SIZE, in e1000_alloc_rx_buffers_ps()
789 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
806 i = 0; in e1000_alloc_rx_buffers_ps()
839 skb_trim(skb, 0); in e1000_alloc_jumbo_rx_buffers()
863 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
876 i = 0; in e1000_alloc_jumbo_rx_buffers()
882 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
925 int cleaned_count = 0; in e1000_clean_rx_irq()
927 unsigned int total_rx_bytes = 0, total_rx_packets = 0; in e1000_clean_rx_irq()
949 i = 0; in e1000_clean_rx_irq()
959 buffer_info->dma = 0; in e1000_clean_rx_irq()
1036 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1042 cleaned_count = 0; in e1000_clean_rx_irq()
1075 buffer_info->dma = 0; in e1000_put_txbuf()
1084 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1122 if (er32(TDH(0)) == er32(TDT(0))) { in e1000_print_hw_hang()
1223 unsigned int count = 0; in e1000_clean_tx_irq()
1224 unsigned int total_tx_bytes = 0, total_tx_packets = 0; in e1000_clean_tx_irq()
1225 unsigned int bytes_compl = 0, pkts_compl = 0; in e1000_clean_tx_irq()
1251 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1255 i = 0; in e1000_clean_tx_irq()
1323 int cleaned_count = 0; in e1000_clean_rx_irq_ps()
1325 unsigned int total_rx_bytes = 0, total_rx_packets = 0; in e1000_clean_rx_irq_ps()
1344 i = 0; in e1000_clean_rx_irq_ps()
1354 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1389 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1397 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1421 for (j = 0; j < PS_PAGE_BUFFERS; j++) { in e1000_clean_rx_irq_ps()
1429 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1430 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1461 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1468 cleaned_count = 0; in e1000_clean_rx_irq_ps()
1516 int cleaned_count = 0; in e1000_clean_jumbo_rx_irq()
1518 unsigned int total_rx_bytes = 0, total_rx_packets = 0; in e1000_clean_jumbo_rx_irq()
1539 i = 0; in e1000_clean_jumbo_rx_irq()
1549 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1571 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1572 0, length); in e1000_clean_jumbo_rx_irq()
1577 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1589 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1612 skb_fill_page_desc(skb, 0, in e1000_clean_jumbo_rx_irq()
1613 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1641 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1647 cleaned_count = 0; in e1000_clean_jumbo_rx_irq()
1680 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1694 buffer_info->dma = 0; in e1000_clean_rx_ring()
1707 for (j = 0; j < PS_PAGE_BUFFERS; j++) { in e1000_clean_rx_ring()
1713 ps_page->dma = 0; in e1000_clean_rx_ring()
1726 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1728 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1729 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1801 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1802 adapter->total_tx_packets = 0; in e1000_intr_msi()
1803 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1804 adapter->total_rx_packets = 0; in e1000_intr_msi()
1880 adapter->total_tx_bytes = 0; in e1000_intr()
1881 adapter->total_tx_packets = 0; in e1000_intr()
1882 adapter->total_rx_bytes = 0; in e1000_intr()
1883 adapter->total_rx_packets = 0; in e1000_intr()
1920 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1921 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1944 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1947 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
1951 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1952 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1970 int vector = 0; in e1000_configure_msix()
1971 u32 ctrl_ext, ivar = 0; in e1000_configure_msix()
1973 adapter->eiac_mask = 0; in e1000_configure_msix()
2060 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2067 if (err > 0) in e1000e_set_interrupt_capability()
2103 int err = 0, vector = 0; in e1000_request_msix()
2108 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2112 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2124 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2128 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2138 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
2144 return 0; in e1000_request_msix()
2169 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2192 int vector = 0; in e1000_free_irq()
2216 ew32(IMC, ~0); in e1000_irq_disable()
2218 ew32(EIAC_82574, 0); in e1000_irq_disable()
2224 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2317 return 0; in e1000_alloc_ring_dma()
2324 * Return 0 on success, negative on failure
2344 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2345 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2347 return 0; in e1000e_setup_tx_resources()
2358 * Returns 0 on success, negative on failure
2371 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2390 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2391 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2394 return 0; in e1000e_setup_rx_resources()
2397 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2418 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2425 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2427 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2429 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2430 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2468 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2498 if (packets == 0) in e1000_update_itr()
2549 new_itr = 0; in e1000_set_itr()
2613 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0; in e1000e_write_itr()
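The identical expressions in e1000_intr_msix_rx() and e1000e_write_itr() above convert an interrupt rate into the value programmed into the ITR register, which counts in 256 ns units: value = 10^9 / (rate * 256), with 0 meaning throttling is disabled. A hypothetical helper (not part of the driver) showing the same arithmetic with a worked example:

#include <linux/types.h>

/* Hypothetical helper mirroring the arithmetic used in e1000e_write_itr()
 * and e1000_intr_msix_rx(): the ITR register counts in 256 ns units.
 */
static inline u32 itr_rate_to_reg(u32 ints_per_sec)
{
	/* e.g. 20000 ints/s -> 1000000000 / (20000 * 256) = 195 counts;
	 * 0 disables interrupt throttling entirely.
	 */
	return ints_per_sec ? 1000000000 / (ints_per_sec * 256) : 0;
}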
2618 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2645 return 0; in e1000_alloc_queues()
2664 int tx_cleaned = 1, work_done = 0; in e1000e_poll()
2705 return 0; in e1000_vlan_rx_add_vid()
2709 index = (vid >> 5) & 0x7F; in e1000_vlan_rx_add_vid()
2711 vfta |= BIT((vid & 0x1F)); in e1000_vlan_rx_add_vid()
2717 return 0; in e1000_vlan_rx_add_vid()
2732 return 0; in e1000_vlan_rx_kill_vid()
2737 index = (vid >> 5) & 0x7F; in e1000_vlan_rx_kill_vid()
2739 vfta &= ~BIT((vid & 0x1F)); in e1000_vlan_rx_kill_vid()
2745 return 0; in e1000_vlan_rx_kill_vid()
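The shared arithmetic in e1000_vlan_rx_add_vid() and e1000_vlan_rx_kill_vid() above maps a 12-bit VLAN ID onto the VLAN Filter Table Array: bits 11:5 select one of 128 32-bit VFTA registers and bits 4:0 select the bit within it (128 * 32 = 4096 possible VIDs). A standalone sketch of just that mapping; the helper name is illustrative, not the driver's:

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: decompose a VLAN ID the same way the add_vid/kill_vid
 * paths above do before setting or clearing the filter bit.
 */
static inline void vfta_locate(u16 vid, u32 *reg_index, u32 *bit_mask)
{
	*reg_index = (vid >> 5) & 0x7F;	/* which of the 128 VFTA registers */
	*bit_mask = BIT(vid & 0x1F);	/* which bit inside that register */
}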
2839 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2871 for (i = 0, j = 0; i < 8; i++) { in e1000_init_manageability_pt()
2889 for (i = 0, j = 0; i < 8; i++) in e1000_init_manageability_pt()
2890 if (er32(MDEF(i)) == 0) { in e1000_init_manageability_pt()
2923 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); in e1000_configure_tx()
2924 ew32(TDBAH(0), (tdba >> 32)); in e1000_configure_tx()
2925 ew32(TDLEN(0), tdlen); in e1000_configure_tx()
2926 ew32(TDH(0), 0); in e1000_configure_tx()
2927 ew32(TDT(0), 0); in e1000_configure_tx()
2928 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2929 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2931 writel(0, tx_ring->head); in e1000_configure_tx()
2933 e1000e_update_tdt_wa(tx_ring, 0); in e1000_configure_tx()
2935 writel(0, tx_ring->tail); in e1000_configure_tx()
2943 u32 txdctl = er32(TXDCTL(0)); in e1000_configure_tx()
2952 * pthresh = 0x1f ==> prefetch if internal cache 31 or less in e1000_configure_tx()
2957 ew32(TXDCTL(0), txdctl); in e1000_configure_tx()
2960 ew32(TXDCTL(1), er32(TXDCTL(0))); in e1000_configure_tx()
2969 tarc = er32(TARC(0)); in e1000_configure_tx()
2975 ew32(TARC(0), tarc); in e1000_configure_tx()
2980 tarc = er32(TARC(0)); in e1000_configure_tx()
2982 ew32(TARC(0), tarc); in e1000_configure_tx()
3010 reg_val = er32(TARC(0)); in e1000_configure_tx()
3017 ew32(TARC(0), reg_val); in e1000_configure_tx()
3022 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
3032 u32 pages = 0; in e1000_setup_rctl()
3078 phy_data &= 0xfff8; in e1000_setup_rctl()
3083 phy_data &= 0x0fff; in e1000_setup_rctl()
3085 e1e_wphy(hw, 0x10, 0x2823); in e1000_setup_rctl()
3086 e1e_wphy(hw, 0x11, 0x0003); in e1000_setup_rctl()
3133 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3136 u32 psrctl = 0; in e1000_setup_rctl()
3218 * is set). set GRAN=1 and write back up to 0x4 worth, and in e1000_configure_rx()
3219 * enable prefetching of 0x20 Rx descriptors in e1000_configure_rx()
3223 * pthresh = 0x20 in e1000_configure_rx()
3225 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); in e1000_configure_rx()
3234 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3240 ew32(IAM, 0xffffffff); in e1000_configure_rx()
3248 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); in e1000_configure_rx()
3249 ew32(RDBAH(0), (rdba >> 32)); in e1000_configure_rx()
3250 ew32(RDLEN(0), rdlen); in e1000_configure_rx()
3251 ew32(RDH(0), 0); in e1000_configure_rx()
3252 ew32(RDT(0), 0); in e1000_configure_rx()
3253 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3254 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3256 writel(0, rx_ring->head); in e1000_configure_rx()
3258 e1000e_update_rdt_wa(rx_ring, 0); in e1000_configure_rx()
3260 writel(0, rx_ring->tail); in e1000_configure_rx()
3279 u32 rxdctl = er32(RXDCTL(0)); in e1000_configure_rx()
3281 ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8)); in e1000_configure_rx()
3302 * 0 on no addresses written
3315 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3316 return 0; in e1000e_write_mc_addr_list()
3324 i = 0; in e1000e_write_mc_addr_list()
3340 * 0 on no addresses written
3348 int count = 0; in e1000e_write_uc_addr_list()
3375 if (ret_val < 0) in e1000e_write_uc_addr_list()
3382 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
3383 ew32(RAH(rar_entries), 0); in e1000e_write_uc_addr_list()
3384 ew32(RAL(rar_entries), 0); in e1000e_write_uc_addr_list()
3430 if (count < 0) in e1000e_set_rx_mode()
3439 if (count < 0) in e1000e_set_rx_mode()
3459 for (i = 0; i < 10; i++) in e1000e_setup_rss_hash()
3462 /* Direct all traffic to queue 0 */ in e1000e_setup_rss_hash()
3463 for (i = 0; i < 32; i++) in e1000e_setup_rss_hash()
3464 ew32(RETA(i), 0); in e1000e_setup_rss_hash()
3504 if (!(fextnvm7 & BIT(0))) { in e1000e_get_base_timinca()
3505 ew32(FEXTNVM7, fextnvm7 | BIT(0)); in e1000e_get_base_timinca()
3576 return 0; in e1000e_get_base_timinca()
3601 u32 rxmtrl = 0; in e1000e_config_hwtstamp()
3602 u16 rxudp = 0; in e1000e_config_hwtstamp()
3612 tsync_tx_ctl = 0; in e1000e_config_hwtstamp()
3622 tsync_rx_ctl = 0; in e1000e_config_hwtstamp()
3739 return 0; in e1000e_config_hwtstamp()
3812 tdt = er32(TDT(0)); in e1000_flush_tx_ring()
3818 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3823 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3824 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
3844 rxdctl = er32(RXDCTL(0)); in e1000_flush_rx_ring()
3846 rxdctl &= 0xffffc000; in e1000_flush_rx_ring()
3851 rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); in e1000_flush_rx_ring()
3853 ew32(RXDCTL(0), rxdctl); in e1000_flush_rx_ring()
3884 tdlen = er32(TDLEN(0)); in e1000_flush_desc_rings()
3977 pba &= 0xffff; in e1000e_reset()
4017 fc->pause_time = 0xFFFF; in e1000e_reset()
4029 fc->high_water = 0x2800; in e1000e_reset()
4046 fc->high_water = 0x3500; in e1000e_reset()
4047 fc->low_water = 0x1500; in e1000e_reset()
4049 fc->high_water = 0x5000; in e1000e_reset()
4050 fc->low_water = 0x3000; in e1000e_reset()
4052 fc->refresh_time = 0x1000; in e1000e_reset()
4064 fc->refresh_time = 0xFFFF; in e1000e_reset()
4065 fc->pause_time = 0xFFFF; in e1000e_reset()
4068 fc->high_water = 0x05C20; in e1000e_reset()
4069 fc->low_water = 0x05048; in e1000e_reset()
4091 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4097 e1000e_write_itr(adapter, 0); in e1000e_reset()
4119 ew32(WUC, 0); in e1000e_reset()
4161 0 : adapter->eee_advert); in e1000e_reset()
4174 u16 phy_data = 0; in e1000e_reset()
4183 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4186 /* Fextnvm7 @ 0xe4[2] = 1 */ in e1000e_reset()
4190 /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ in e1000e_reset()
4305 adapter->link_speed = 0; in e1000e_down()
4306 adapter->link_duplex = 0; in e1000e_down()
4354 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { in e1000e_sanitize_systim()
4363 /* VMWare users have seen incvalue of zero, don't div / 0 */ in e1000e_sanitize_systim()
4364 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); in e1000e_sanitize_systim()
4368 if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) in e1000e_sanitize_systim()
4398 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
4472 return 0; in e1000_sw_init()
4500 * e1000_test_msi_interrupt - Returns 0 for successful test
4528 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4567 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4578 return 0; in e1000_test_msi()
4602 * Returns 0 on success, negative value on failure
4690 return 0; in e1000e_open()
4710 * Returns 0, this is not allowed to fail
4761 return 0; in e1000e_close()
4769 * Returns 0 on success, negative on failure
4783 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4790 * between the time RAR[0] gets clobbered and the time it in e1000_set_mac()
4793 * are dropped. Eventually the LAA will be in RAR[0] and in e1000_set_mac()
4800 return 0; in e1000_set_mac()
4935 if (adapter->link_speed == 0) in e1000e_update_stats()
5070 phy->lpa = 0; in e1000_phy_read_status()
5073 phy->stat1000 = 0; in e1000_phy_read_status()
5097 s32 ret_val = 0; in e1000e_has_link()
5158 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5161 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5190 u32 dmoff_exit_timeout = 100, tries = 0; in e1000_watchdog_task()
5284 tarc0 = er32(TARC(0)); in e1000_watchdog_task()
5286 ew32(TARC(0), tarc0); in e1000_watchdog_task()
5290 * after setting TARC(0) in e1000_watchdog_task()
5311 adapter->link_speed = 0; in e1000_watchdog_task()
5312 adapter->link_duplex = 0; in e1000_watchdog_task()
5375 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; in e1000_watchdog_task()
5393 * reset from the other port. Set the appropriate LAA in RAR[0] in e1000_watchdog_task()
5396 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5418 #define E1000_TX_FLAGS_CSUM 0x00000001
5419 #define E1000_TX_FLAGS_VLAN 0x00000002
5420 #define E1000_TX_FLAGS_TSO 0x00000004
5421 #define E1000_TX_FLAGS_IPV4 0x00000008
5422 #define E1000_TX_FLAGS_NO_FCS 0x00000010
5423 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5424 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
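The E1000_TX_FLAGS_* values above are independent bits collected into a single tx_flags word while a frame is being prepared and tested again when its descriptors are queued; the 0xffff0000 mask reserves the upper 16 bits for the VLAN tag. A hedged illustration of that pattern, assuming the defines above are in scope; the shift of 16 is inferred from the mask rather than taken from the driver:

#include <linux/types.h>

#define TX_FLAGS_VLAN_SHIFT	16	/* assumed to pair with the 0xffff0000 mask */

/* Illustration (not driver code): feature bits are OR'd in one at a time,
 * and the VLAN tag rides in the top 16 bits of the same word.
 */
static unsigned int build_tx_flags(bool do_tso, bool do_csum, u16 vlan_tag)
{
	unsigned int tx_flags = 0;

	if (do_tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (do_csum)
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (vlan_tag)
		tx_flags |= E1000_TX_FLAGS_VLAN |
			    ((unsigned int)vlan_tag << TX_FLAGS_VLAN_SHIFT);

	return tx_flags;
}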
5433 u32 cmd_length = 0; in e1000_tso()
5434 u16 ipcse = 0, mss; in e1000_tso()
5439 return 0; in e1000_tso()
5441 err = skb_cow_head(skb, 0); in e1000_tso()
5442 if (err < 0) in e1000_tso()
5449 iph->tot_len = 0; in e1000_tso()
5450 iph->check = 0; in e1000_tso()
5452 0, IPPROTO_TCP, 0); in e1000_tso()
5457 ipcse = 0; in e1000_tso()
5476 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5486 i = 0; in e1000_tso()
5528 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5531 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5532 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5540 i = 0; in e1000_tx_csum()
5554 unsigned int offset = 0, size, count = 0, i; in e1000_tx_map()
5580 i = 0; in e1000_tx_map()
5584 for (f = 0; f < nr_frags; f++) { in e1000_tx_map()
5588 offset = 0; in e1000_tx_map()
5593 i = 0; in e1000_tx_map()
5627 buffer_info->dma = 0; in e1000_tx_map()
5632 if (i == 0) in e1000_tx_map()
5639 return 0; in e1000_tx_map()
5647 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; in e1000_tx_queue()
5689 i = 0; in e1000_tx_queue()
5690 } while (--count > 0); in e1000_tx_queue()
5719 return 0; in e1000_transfer_dhcp_info()
5722 return 0; in e1000_transfer_dhcp_info()
5725 return 0; in e1000_transfer_dhcp_info()
5732 return 0; in e1000_transfer_dhcp_info()
5736 return 0; in e1000_transfer_dhcp_info()
5743 return 0; in e1000_transfer_dhcp_info()
5766 return 0; in __e1000_maybe_stop_tx()
5774 return 0; in e1000_maybe_stop_tx()
5784 unsigned int tx_flags = 0; in e1000_xmit_frame()
5788 int count = 0; in e1000_xmit_frame()
5798 if (skb->len <= 0) { in e1000_xmit_frame()
5842 for (f = 0; f < nr_frags; f++) in e1000_xmit_frame()
5864 if (tso < 0) { in e1000_xmit_frame()
5884 /* if count is 0 then mapping error has occurred */ in e1000_xmit_frame()
5912 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { in e1000_xmit_frame()
5921 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
6013 * Returns 0 on success, negative on failure
6074 return 0; in e1000_change_mtu()
6093 switch (data->reg_num & 0x1F) { in e1000_mii_ioctl()
6104 data->val_out = (adapter->hw.phy.id & 0xFFFF); in e1000_mii_ioctl()
6132 return 0; in e1000_mii_ioctl()
6183 sizeof(config)) ? -EFAULT : 0; in e1000e_hwtstamp_set()
6191 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; in e1000e_hwtstamp_get()
6232 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { in e1000_init_phy_wakeup()
6235 (u16)(mac_reg & 0xFFFF)); in e1000_init_phy_wakeup()
6237 (u16)((mac_reg >> 16) & 0xFFFF)); in e1000_init_phy_wakeup()
6339 * Force the SMBus in PHY page769_23[0] = 1 in e1000e_s0ix_entry_flow()
6349 /* DFT control: PHY bit: page769_20[0] = 1 in e1000e_s0ix_entry_flow()
6353 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 in e1000e_s0ix_entry_flow()
6356 phy_data |= BIT(0); in e1000e_s0ix_entry_flow()
6412 mac_data &= ~BIT(0); in e1000e_s0ix_entry_flow()
6426 ew32(TDFH, 0); in e1000e_s0ix_entry_flow()
6429 ew32(TDFT, 0); in e1000e_s0ix_entry_flow()
6432 ew32(TDFHS, 0); in e1000e_s0ix_entry_flow()
6435 ew32(TDFTS, 0); in e1000e_s0ix_entry_flow()
6438 ew32(TDFPC, 0); in e1000e_s0ix_entry_flow()
6441 ew32(RDFH, 0); in e1000e_s0ix_entry_flow()
6444 ew32(RDFT, 0); in e1000e_s0ix_entry_flow()
6447 ew32(RDFHS, 0); in e1000e_s0ix_entry_flow()
6450 ew32(RDFTS, 0); in e1000e_s0ix_entry_flow()
6453 ew32(RDFPC, 0); in e1000e_s0ix_entry_flow()
6462 u32 i = 0; in e1000e_s0ix_exit_flow()
6510 mac_data &= 0xFFF7FFFF; in e1000e_s0ix_exit_flow()
6517 phy_data &= 0xFBFF; in e1000e_s0ix_exit_flow()
6522 * 772_29[5] = 0 CS_Mode_Stay_In_K1 in e1000e_s0ix_exit_flow()
6525 phy_data &= 0xFFDF; in e1000e_s0ix_exit_flow()
6529 * Unforce the SMBus in PHY page769_23[0] = 0 in e1000e_s0ix_exit_flow()
6530 * Unforce the SMBus in MAC CTRL_EXT[11] = 0 in e1000e_s0ix_exit_flow()
6542 mac_data &= 0xFFFFFFF7; in e1000e_s0ix_exit_flow()
6548 mac_data |= BIT(0); in e1000e_s0ix_exit_flow()
6553 mac_data &= 0xFFBFFFFF; in e1000e_s0ix_exit_flow()
6582 mac_data &= 0xFFFFFF7F; in e1000e_s0ix_exit_flow()
6616 return 0; in e1000e_pm_freeze()
6625 int retval = 0; in __e1000_shutdown()
6633 wufc = 0; in __e1000_shutdown()
6684 ew32(WUC, 0); in __e1000_shutdown()
6685 ew32(WUFC, 0); in __e1000_shutdown()
6710 u16 lpi_ctrl = 0; in __e1000_shutdown()
6755 return 0; in __e1000_shutdown()
6767 return 0; in __e1000_shutdown()
6781 u16 aspm_dis_mask = 0; in __e1000e_disable_aspm()
6853 __e1000e_disable_aspm(pdev, state, 0); in e1000e_disable_aspm()
6873 int rc = 0; in e1000e_pm_thaw()
6898 u16 aspm_disable_flag = 0; in __e1000_resume()
6928 e1e_wphy(&adapter->hw, BM_WUS, ~0); in __e1000_resume()
6941 ew32(WUS, ~0); in __e1000_resume()
6955 return 0; in __e1000_resume()
6982 return 0; in e1000e_pm_suspend()
7061 return 0; in e1000e_pm_runtime_suspend()
7083 vector = 0; in e1000_intr_msix()
7169 u16 aspm_disable_flag = 0; in e1000_io_slot_reset()
7190 pci_enable_wake(pdev, PCI_D3hot, 0); in e1000_io_slot_reset()
7191 pci_enable_wake(pdev, PCI_D3cold, 0); in e1000_io_slot_reset()
7194 ew32(WUS, ~0); in e1000_io_slot_reset()
7254 u16 buf = 0; in e1000_eeprom_checks()
7261 if (!ret_val && (!(buf & BIT(0)))) { in e1000_eeprom_checks()
7301 return 0; in e1000_set_features()
7354 * Returns 0 on success, negative on failure
7369 u16 aspm_disable_flag = 0; in e1000_probe()
7370 u16 eeprom_data = 0; in e1000_probe()
7373 s32 ret_val = 0; in e1000_probe()
7428 mmio_start = pci_resource_start(pdev, 0); in e1000_probe()
7429 mmio_len = pci_resource_len(pdev, 0); in e1000_probe()
7484 adapter->hw.phy.autoneg_wait_to_complete = 0; in e1000_probe()
7489 adapter->hw.phy.disable_polarity_correction = 0; in e1000_probe()
7568 for (i = 0;; i++) { in e1000_probe()
7569 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) in e1000_probe()
7594 timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); in e1000_probe()
7595 timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); in e1000_probe()
7608 adapter->hw.phy.autoneg_advertised = 0x2f; in e1000_probe()
7643 adapter->eeprom_wol = 0; in e1000_probe()
7658 adapter->eeprom_vers = 0; in e1000_probe()
7692 return 0; in e1000_probe()
7921 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
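The final all-zero entry is the conventional sentinel that terminates a struct pci_device_id table; the PCI core stops matching when it reaches it. A generic sketch of the idiom; the device ID below is a placeholder, not one of e1000e's, and the real table lists dozens of devices:

#include <linux/module.h>
#include <linux/pci.h>

/* Sketch of a sentinel-terminated PCI ID table. PCI_VDEVICE() fills the
 * vendor/device fields and wildcards the subsystem IDs; the trailing
 * initializer is driver_data (used by e1000e to select the board type).
 */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1234), 0 },	/* placeholder device ID */
	{ 0, 0, 0, 0, 0, 0, 0 }			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);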