Lines Matching +full:rx +full:-sched +full:-sp
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
29 #include <linux/sched/signal.h>
52 #include <linux/dma-mapping.h>
56 #include <linux/hwmon-sysfs.h>
93 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
95 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
97 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
123 * and dev->tx_timeout() should be called to fix the problem
146 /* Do not place this n-ring entries value into the tp struct itself,
150 * replace things like '% foo' with '& (foo - 1)'.
154 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
161 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
164 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
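The comment at line 146 and the NEXT_TX() macro at line 164 rely on the ring sizes being powers of two, so index wrap-around can use a mask instead of a modulo. A minimal illustrative sketch (standalone, not from tg3.c; RING_SIZE is an assumed example value) of that technique:

    /* Illustrative only: wrap a ring index when the ring size is a power of two. */
    #include <assert.h>

    #define RING_SIZE 512u                  /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1u)

    static unsigned int ring_next(unsigned int idx)
    {
        /* '& RING_MASK' behaves like '% RING_SIZE' here but avoids a divide. */
        return (idx + 1u) & RING_MASK;
    }

    int main(void)
    {
        assert(ring_next(RING_SIZE - 1u) == 0u); /* wraps back to the start */
        return 0;
    }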
190 * the 5701 in the normal rx path. Doing so saves a device structure
197 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
201 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
207 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
232 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
354 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
472 writel(val, tp->regs + off); in tg3_write32()
477 return readl(tp->regs + off); in tg3_read32()
482 writel(val, tp->aperegs + off); in tg3_ape_write32()
487 return readl(tp->aperegs + off); in tg3_ape_read32()
494 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_write_indirect_reg32()
495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); in tg3_write_indirect_reg32()
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); in tg3_write_indirect_reg32()
497 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_write_indirect_reg32()
502 writel(val, tp->regs + off); in tg3_write_flush_reg32()
503 readl(tp->regs + off); in tg3_write_flush_reg32()
511 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_read_indirect_reg32()
512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); in tg3_read_indirect_reg32()
513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); in tg3_read_indirect_reg32()
514 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_read_indirect_reg32()
523 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + in tg3_write_indirect_mbox()
528 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + in tg3_write_indirect_mbox()
533 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_write_indirect_mbox()
534 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); in tg3_write_indirect_mbox()
535 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); in tg3_write_indirect_mbox()
536 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_write_indirect_mbox()
543 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, in tg3_write_indirect_mbox()
544 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); in tg3_write_indirect_mbox()
553 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_read_indirect_mbox()
554 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); in tg3_read_indirect_mbox()
555 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); in tg3_read_indirect_mbox()
556 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_read_indirect_mbox()
568 /* Non-posted methods */ in _tw32_flush()
569 tp->write32(tp, off, val); in _tw32_flush()
575 tp->read32(tp, off); in _tw32_flush()
586 tp->write32_mbox(tp, off, val); in tw32_mailbox_flush()
590 tp->read32_mbox(tp, off); in tw32_mailbox_flush()
595 void __iomem *mbox = tp->regs + off; in tg3_write32_tx_mbox()
606 return readl(tp->regs + off + GRCMBOX_BASE); in tg3_read32_mbox_5906()
611 writel(val, tp->regs + off + GRCMBOX_BASE); in tg3_write32_mbox_5906()
614 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
616 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
620 #define tw32(reg, val) tp->write32(tp, reg, val)
623 #define tr32(reg) tp->read32(tp, reg)
633 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_write_mem()
635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); in tg3_write_mem()
636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); in tg3_write_mem()
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); in tg3_write_mem()
647 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_write_mem()
660 spin_lock_irqsave(&tp->indirect_lock, flags); in tg3_read_mem()
662 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); in tg3_read_mem()
663 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); in tg3_read_mem()
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); in tg3_read_mem()
674 spin_unlock_irqrestore(&tp->indirect_lock, flags); in tg3_read_mem()
697 if (!tp->pci_fn) in tg3_ape_lock_init()
700 bit = 1 << tp->pci_fn; in tg3_ape_lock_init()
723 if (!tp->pci_fn) in tg3_ape_lock()
726 bit = 1 << tp->pci_fn; in tg3_ape_lock()
735 return -EINVAL; in tg3_ape_lock()
755 if (pci_channel_offline(tp->pdev)) in tg3_ape_lock()
764 ret = -EBUSY; in tg3_ape_lock()
784 if (!tp->pci_fn) in tg3_ape_unlock()
787 bit = 1 << tp->pci_fn; in tg3_ape_unlock()
813 return -EBUSY; in tg3_ape_event_lock()
822 timeout_us -= (timeout_us > 10) ? 10 : timeout_us; in tg3_ape_event_lock()
825 return timeout_us ? 0 : -EBUSY; in tg3_ape_event_lock()
856 return -ENODEV; in tg3_ape_scratchpad_read()
860 return -EAGAIN; in tg3_ape_scratchpad_read()
872 len -= length; in tg3_ape_scratchpad_read()
876 return -EAGAIN; in tg3_ape_scratchpad_read()
897 return -EAGAIN; in tg3_ape_scratchpad_read()
899 for (i = 0; length; i += 4, length -= 4) { in tg3_ape_scratchpad_read()
917 return -EAGAIN; in tg3_ape_send_event()
921 return -EAGAIN; in tg3_ape_send_event()
947 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); in tg3_ape_driver_state_change()
964 if (device_may_wakeup(&tp->pdev->dev) && in tg3_ape_driver_state_change()
990 time_before(jiffies, tp->ape_hb_jiffies + interval)) in tg3_send_ape_heartbeat()
993 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); in tg3_send_ape_heartbeat()
994 tp->ape_hb_jiffies = jiffies; in tg3_send_ape_heartbeat()
1002 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); in tg3_disable_ints()
1003 for (i = 0; i < tp->irq_max; i++) in tg3_disable_ints()
1004 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); in tg3_disable_ints()
1011 tp->irq_sync = 0; in tg3_enable_ints()
1015 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); in tg3_enable_ints()
1017 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; in tg3_enable_ints()
1018 for (i = 0; i < tp->irq_cnt; i++) { in tg3_enable_ints()
1019 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_enable_ints()
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); in tg3_enable_ints()
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); in tg3_enable_ints()
1025 tp->coal_now |= tnapi->coal_now; in tg3_enable_ints()
1030 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) in tg3_enable_ints()
1031 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); in tg3_enable_ints()
1033 tw32(HOSTCC_MODE, tp->coal_now); in tg3_enable_ints()
1035 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); in tg3_enable_ints()
1040 struct tg3 *tp = tnapi->tp; in tg3_has_work()
1041 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_has_work()
1046 if (sblk->status & SD_STATUS_LINK_CHG) in tg3_has_work()
1051 if (sblk->idx[0].tx_consumer != tnapi->tx_cons) in tg3_has_work()
1054 /* check for RX work to do */ in tg3_has_work()
1055 if (tnapi->rx_rcb_prod_idx && in tg3_has_work()
1056 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) in tg3_has_work()
1069 struct tg3 *tp = tnapi->tp; in tg3_int_reenable()
1071 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); in tg3_int_reenable()
1078 tw32(HOSTCC_MODE, tp->coalesce_mode | in tg3_int_reenable()
1079 HOSTCC_MODE_ENABLE | tnapi->coal_now); in tg3_int_reenable()
1096 tp->pci_clock_ctrl = clock_ctrl; in tg3_switch_clocks()
1124 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { in __tg3_readphy()
1126 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); in __tg3_readphy()
1130 tg3_ape_lock(tp, tp->phy_ape_lock); in __tg3_readphy()
1152 loops -= 1; in __tg3_readphy()
1155 ret = -EBUSY; in __tg3_readphy()
1161 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { in __tg3_readphy()
1162 tw32_f(MAC_MI_MODE, tp->mi_mode); in __tg3_readphy()
1166 tg3_ape_unlock(tp, tp->phy_ape_lock); in __tg3_readphy()
1173 return __tg3_readphy(tp, tp->phy_addr, reg, val); in tg3_readphy()
1183 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && in __tg3_writephy()
1187 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { in __tg3_writephy()
1189 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); in __tg3_writephy()
1193 tg3_ape_lock(tp, tp->phy_ape_lock); in __tg3_writephy()
1213 loops -= 1; in __tg3_writephy()
1216 ret = -EBUSY; in __tg3_writephy()
1220 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { in __tg3_writephy()
1221 tw32_f(MAC_MI_MODE, tp->mi_mode); in __tg3_writephy()
1225 tg3_ape_unlock(tp, tp->phy_ape_lock); in __tg3_writephy()
1232 return __tg3_writephy(tp, tp->phy_addr, reg, val); in tg3_writephy()
1362 return -EBUSY; in tg3_bmcr_reset()
1365 while (limit--) { in tg3_bmcr_reset()
1368 return -EBUSY; in tg3_bmcr_reset()
1377 return -EBUSY; in tg3_bmcr_reset()
1384 struct tg3 *tp = bp->priv; in tg3_mdio_read()
1387 spin_lock_bh(&tp->lock); in tg3_mdio_read()
1390 val = -EIO; in tg3_mdio_read()
1392 spin_unlock_bh(&tp->lock); in tg3_mdio_read()
1399 struct tg3 *tp = bp->priv; in tg3_mdio_write()
1402 spin_lock_bh(&tp->lock); in tg3_mdio_write()
1405 ret = -EIO; in tg3_mdio_write()
1407 spin_unlock_bh(&tp->lock); in tg3_mdio_write()
1417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_mdio_config_5785()
1418 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { in tg3_mdio_config_5785()
1436 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { in tg3_mdio_config_5785()
1495 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; in tg3_mdio_start()
1496 tw32_f(MAC_MI_MODE, tp->mi_mode); in tg3_mdio_start()
1513 tp->phy_addr = tp->pci_fn + 1; in tg3_mdio_init()
1521 tp->phy_addr += 7; in tg3_mdio_init()
1525 addr = ssb_gige_get_phyaddr(tp->pdev); in tg3_mdio_init()
1528 tp->phy_addr = addr; in tg3_mdio_init()
1530 tp->phy_addr = TG3_PHY_MII_ADDR; in tg3_mdio_init()
1537 tp->mdio_bus = mdiobus_alloc(); in tg3_mdio_init()
1538 if (tp->mdio_bus == NULL) in tg3_mdio_init()
1539 return -ENOMEM; in tg3_mdio_init()
1541 tp->mdio_bus->name = "tg3 mdio bus"; in tg3_mdio_init()
1542 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev)); in tg3_mdio_init()
1543 tp->mdio_bus->priv = tp; in tg3_mdio_init()
1544 tp->mdio_bus->parent = &tp->pdev->dev; in tg3_mdio_init()
1545 tp->mdio_bus->read = &tg3_mdio_read; in tg3_mdio_init()
1546 tp->mdio_bus->write = &tg3_mdio_write; in tg3_mdio_init()
1547 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); in tg3_mdio_init()
1557 i = mdiobus_register(tp->mdio_bus); in tg3_mdio_init()
1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); in tg3_mdio_init()
1560 mdiobus_free(tp->mdio_bus); in tg3_mdio_init()
1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_mdio_init()
1566 if (!phydev || !phydev->drv) { in tg3_mdio_init()
1567 dev_warn(&tp->pdev->dev, "No PHY devices\n"); in tg3_mdio_init()
1568 mdiobus_unregister(tp->mdio_bus); in tg3_mdio_init()
1569 mdiobus_free(tp->mdio_bus); in tg3_mdio_init()
1570 return -ENODEV; in tg3_mdio_init()
1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { in tg3_mdio_init()
1575 phydev->interface = PHY_INTERFACE_MODE_GMII; in tg3_mdio_init()
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; in tg3_mdio_init()
1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | in tg3_mdio_init()
1586 phydev->interface = PHY_INTERFACE_MODE_RGMII; in tg3_mdio_init()
1590 phydev->interface = PHY_INTERFACE_MODE_MII; in tg3_mdio_init()
1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; in tg3_mdio_init()
1592 tp->phy_flags |= TG3_PHYFLG_IS_FET; in tg3_mdio_init()
1608 mdiobus_unregister(tp->mdio_bus); in tg3_mdio_fini()
1609 mdiobus_free(tp->mdio_bus); in tg3_mdio_fini()
1613 /* tp->lock is held. */
1622 tp->last_event_jiffies = jiffies; in tg3_generate_fw_event()
1627 /* tp->lock is held. */
1635 time_remain = (long)(tp->last_event_jiffies + 1 + in tg3_wait_for_event_ack()
1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - in tg3_wait_for_event_ack()
1650 if (pci_channel_offline(tp->pdev)) in tg3_wait_for_event_ack()
1657 /* tp->lock is held. */
1677 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { in tg3_phy_gather_ump_data()
1692 /* tp->lock is held. */
1714 /* tp->lock is held. */
1718 /* Wait for RX cpu to ACK the previous event. */ in tg3_stop_fw()
1725 /* Wait for RX cpu to ACK this event. */ in tg3_stop_fw()
1730 /* tp->lock is held. */
1759 /* tp->lock is held. */
1780 /* tp->lock is held. */
1824 if (pci_channel_offline(tp->pdev)) in tg3_poll_fw()
1825 return -ENODEV; in tg3_poll_fw()
1829 return -ENODEV; in tg3_poll_fw()
1837 if (pci_channel_offline(tp->pdev)) { in tg3_poll_fw()
1840 netdev_info(tp->dev, "No firmware running\n"); in tg3_poll_fw()
1857 netdev_info(tp->dev, "No firmware running\n"); in tg3_poll_fw()
1872 if (!netif_carrier_ok(tp->dev)) { in tg3_link_report()
1873 netif_info(tp, link, tp->dev, "Link is down\n"); in tg3_link_report()
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", in tg3_link_report()
1877 (tp->link_config.active_speed == SPEED_1000 ? in tg3_link_report()
1879 (tp->link_config.active_speed == SPEED_100 ? in tg3_link_report()
1881 (tp->link_config.active_duplex == DUPLEX_FULL ? in tg3_link_report()
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", in tg3_link_report()
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? in tg3_link_report()
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? in tg3_link_report()
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) in tg3_link_report()
1891 netdev_info(tp->dev, "EEE is %s\n", in tg3_link_report()
1892 tp->setlpicnt ? "enabled" : "disabled"); in tg3_link_report()
1897 tp->link_up = netif_carrier_ok(tp->dev); in tg3_link_report()
1964 u32 old_rx_mode = tp->rx_mode; in tg3_setup_flow_control()
1965 u32 old_tx_mode = tp->tx_mode; in tg3_setup_flow_control()
1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg; in tg3_setup_flow_control()
1970 autoneg = tp->link_config.autoneg; in tg3_setup_flow_control()
1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) in tg3_setup_flow_control()
1978 flowctrl = tp->link_config.flowctrl; in tg3_setup_flow_control()
1980 tp->link_config.active_flowctrl = flowctrl; in tg3_setup_flow_control()
1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; in tg3_setup_flow_control()
1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; in tg3_setup_flow_control()
1987 if (old_rx_mode != tp->rx_mode) in tg3_setup_flow_control()
1988 tw32_f(MAC_RX_MODE, tp->rx_mode); in tg3_setup_flow_control()
1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; in tg3_setup_flow_control()
1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; in tg3_setup_flow_control()
1995 if (old_tx_mode != tp->tx_mode) in tg3_setup_flow_control()
1996 tw32_f(MAC_TX_MODE, tp->tx_mode); in tg3_setup_flow_control()
2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_adjust_link()
2006 spin_lock_bh(&tp->lock); in tg3_adjust_link()
2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | in tg3_adjust_link()
2011 oldflowctrl = tp->link_config.active_flowctrl; in tg3_adjust_link()
2013 if (phydev->link) { in tg3_adjust_link()
2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) in tg3_adjust_link()
2019 else if (phydev->speed == SPEED_1000 || in tg3_adjust_link()
2025 if (phydev->duplex == DUPLEX_HALF) in tg3_adjust_link()
2029 tp->link_config.flowctrl); in tg3_adjust_link()
2031 if (phydev->pause) in tg3_adjust_link()
2033 if (phydev->asym_pause) in tg3_adjust_link()
2041 if (mac_mode != tp->mac_mode) { in tg3_adjust_link()
2042 tp->mac_mode = mac_mode; in tg3_adjust_link()
2043 tw32_f(MAC_MODE, tp->mac_mode); in tg3_adjust_link()
2048 if (phydev->speed == SPEED_10) in tg3_adjust_link()
2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) in tg3_adjust_link()
2067 if (phydev->link != tp->old_link || in tg3_adjust_link()
2068 phydev->speed != tp->link_config.active_speed || in tg3_adjust_link()
2069 phydev->duplex != tp->link_config.active_duplex || in tg3_adjust_link()
2070 oldflowctrl != tp->link_config.active_flowctrl) in tg3_adjust_link()
2073 tp->old_link = phydev->link; in tg3_adjust_link()
2074 tp->link_config.active_speed = phydev->speed; in tg3_adjust_link()
2075 tp->link_config.active_duplex = phydev->duplex; in tg3_adjust_link()
2077 spin_unlock_bh(&tp->lock); in tg3_adjust_link()
2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) in tg3_phy_init()
2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_phy_init()
2096 phydev = phy_connect(tp->dev, phydev_name(phydev), in tg3_phy_init()
2097 tg3_adjust_link, phydev->interface); in tg3_phy_init()
2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); in tg3_phy_init()
2104 switch (phydev->interface) { in tg3_phy_init()
2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_init()
2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); in tg3_phy_init()
2119 return -EINVAL; in tg3_phy_init()
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; in tg3_phy_init()
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_phy_start()
2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_phy_start()
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { in tg3_phy_start()
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; in tg3_phy_start()
2140 phydev->speed = tp->link_config.speed; in tg3_phy_start()
2141 phydev->duplex = tp->link_config.duplex; in tg3_phy_start()
2142 phydev->autoneg = tp->link_config.autoneg; in tg3_phy_start()
2144 phydev->advertising, tp->link_config.advertising); in tg3_phy_start()
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_phy_stop()
2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); in tg3_phy_stop()
2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { in tg3_phy_fini()
2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); in tg3_phy_fini()
2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; in tg3_phy_fini()
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) in tg3_phy_set_extloopbk()
2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { in tg3_phy_set_extloopbk()
2177 /* Cannot do read-modify-write on 5401 */ in tg3_phy_set_extloopbk()
2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) in tg3_phy_toggle_apd()
2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_phy_toggle_apd()
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) in tg3_phy_toggle_automdix()
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_phy_toggle_automdix()
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) in tg3_phy_set_wirespeed()
2308 if (!tp->phy_otp) in tg3_phy_apply_otp()
2311 otp = tp->phy_otp; in tg3_phy_apply_otp()
2344 struct ethtool_keee *dest = &tp->eee; in tg3_eee_pull_config()
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) in tg3_eee_pull_config()
2358 dest->eee_active = 1; in tg3_eee_pull_config()
2360 dest->eee_active = 0; in tg3_eee_pull_config()
2365 mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val); in tg3_eee_pull_config()
2370 dest->eee_enabled = !!val; in tg3_eee_pull_config()
2371 mii_eee_cap1_mod_linkmode_t(dest->advertised, val); in tg3_eee_pull_config()
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); in tg3_eee_pull_config()
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; in tg3_eee_pull_config()
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) in tg3_phy_eee_adjust()
2388 tp->setlpicnt = 0; in tg3_phy_eee_adjust()
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE && in tg3_phy_eee_adjust()
2392 tp->link_config.active_duplex == DUPLEX_FULL && in tg3_phy_eee_adjust()
2393 (tp->link_config.active_speed == SPEED_100 || in tg3_phy_eee_adjust()
2394 tp->link_config.active_speed == SPEED_1000)) { in tg3_phy_eee_adjust()
2397 if (tp->link_config.active_speed == SPEED_1000) in tg3_phy_eee_adjust()
2405 if (tp->eee.eee_active) in tg3_phy_eee_adjust()
2406 tp->setlpicnt = 2; in tg3_phy_eee_adjust()
2409 if (!tp->setlpicnt) { in tg3_phy_eee_adjust()
2425 if (tp->link_config.active_speed == SPEED_1000 && in tg3_phy_eee_enable()
2444 while (limit--) { in tg3_wait_macro_done()
2453 return -EBUSY; in tg3_wait_macro_done()
2482 return -EBUSY; in tg3_phy_write_and_check_testpat()
2490 return -EBUSY; in tg3_phy_write_and_check_testpat()
2496 return -EBUSY; in tg3_phy_write_and_check_testpat()
2506 return -EBUSY; in tg3_phy_write_and_check_testpat()
2516 return -EBUSY; in tg3_phy_write_and_check_testpat()
2538 return -EBUSY; in tg3_phy_reset_chanpat()
2566 /* Set full-duplex, 1000 mbps. */ in tg3_phy_reset_5703_4_5()
2587 } while (--retries); in tg3_phy_reset_5703_4_5()
2614 netif_carrier_off(tp->dev); in tg3_carrier_off()
2615 tp->link_up = false; in tg3_carrier_off()
2621 netdev_warn(tp->dev, in tg3_warn_mgmt_link_flap()
2622 "Management side-band traffic will be interrupted during phy settings change\n"); in tg3_warn_mgmt_link_flap()
2626 * link unless the FORCE argument is non-zero.
2641 return -EBUSY; in tg3_phy_reset()
2643 if (netif_running(tp->dev) && tp->link_up) { in tg3_phy_reset()
2644 netif_carrier_off(tp->dev); in tg3_phy_reset()
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) in tg3_phy_reset()
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) in tg3_phy_reset()
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && in tg3_phy_reset()
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { in tg3_phy_reset()
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { in tg3_phy_reset()
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { in tg3_phy_reset()
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { in tg3_phy_reset()
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { in tg3_phy_reset()
2736 /* Cannot do read-modify-write on 5401 */ in tg3_phy_reset()
2739 /* Set bit 14 with read-modify-write to preserve other bits */ in tg3_phy_reset()
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; in tg3_set_function_status()
2817 return -EIO; in tg3_pwrsrc_switch_to_vmain()
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, in tg3_pwrsrc_switch_to_vmain()
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, in tg3_pwrsrc_switch_to_vmain()
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; in tg3_pwrsrc_die_with_vmain()
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | in tg3_pwrsrc_switch_to_vaux()
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || in tg3_pwrsrc_switch_to_vaux()
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { in tg3_pwrsrc_switch_to_vaux()
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ in tg3_pwrsrc_switch_to_vaux()
2879 tp->grc_local_ctrl; in tg3_pwrsrc_switch_to_vaux()
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | in tg3_pwrsrc_switch_to_vaux()
2903 no_gpio2 = tp->nic_sram_data_cfg & in tg3_pwrsrc_switch_to_vaux()
2916 tp->grc_local_ctrl | grc_local_ctrl, in tg3_pwrsrc_switch_to_vaux()
2922 tp->grc_local_ctrl | grc_local_ctrl, in tg3_pwrsrc_switch_to_vaux()
2928 tp->grc_local_ctrl | grc_local_ctrl, in tg3_pwrsrc_switch_to_vaux()
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { in tg3_frob_aux_power()
2978 dev_peer = pci_get_drvdata(tp->pdev_peer); in tg3_frob_aux_power()
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) in tg3_5700_link_polarity()
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { in tg3_5700_link_polarity()
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) in tg3_phy_power_bug()
3027 if (!tp->pci_fn) in tg3_phy_power_bug()
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && in tg3_phy_power_bug()
3033 !tp->pci_fn) in tg3_phy_power_bug()
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && in tg3_phy_led_bug()
3047 !tp->pci_fn) in tg3_phy_led_bug()
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) in tg3_power_down_phy()
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { in tg3_power_down_phy()
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_power_down_phy()
3129 /* tp->lock is held. */
3135 if (tp->nvram_lock_cnt == 0) { in tg3_nvram_lock()
3144 return -ENODEV; in tg3_nvram_lock()
3147 tp->nvram_lock_cnt++; in tg3_nvram_lock()
3152 /* tp->lock is held. */
3156 if (tp->nvram_lock_cnt > 0) in tg3_nvram_unlock()
3157 tp->nvram_lock_cnt--; in tg3_nvram_unlock()
3158 if (tp->nvram_lock_cnt == 0) in tg3_nvram_unlock()
3163 /* tp->lock is held. */
3173 /* tp->lock is held. */
3190 return -EINVAL; in tg3_nvram_read_using_eeprom()
3210 return -EBUSY; in tg3_nvram_read_using_eeprom()
3239 return -EBUSY; in tg3_nvram_exec_cmd()
3250 (tp->nvram_jedecnum == JEDEC_ATMEL)) in tg3_nvram_phys_addr()
3252 addr = ((addr / tp->nvram_pagesize) << in tg3_nvram_phys_addr()
3254 (addr % tp->nvram_pagesize); in tg3_nvram_phys_addr()
3265 (tp->nvram_jedecnum == JEDEC_ATMEL)) in tg3_nvram_logical_addr()
3268 tp->nvram_pagesize) + in tg3_nvram_logical_addr()
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); in tg3_nvram_logical_addr()
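A small sketch of the address translation done by tg3_nvram_phys_addr() and tg3_nvram_logical_addr() above: Atmel AT45DB0x1B parts use a page size that is not a power of two, so a linear offset is split into a page number placed in the high bits plus the offset within the page. The 264-byte page size and bit position 9 below are assumptions for the example, not taken from this listing.

    /* Illustrative linear <-> paged NVRAM address conversion (constants assumed). */
    #include <assert.h>

    #define PAGE_SIZE_BYTES 264u   /* AT45DB0x1B page size, assumed for the sketch */
    #define PAGE_POS        9u     /* bit position of the page number, assumed     */

    static unsigned int to_phys(unsigned int addr)
    {
        return ((addr / PAGE_SIZE_BYTES) << PAGE_POS) + (addr % PAGE_SIZE_BYTES);
    }

    static unsigned int to_logical(unsigned int addr)
    {
        return ((addr >> PAGE_POS) * PAGE_SIZE_BYTES) +
               (addr & ((1u << PAGE_POS) - 1u));
    }

    int main(void)
    {
        unsigned int linear = 1000u;           /* page 3, offset 208 */

        assert(to_phys(linear) == (3u << PAGE_POS) + 208u);
        assert(to_logical(to_phys(linear)) == linear);
        return 0;
    }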
3278 * machine, the 32-bit value will be byteswapped.
3290 return -EINVAL; in tg3_nvram_read()
3363 rc = -EBUSY; in tg3_nvram_write_block_using_eeprom()
3376 u32 pagesize = tp->nvram_pagesize; in tg3_nvram_write_block_unbuffered()
3377 u32 pagemask = pagesize - 1; in tg3_nvram_write_block_unbuffered()
3383 return -ENOMEM; in tg3_nvram_write_block_unbuffered()
3405 len -= size; in tg3_nvram_write_block_unbuffered()
3409 offset = offset + (pagesize - page_off); in tg3_nvram_write_block_unbuffered()
3451 else if (j == (pagesize - 4)) in tg3_nvram_write_block_unbuffered()
3483 page_off = offset % tp->nvram_pagesize; in tg3_nvram_write_block_buffered()
3491 if (page_off == (tp->nvram_pagesize - 4)) in tg3_nvram_write_block_buffered()
3494 if (i == (len - 4)) in tg3_nvram_write_block_buffered()
3504 (tp->nvram_jedecnum == JEDEC_ST) && in tg3_nvram_write_block_buffered()
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & in tg3_nvram_write_block()
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); in tg3_nvram_write_block()
3580 /* tp->lock is held. */
3591 if (pci_channel_offline(tp->pdev)) in tg3_pause_cpu()
3592 return -EBUSY; in tg3_pause_cpu()
3595 return (i == iters) ? -EBUSY : 0; in tg3_pause_cpu()
3598 /* tp->lock is held. */
3610 /* tp->lock is held. */
3616 /* tp->lock is held. */
3623 /* tp->lock is held. */
3629 /* tp->lock is held. */
3646 * There is only an Rx CPU for the 5750 derivative in the in tg3_halt_cpu()
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n", in tg3_halt_cpu()
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); in tg3_halt_cpu()
3658 return -ENODEV; in tg3_halt_cpu()
3676 * tp->fw->size minus headers. in tg3_fw_data_len()
3686 if (tp->fw_len == 0xffffffff) in tg3_fw_data_len()
3687 fw_len = be32_to_cpu(fw_hdr->len); in tg3_fw_data_len()
3689 fw_len = tp->fw->size; in tg3_fw_data_len()
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); in tg3_fw_data_len()
3694 /* tp->lock is held. */
3701 int total_len = tp->fw->size; in tg3_load_firmware_cpu()
3704 netdev_err(tp->dev, in tg3_load_firmware_cpu()
3707 return -EINVAL; in tg3_load_firmware_cpu()
3735 total_len -= TG3_FW_HDR_LEN; in tg3_load_firmware_cpu()
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + in tg3_load_firmware_cpu()
3747 total_len -= be32_to_cpu(fw_hdr->len); in tg3_load_firmware_cpu()
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); in tg3_load_firmware_cpu()
3760 /* tp->lock is held. */
3778 return (i == iters) ? -EBUSY : 0; in tg3_pause_cpu_and_set_pc()
3781 /* tp->lock is held. */
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; in tg3_load_5701_a0_firmware_fix()
3791 length = end_address_of_bss - start_address_of_text. in tg3_load_5701_a0_firmware_fix()
3807 /* Now startup only the RX cpu. */ in tg3_load_5701_a0_firmware_fix()
3809 be32_to_cpu(fw_hdr->base_addr)); in tg3_load_5701_a0_firmware_fix()
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " in tg3_load_5701_a0_firmware_fix()
3814 be32_to_cpu(fw_hdr->base_addr)); in tg3_load_5701_a0_firmware_fix()
3815 return -ENODEV; in tg3_load_5701_a0_firmware_fix()
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n"); in tg3_validate_rxcpu_state()
3841 return -EBUSY; in tg3_validate_rxcpu_state()
3846 netdev_warn(tp->dev, in tg3_validate_rxcpu_state()
3848 return -EEXIST; in tg3_validate_rxcpu_state()
3854 /* tp->lock is held. */
3865 if (!tp->fw) in tg3_load_57766_firmware()
3870 * data to be written to non-contiguous locations. in tg3_load_57766_firmware()
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; in tg3_load_57766_firmware()
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) in tg3_load_57766_firmware()
3895 /* tp->lock is held. */
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; in tg3_load_tso_firmware()
3909 length = end_address_of_bss - start_address_of_text. in tg3_load_tso_firmware()
3913 cpu_scratch_size = tp->fw_len; in tg3_load_tso_firmware()
3932 be32_to_cpu(fw_hdr->base_addr)); in tg3_load_tso_firmware()
3934 netdev_err(tp->dev, in tg3_load_tso_firmware()
3937 be32_to_cpu(fw_hdr->base_addr)); in tg3_load_tso_firmware()
3938 return -ENODEV; in tg3_load_tso_firmware()
3945 /* tp->lock is held. */
3959 index -= 4; in __tg3_set_one_mac_addr()
3965 /* tp->lock is held. */
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); in __tg3_set_mac_addr()
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); in __tg3_set_mac_addr()
3983 addr_high = (tp->dev->dev_addr[0] + in __tg3_set_mac_addr()
3984 tp->dev->dev_addr[1] + in __tg3_set_mac_addr()
3985 tp->dev->dev_addr[2] + in __tg3_set_mac_addr()
3986 tp->dev->dev_addr[3] + in __tg3_set_mac_addr()
3987 tp->dev->dev_addr[4] + in __tg3_set_mac_addr()
3988 tp->dev->dev_addr[5]) & in __tg3_set_mac_addr()
3999 pci_write_config_dword(tp->pdev, in tg3_enable_register_access()
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); in tg3_enable_register_access()
4009 err = pci_set_power_state(tp->pdev, PCI_D0); in tg3_power_up()
4014 netdev_err(tp->dev, "Transition to D0 failed\n"); in tg3_power_up()
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, in tg3_power_down_prepare()
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) && in tg3_power_down_prepare()
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && in tg3_power_down_prepare()
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { in tg3_power_down_prepare()
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_power_down_prepare()
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; in tg3_power_down_prepare()
4053 tp->link_config.speed = phydev->speed; in tg3_power_down_prepare()
4054 tp->link_config.duplex = phydev->duplex; in tg3_power_down_prepare()
4055 tp->link_config.autoneg = phydev->autoneg; in tg3_power_down_prepare()
4057 &tp->link_config.advertising, in tg3_power_down_prepare()
4058 phydev->advertising); in tg3_power_down_prepare()
4082 linkmode_copy(phydev->advertising, advertising); in tg3_power_down_prepare()
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; in tg3_power_down_prepare()
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) in tg3_power_down_prepare()
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; in tg3_power_down_prepare()
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) in tg3_power_down_prepare()
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { in tg3_power_down_prepare()
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { in tg3_power_down_prepare()
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) in tg3_power_down_prepare()
4142 else if (tp->phy_flags & in tg3_power_down_prepare()
4144 if (tp->link_config.active_speed == SPEED_1000) in tg3_power_down_prepare()
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; in tg3_power_down_prepare()
4165 tw32(MAC_LED_CTRL, tp->led_ctrl); in tg3_power_down_prepare()
4189 base_val = tp->pci_clock_ctrl; in tg3_power_down_prepare()
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, in tg3_power_down_prepare()
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, in tg3_power_down_prepare()
4235 tp->pci_clock_ctrl | newbits3, 40); in tg3_power_down_prepare()
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); in tg3_power_down()
4272 pci_set_power_state(tp->pdev, PCI_D3hot); in tg3_power_down()
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_aux_stat_to_speed_duplex()
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_autoneg_cfg()
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) in tg3_phy_autoneg_cfg()
4357 if (!tp->eee.eee_enabled) in tg3_phy_autoneg_cfg()
4362 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val); in tg3_phy_autoneg_cfg()
4397 if (tp->link_config.autoneg == AUTONEG_ENABLE || in tg3_phy_copper_begin()
4398 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { in tg3_phy_copper_begin()
4401 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && in tg3_phy_copper_begin()
4402 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { in tg3_phy_copper_begin()
4408 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { in tg3_phy_copper_begin()
4409 if (!(tp->phy_flags & in tg3_phy_copper_begin()
4417 adv = tp->link_config.advertising; in tg3_phy_copper_begin()
4418 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) in tg3_phy_copper_begin()
4422 fc = tp->link_config.flowctrl; in tg3_phy_copper_begin()
4427 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && in tg3_phy_copper_begin()
4428 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { in tg3_phy_copper_begin()
4442 tp->link_config.active_speed = tp->link_config.speed; in tg3_phy_copper_begin()
4443 tp->link_config.active_duplex = tp->link_config.duplex; in tg3_phy_copper_begin()
4454 switch (tp->link_config.speed) { in tg3_phy_copper_begin()
4468 if (tp->link_config.duplex == DUPLEX_FULL) in tg3_phy_copper_begin()
4502 tp->link_config.autoneg = AUTONEG_DISABLE; in tg3_phy_pull_config()
4503 tp->link_config.advertising = 0; in tg3_phy_pull_config()
4506 err = -EIO; in tg3_phy_pull_config()
4510 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) in tg3_phy_pull_config()
4513 tp->link_config.speed = SPEED_10; in tg3_phy_pull_config()
4516 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) in tg3_phy_pull_config()
4519 tp->link_config.speed = SPEED_100; in tg3_phy_pull_config()
4522 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_pull_config()
4523 tp->link_config.speed = SPEED_1000; in tg3_phy_pull_config()
4532 tp->link_config.duplex = DUPLEX_FULL; in tg3_phy_pull_config()
4534 tp->link_config.duplex = DUPLEX_HALF; in tg3_phy_pull_config()
4536 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; in tg3_phy_pull_config()
4542 tp->link_config.autoneg = AUTONEG_ENABLE; in tg3_phy_pull_config()
4543 tp->link_config.advertising = ADVERTISED_Autoneg; in tg3_phy_pull_config()
4546 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { in tg3_phy_pull_config()
4554 tp->link_config.advertising |= adv | ADVERTISED_TP; in tg3_phy_pull_config()
4556 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); in tg3_phy_pull_config()
4558 tp->link_config.advertising |= ADVERTISED_FIBRE; in tg3_phy_pull_config()
4561 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_pull_config()
4564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { in tg3_phy_pull_config()
4576 tp->link_config.flowctrl = adv; in tg3_phy_pull_config()
4582 tp->link_config.advertising |= adv; in tg3_phy_pull_config()
4612 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) in tg3_phy_eee_config_ok()
4617 if (tp->eee.eee_enabled) { in tg3_phy_eee_config_ok()
4618 if (!linkmode_equal(tp->eee.advertised, eee.advertised) || in tg3_phy_eee_config_ok()
4619 tp->eee.tx_lpi_timer != eee.tx_lpi_timer || in tg3_phy_eee_config_ok()
4620 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) in tg3_phy_eee_config_ok()
4635 advertising = tp->link_config.advertising; in tg3_phy_copper_an_config_ok()
4639 if (tp->link_config.active_duplex == DUPLEX_FULL) { in tg3_phy_copper_an_config_ok()
4640 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); in tg3_phy_copper_an_config_ok()
4650 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_copper_an_config_ok()
4679 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_copper_fetch_rmtadv()
4692 tp->link_config.rmt_adv = lpeth; in tg3_phy_copper_fetch_rmtadv()
4699 if (curr_link_up != tp->link_up) { in tg3_test_and_report_link_chg()
4701 netif_carrier_on(tp->dev); in tg3_test_and_report_link_chg()
4703 netif_carrier_off(tp->dev); in tg3_test_and_report_link_chg()
4704 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) in tg3_test_and_report_link_chg()
4705 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_test_and_report_link_chg()
4742 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | in tg3_setup_eee()
4752 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); in tg3_setup_eee()
4756 (tp->eee.tx_lpi_timer & 0xffff)); in tg3_setup_eee()
4774 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { in tg3_setup_copper_phy()
4776 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); in tg3_setup_copper_phy()
4782 /* Some third-party PHYs need to be reset on link going in tg3_setup_copper_phy()
4788 tp->link_up) { in tg3_setup_copper_phy()
4797 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { in tg3_setup_copper_phy()
4818 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == in tg3_setup_copper_phy()
4821 tp->link_config.active_speed == SPEED_1000) { in tg3_setup_copper_phy()
4842 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) in tg3_setup_copper_phy()
4844 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) in tg3_setup_copper_phy()
4849 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) in tg3_setup_copper_phy()
4859 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; in tg3_setup_copper_phy()
4860 tp->link_config.rmt_adv = 0; in tg3_setup_copper_phy()
4862 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { in tg3_setup_copper_phy()
4911 tp->link_config.active_speed = current_speed; in tg3_setup_copper_phy()
4912 tp->link_config.active_duplex = current_duplex; in tg3_setup_copper_phy()
4914 if (tp->link_config.autoneg == AUTONEG_ENABLE) { in tg3_setup_copper_phy()
4928 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && in tg3_setup_copper_phy()
4935 tp->link_config.speed == current_speed && in tg3_setup_copper_phy()
4936 tp->link_config.duplex == current_duplex) { in tg3_setup_copper_phy()
4942 tp->link_config.active_duplex == DUPLEX_FULL) { in tg3_setup_copper_phy()
4945 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_setup_copper_phy()
4954 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; in tg3_setup_copper_phy()
4961 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { in tg3_setup_copper_phy()
4969 tp->link_config.active_speed = current_speed; in tg3_setup_copper_phy()
4970 tp->link_config.active_duplex = current_duplex; in tg3_setup_copper_phy()
4975 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) in tg3_setup_copper_phy()
4979 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; in tg3_setup_copper_phy()
4981 if (tp->link_config.active_speed == SPEED_100 || in tg3_setup_copper_phy()
4982 tp->link_config.active_speed == SPEED_10) in tg3_setup_copper_phy()
4983 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; in tg3_setup_copper_phy()
4985 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_setup_copper_phy()
4986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) in tg3_setup_copper_phy()
4987 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; in tg3_setup_copper_phy()
4989 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_setup_copper_phy()
4998 if (tp->link_config.active_speed == SPEED_10) in tg3_setup_copper_phy()
5000 else if (tp->link_config.active_speed == SPEED_100) in tg3_setup_copper_phy()
5003 else if (tp->link_config.active_speed == SPEED_1000) in tg3_setup_copper_phy()
5011 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; in tg3_setup_copper_phy()
5012 if (tp->link_config.active_duplex == DUPLEX_HALF) in tg3_setup_copper_phy()
5013 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; in tg3_setup_copper_phy()
5017 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) in tg3_setup_copper_phy()
5018 tp->mac_mode |= MAC_MODE_LINK_POLARITY; in tg3_setup_copper_phy()
5020 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; in tg3_setup_copper_phy()
5026 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && in tg3_setup_copper_phy()
5028 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; in tg3_setup_copper_phy()
5029 tw32_f(MAC_MI_MODE, tp->mi_mode); in tg3_setup_copper_phy()
5033 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_copper_phy()
5048 tp->link_config.active_speed == SPEED_1000 && in tg3_setup_copper_phy()
5062 if (tp->link_config.active_speed == SPEED_100 || in tg3_setup_copper_phy()
5063 tp->link_config.active_speed == SPEED_10) in tg3_setup_copper_phy()
5064 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, in tg3_setup_copper_phy()
5067 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, in tg3_setup_copper_phy()
5136 #define ANEG_FAILED -1
5148 if (ap->state == ANEG_STATE_UNKNOWN) { in tg3_fiber_aneg_smachine()
5149 ap->rxconfig = 0; in tg3_fiber_aneg_smachine()
5150 ap->link_time = 0; in tg3_fiber_aneg_smachine()
5151 ap->cur_time = 0; in tg3_fiber_aneg_smachine()
5152 ap->ability_match_cfg = 0; in tg3_fiber_aneg_smachine()
5153 ap->ability_match_count = 0; in tg3_fiber_aneg_smachine()
5154 ap->ability_match = 0; in tg3_fiber_aneg_smachine()
5155 ap->idle_match = 0; in tg3_fiber_aneg_smachine()
5156 ap->ack_match = 0; in tg3_fiber_aneg_smachine()
5158 ap->cur_time++; in tg3_fiber_aneg_smachine()
5163 if (rx_cfg_reg != ap->ability_match_cfg) { in tg3_fiber_aneg_smachine()
5164 ap->ability_match_cfg = rx_cfg_reg; in tg3_fiber_aneg_smachine()
5165 ap->ability_match = 0; in tg3_fiber_aneg_smachine()
5166 ap->ability_match_count = 0; in tg3_fiber_aneg_smachine()
5168 if (++ap->ability_match_count > 1) { in tg3_fiber_aneg_smachine()
5169 ap->ability_match = 1; in tg3_fiber_aneg_smachine()
5170 ap->ability_match_cfg = rx_cfg_reg; in tg3_fiber_aneg_smachine()
5174 ap->ack_match = 1; in tg3_fiber_aneg_smachine()
5176 ap->ack_match = 0; in tg3_fiber_aneg_smachine()
5178 ap->idle_match = 0; in tg3_fiber_aneg_smachine()
5180 ap->idle_match = 1; in tg3_fiber_aneg_smachine()
5181 ap->ability_match_cfg = 0; in tg3_fiber_aneg_smachine()
5182 ap->ability_match_count = 0; in tg3_fiber_aneg_smachine()
5183 ap->ability_match = 0; in tg3_fiber_aneg_smachine()
5184 ap->ack_match = 0; in tg3_fiber_aneg_smachine()
5189 ap->rxconfig = rx_cfg_reg; in tg3_fiber_aneg_smachine()
5192 switch (ap->state) { in tg3_fiber_aneg_smachine()
5194 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) in tg3_fiber_aneg_smachine()
5195 ap->state = ANEG_STATE_AN_ENABLE; in tg3_fiber_aneg_smachine()
5199 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); in tg3_fiber_aneg_smachine()
5200 if (ap->flags & MR_AN_ENABLE) { in tg3_fiber_aneg_smachine()
5201 ap->link_time = 0; in tg3_fiber_aneg_smachine()
5202 ap->cur_time = 0; in tg3_fiber_aneg_smachine()
5203 ap->ability_match_cfg = 0; in tg3_fiber_aneg_smachine()
5204 ap->ability_match_count = 0; in tg3_fiber_aneg_smachine()
5205 ap->ability_match = 0; in tg3_fiber_aneg_smachine()
5206 ap->idle_match = 0; in tg3_fiber_aneg_smachine()
5207 ap->ack_match = 0; in tg3_fiber_aneg_smachine()
5209 ap->state = ANEG_STATE_RESTART_INIT; in tg3_fiber_aneg_smachine()
5211 ap->state = ANEG_STATE_DISABLE_LINK_OK; in tg3_fiber_aneg_smachine()
5216 ap->link_time = ap->cur_time; in tg3_fiber_aneg_smachine()
5217 ap->flags &= ~(MR_NP_LOADED); in tg3_fiber_aneg_smachine()
5218 ap->txconfig = 0; in tg3_fiber_aneg_smachine()
5220 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; in tg3_fiber_aneg_smachine()
5221 tw32_f(MAC_MODE, tp->mac_mode); in tg3_fiber_aneg_smachine()
5225 ap->state = ANEG_STATE_RESTART; in tg3_fiber_aneg_smachine()
5229 delta = ap->cur_time - ap->link_time; in tg3_fiber_aneg_smachine()
5231 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; in tg3_fiber_aneg_smachine()
5241 ap->flags &= ~(MR_TOGGLE_TX); in tg3_fiber_aneg_smachine()
5242 ap->txconfig = ANEG_CFG_FD; in tg3_fiber_aneg_smachine()
5243 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); in tg3_fiber_aneg_smachine()
5245 ap->txconfig |= ANEG_CFG_PS1; in tg3_fiber_aneg_smachine()
5247 ap->txconfig |= ANEG_CFG_PS2; in tg3_fiber_aneg_smachine()
5248 tw32(MAC_TX_AUTO_NEG, ap->txconfig); in tg3_fiber_aneg_smachine()
5249 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; in tg3_fiber_aneg_smachine()
5250 tw32_f(MAC_MODE, tp->mac_mode); in tg3_fiber_aneg_smachine()
5253 ap->state = ANEG_STATE_ABILITY_DETECT; in tg3_fiber_aneg_smachine()
5257 if (ap->ability_match != 0 && ap->rxconfig != 0) in tg3_fiber_aneg_smachine()
5258 ap->state = ANEG_STATE_ACK_DETECT_INIT; in tg3_fiber_aneg_smachine()
5262 ap->txconfig |= ANEG_CFG_ACK; in tg3_fiber_aneg_smachine()
5263 tw32(MAC_TX_AUTO_NEG, ap->txconfig); in tg3_fiber_aneg_smachine()
5264 tp->mac_mode |= MAC_MODE_SEND_CONFIGS; in tg3_fiber_aneg_smachine()
5265 tw32_f(MAC_MODE, tp->mac_mode); in tg3_fiber_aneg_smachine()
5268 ap->state = ANEG_STATE_ACK_DETECT; in tg3_fiber_aneg_smachine()
5272 if (ap->ack_match != 0) { in tg3_fiber_aneg_smachine()
5273 if ((ap->rxconfig & ~ANEG_CFG_ACK) == in tg3_fiber_aneg_smachine()
5274 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { in tg3_fiber_aneg_smachine()
5275 ap->state = ANEG_STATE_COMPLETE_ACK_INIT; in tg3_fiber_aneg_smachine()
5277 ap->state = ANEG_STATE_AN_ENABLE; in tg3_fiber_aneg_smachine()
5279 } else if (ap->ability_match != 0 && in tg3_fiber_aneg_smachine()
5280 ap->rxconfig == 0) { in tg3_fiber_aneg_smachine()
5281 ap->state = ANEG_STATE_AN_ENABLE; in tg3_fiber_aneg_smachine()
5286 if (ap->rxconfig & ANEG_CFG_INVAL) { in tg3_fiber_aneg_smachine()
5290 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | in tg3_fiber_aneg_smachine()
5299 if (ap->rxconfig & ANEG_CFG_FD) in tg3_fiber_aneg_smachine()
5300 ap->flags |= MR_LP_ADV_FULL_DUPLEX; in tg3_fiber_aneg_smachine()
5301 if (ap->rxconfig & ANEG_CFG_HD) in tg3_fiber_aneg_smachine()
5302 ap->flags |= MR_LP_ADV_HALF_DUPLEX; in tg3_fiber_aneg_smachine()
5303 if (ap->rxconfig & ANEG_CFG_PS1) in tg3_fiber_aneg_smachine()
5304 ap->flags |= MR_LP_ADV_SYM_PAUSE; in tg3_fiber_aneg_smachine()
5305 if (ap->rxconfig & ANEG_CFG_PS2) in tg3_fiber_aneg_smachine()
5306 ap->flags |= MR_LP_ADV_ASYM_PAUSE; in tg3_fiber_aneg_smachine()
5307 if (ap->rxconfig & ANEG_CFG_RF1) in tg3_fiber_aneg_smachine()
5308 ap->flags |= MR_LP_ADV_REMOTE_FAULT1; in tg3_fiber_aneg_smachine()
5309 if (ap->rxconfig & ANEG_CFG_RF2) in tg3_fiber_aneg_smachine()
5310 ap->flags |= MR_LP_ADV_REMOTE_FAULT2; in tg3_fiber_aneg_smachine()
5311 if (ap->rxconfig & ANEG_CFG_NP) in tg3_fiber_aneg_smachine()
5312 ap->flags |= MR_LP_ADV_NEXT_PAGE; in tg3_fiber_aneg_smachine()
5314 ap->link_time = ap->cur_time; in tg3_fiber_aneg_smachine()
5316 ap->flags ^= (MR_TOGGLE_TX); in tg3_fiber_aneg_smachine()
5317 if (ap->rxconfig & 0x0008) in tg3_fiber_aneg_smachine()
5318 ap->flags |= MR_TOGGLE_RX; in tg3_fiber_aneg_smachine()
5319 if (ap->rxconfig & ANEG_CFG_NP) in tg3_fiber_aneg_smachine()
5320 ap->flags |= MR_NP_RX; in tg3_fiber_aneg_smachine()
5321 ap->flags |= MR_PAGE_RX; in tg3_fiber_aneg_smachine()
5323 ap->state = ANEG_STATE_COMPLETE_ACK; in tg3_fiber_aneg_smachine()
5328 if (ap->ability_match != 0 && in tg3_fiber_aneg_smachine()
5329 ap->rxconfig == 0) { in tg3_fiber_aneg_smachine()
5330 ap->state = ANEG_STATE_AN_ENABLE; in tg3_fiber_aneg_smachine()
5333 delta = ap->cur_time - ap->link_time; in tg3_fiber_aneg_smachine()
5335 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { in tg3_fiber_aneg_smachine()
5336 ap->state = ANEG_STATE_IDLE_DETECT_INIT; in tg3_fiber_aneg_smachine()
5338 if ((ap->txconfig & ANEG_CFG_NP) == 0 && in tg3_fiber_aneg_smachine()
5339 !(ap->flags & MR_NP_RX)) { in tg3_fiber_aneg_smachine()
5340 ap->state = ANEG_STATE_IDLE_DETECT_INIT; in tg3_fiber_aneg_smachine()
5349 ap->link_time = ap->cur_time; in tg3_fiber_aneg_smachine()
5350 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; in tg3_fiber_aneg_smachine()
5351 tw32_f(MAC_MODE, tp->mac_mode); in tg3_fiber_aneg_smachine()
5354 ap->state = ANEG_STATE_IDLE_DETECT; in tg3_fiber_aneg_smachine()
5359 if (ap->ability_match != 0 && in tg3_fiber_aneg_smachine()
5360 ap->rxconfig == 0) { in tg3_fiber_aneg_smachine()
5361 ap->state = ANEG_STATE_AN_ENABLE; in tg3_fiber_aneg_smachine()
5364 delta = ap->cur_time - ap->link_time; in tg3_fiber_aneg_smachine()
5367 ap->state = ANEG_STATE_LINK_OK; in tg3_fiber_aneg_smachine()
5372 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); in tg3_fiber_aneg_smachine()
5402 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; in fiber_autoneg()
5406 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); in fiber_autoneg()
5422 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; in fiber_autoneg()
5423 tw32_f(MAC_MODE, tp->mac_mode); in fiber_autoneg()
5461 /* Enable auto-lock and comdet, select txclk for tx. */ in tg3_init_bcm8002()
5506 /* preserve bits 0-11,13,14 for signal pre-emphasis */ in tg3_setup_fiber_hw_autoneg()
5507 /* preserve bits 20-23 for voltage regulator */ in tg3_setup_fiber_hw_autoneg()
5513 if (tp->link_config.autoneg != AUTONEG_ENABLE) { in tg3_setup_fiber_hw_autoneg()
5534 /* Want auto-negotiation. */ in tg3_setup_fiber_hw_autoneg()
5537 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); in tg3_setup_fiber_hw_autoneg()
5544 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && in tg3_setup_fiber_hw_autoneg()
5545 tp->serdes_counter && in tg3_setup_fiber_hw_autoneg()
5549 tp->serdes_counter--; in tg3_setup_fiber_hw_autoneg()
5560 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; in tg3_setup_fiber_hw_autoneg()
5561 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_setup_fiber_hw_autoneg()
5581 tp->link_config.rmt_adv = in tg3_setup_fiber_hw_autoneg()
5586 tp->serdes_counter = 0; in tg3_setup_fiber_hw_autoneg()
5587 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_setup_fiber_hw_autoneg()
5589 if (tp->serdes_counter) in tg3_setup_fiber_hw_autoneg()
5590 tp->serdes_counter--; in tg3_setup_fiber_hw_autoneg()
5606 /* Link parallel detection - link is up */ in tg3_setup_fiber_hw_autoneg()
5614 tp->phy_flags |= in tg3_setup_fiber_hw_autoneg()
5616 tp->serdes_counter = in tg3_setup_fiber_hw_autoneg()
5623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; in tg3_setup_fiber_hw_autoneg()
5624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_setup_fiber_hw_autoneg()
5638 if (tp->link_config.autoneg == AUTONEG_ENABLE) { in tg3_setup_fiber_by_hand()
5655 tp->link_config.rmt_adv = in tg3_setup_fiber_by_hand()
5685 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); in tg3_setup_fiber_by_hand()
5688 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_by_hand()
5705 orig_pause_cfg = tp->link_config.active_flowctrl; in tg3_setup_fiber_phy()
5706 orig_active_speed = tp->link_config.active_speed; in tg3_setup_fiber_phy()
5707 orig_active_duplex = tp->link_config.active_duplex; in tg3_setup_fiber_phy()
5710 tp->link_up && in tg3_setup_fiber_phy()
5727 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); in tg3_setup_fiber_phy()
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; in tg3_setup_fiber_phy()
5729 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_phy()
5732 if (tp->phy_id == TG3_PHY_ID_BCM8002) in tg3_setup_fiber_phy()
5739 tp->link_config.rmt_adv = 0; in tg3_setup_fiber_phy()
5747 tp->napi[0].hw_status->status = in tg3_setup_fiber_phy()
5749 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); in tg3_setup_fiber_phy()
5764 if (tp->link_config.autoneg == AUTONEG_ENABLE && in tg3_setup_fiber_phy()
5765 tp->serdes_counter == 0) { in tg3_setup_fiber_phy()
5766 tw32_f(MAC_MODE, (tp->mac_mode | in tg3_setup_fiber_phy()
5769 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_phy()
5774 tp->link_config.active_speed = SPEED_1000; in tg3_setup_fiber_phy()
5775 tp->link_config.active_duplex = DUPLEX_FULL; in tg3_setup_fiber_phy()
5776 tw32(MAC_LED_CTRL, (tp->led_ctrl | in tg3_setup_fiber_phy()
5780 tp->link_config.active_speed = SPEED_UNKNOWN; in tg3_setup_fiber_phy()
5781 tp->link_config.active_duplex = DUPLEX_UNKNOWN; in tg3_setup_fiber_phy()
5782 tw32(MAC_LED_CTRL, (tp->led_ctrl | in tg3_setup_fiber_phy()
5788 u32 now_pause_cfg = tp->link_config.active_flowctrl; in tg3_setup_fiber_phy()
5790 orig_active_speed != tp->link_config.active_speed || in tg3_setup_fiber_phy()
5791 orig_active_duplex != tp->link_config.active_duplex) in tg3_setup_fiber_phy()
5815 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; in tg3_setup_fiber_mii_phy()
5818 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_setup_fiber_mii_phy()
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_setup_fiber_mii_phy()
5826 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; in tg3_setup_fiber_mii_phy()
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; in tg3_setup_fiber_mii_phy()
5838 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_mii_phy()
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_setup_fiber_mii_phy()
5847 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_mii_phy()
5855 tp->link_config.rmt_adv = 0; in tg3_setup_fiber_mii_phy()
5868 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && in tg3_setup_fiber_mii_phy()
5869 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { in tg3_setup_fiber_mii_phy()
5871 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { in tg3_setup_fiber_mii_phy()
5880 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); in tg3_setup_fiber_mii_phy()
5881 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); in tg3_setup_fiber_mii_phy()
5889 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; in tg3_setup_fiber_mii_phy()
5890 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_setup_fiber_mii_phy()
5900 if (tp->link_config.duplex == DUPLEX_FULL) in tg3_setup_fiber_mii_phy()
5910 if (tp->link_up) { in tg3_setup_fiber_mii_phy()
5934 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_setup_fiber_mii_phy()
5962 tp->link_config.rmt_adv = in tg3_setup_fiber_mii_phy()
5976 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; in tg3_setup_fiber_mii_phy()
5977 if (tp->link_config.active_duplex == DUPLEX_HALF) in tg3_setup_fiber_mii_phy()
5978 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; in tg3_setup_fiber_mii_phy()
5980 tw32_f(MAC_MODE, tp->mac_mode); in tg3_setup_fiber_mii_phy()
5985 tp->link_config.active_speed = current_speed; in tg3_setup_fiber_mii_phy()
5986 tp->link_config.active_duplex = current_duplex; in tg3_setup_fiber_mii_phy()
5994 if (tp->serdes_counter) { in tg3_serdes_parallel_detect()
5996 tp->serdes_counter--; in tg3_serdes_parallel_detect()
6000 if (!tp->link_up && in tg3_serdes_parallel_detect()
6001 (tp->link_config.autoneg == AUTONEG_ENABLE)) { in tg3_serdes_parallel_detect()
6027 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; in tg3_serdes_parallel_detect()
6030 } else if (tp->link_up && in tg3_serdes_parallel_detect()
6031 (tp->link_config.autoneg == AUTONEG_ENABLE) && in tg3_serdes_parallel_detect()
6032 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { in tg3_serdes_parallel_detect()
6046 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_serdes_parallel_detect()
6057 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) in tg3_setup_phy()
6059 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) in tg3_setup_phy()
6088 if (tp->link_config.active_speed == SPEED_1000 && in tg3_setup_phy()
6089 tp->link_config.active_duplex == DUPLEX_HALF) in tg3_setup_phy()
6097 if (tp->link_up) { in tg3_setup_phy()
6099 tp->coal.stats_block_coalesce_usecs); in tg3_setup_phy()
6107 if (!tp->link_up) in tg3_setup_phy()
6109 tp->pwrmgmt_thresh; in tg3_setup_phy()
6118 /* tp->lock must be held */
6131 /* tp->lock must be held */
6148 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; in tg3_get_ts_info()
6151 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | in tg3_get_ts_info()
6156 if (tp->ptp_clock) in tg3_get_ts_info()
6157 info->phc_index = ptp_clock_index(tp->ptp_clock); in tg3_get_ts_info()
6159 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); in tg3_get_ts_info()
6161 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | in tg3_get_ts_info()
6201 tp->ptp_adjust += delta; in tg3_ptp_adjtime()
6215 ns += tp->ptp_adjust; in tg3_ptp_gettimex()
6233 tp->ptp_adjust = 0; in tg3_ptp_settime()
6246 switch (rq->type) { in tg3_ptp_enable()
6249 if (rq->perout.flags) in tg3_ptp_enable()
6250 return -EOPNOTSUPP; in tg3_ptp_enable()
6252 if (rq->perout.index != 0) in tg3_ptp_enable()
6253 return -EINVAL; in tg3_ptp_enable()
6262 nsec = rq->perout.start.sec * 1000000000ULL + in tg3_ptp_enable()
6263 rq->perout.start.nsec; in tg3_ptp_enable()
6265 if (rq->perout.period.sec || rq->perout.period.nsec) { in tg3_ptp_enable()
6266 netdev_warn(tp->dev, in tg3_ptp_enable()
6267 "Device supports only a one-shot timesync output, period must be 0\n"); in tg3_ptp_enable()
6268 rval = -EINVAL; in tg3_ptp_enable()
6273 netdev_warn(tp->dev, in tg3_ptp_enable()
6275 rval = -EINVAL; in tg3_ptp_enable()
6299 return -EOPNOTSUPP; in tg3_ptp_enable()
6306 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + in tg3_hwclock_to_timestamp()
6307 tp->ptp_adjust); in tg3_hwclock_to_timestamp()
6322 if (tp->ptp_txts_retrycnt > 2) in tg3_ptp_ts_aux_work()
6327 if (hwclock != tp->pre_tx_ts) { in tg3_ptp_ts_aux_work()
6329 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp); in tg3_ptp_ts_aux_work()
6332 tp->ptp_txts_retrycnt++; in tg3_ptp_ts_aux_work()
6335 dev_consume_skb_any(tp->tx_tstamp_skb); in tg3_ptp_ts_aux_work()
6336 tp->tx_tstamp_skb = NULL; in tg3_ptp_ts_aux_work()
6337 tp->ptp_txts_retrycnt = 0; in tg3_ptp_ts_aux_work()
6338 tp->pre_tx_ts = 0; in tg3_ptp_ts_aux_work()
6339 return -1; in tg3_ptp_ts_aux_work()
6359 /* tp->lock must be held */
6367 tp->ptp_adjust = 0; in tg3_ptp_init()
6368 tp->ptp_info = tg3_ptp_caps; in tg3_ptp_init()
6371 /* tp->lock must be held */
6377 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); in tg3_ptp_resume()
6378 tp->ptp_adjust = 0; in tg3_ptp_resume()
6383 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) in tg3_ptp_fini()
6386 ptp_clock_unregister(tp->ptp_clock); in tg3_ptp_fini()
6387 tp->ptp_clock = NULL; in tg3_ptp_fini()
6388 tp->ptp_adjust = 0; in tg3_ptp_fini()
6389 dev_consume_skb_any(tp->tx_tstamp_skb); in tg3_ptp_fini()
6390 tp->tx_tstamp_skb = NULL; in tg3_ptp_fini()
6395 return tp->irq_sync; in tg3_irq_sync()
6465 if (tp->pdev->error_state != pci_channel_io_normal) { in tg3_dump_state()
6466 netdev_err(tp->dev, "PCI channel ERROR!\n"); in tg3_dump_state()
6486 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", in tg3_dump_state()
6493 for (i = 0; i < tp->irq_cnt; i++) { in tg3_dump_state()
6494 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_dump_state()
6497 netdev_err(tp->dev, in tg3_dump_state()
6500 tnapi->hw_status->status, in tg3_dump_state()
6501 tnapi->hw_status->status_tag, in tg3_dump_state()
6502 tnapi->hw_status->rx_jumbo_consumer, in tg3_dump_state()
6503 tnapi->hw_status->rx_consumer, in tg3_dump_state()
6504 tnapi->hw_status->rx_mini_consumer, in tg3_dump_state()
6505 tnapi->hw_status->idx[0].rx_producer, in tg3_dump_state()
6506 tnapi->hw_status->idx[0].tx_consumer); in tg3_dump_state()
6508 netdev_err(tp->dev, in tg3_dump_state()
6511 tnapi->last_tag, tnapi->last_irq_tag, in tg3_dump_state()
6512 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, in tg3_dump_state()
6513 tnapi->rx_rcb_ptr, in tg3_dump_state()
6514 tnapi->prodring.rx_std_prod_idx, in tg3_dump_state()
6515 tnapi->prodring.rx_std_cons_idx, in tg3_dump_state()
6516 tnapi->prodring.rx_jmb_prod_idx, in tg3_dump_state()
6517 tnapi->prodring.rx_jmb_cons_idx); in tg3_dump_state()
6521 /* This is called whenever we suspect that the system chipset is re-ordering memory-mapped I/O cycles to the network device (see the warning below); the driver then attempts to recover. */
6530 tp->write32_tx_mbox == tg3_write_indirect_mbox); in tg3_tx_recover()
6532 netdev_warn(tp->dev, in tg3_tx_recover()
6533 "The system may be re-ordering memory-mapped I/O " in tg3_tx_recover()
6545 return tnapi->tx_pending - in tg3_tx_avail()
6546 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); in tg3_tx_avail()
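/* Note: the free-slot count above is plain power-of-two ring arithmetic.
 * For example, assuming TG3_TX_RING_SIZE is 512 and tx_pending is 511,
 * then with tx_prod = 10 and tx_cons = 500 (the producer has wrapped) the
 * in-flight count is (10 - 500) & 511 = 22, leaving 511 - 22 = 489
 * descriptors available.
 */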
6555 struct tg3 *tp = tnapi->tp; in tg3_tx()
6556 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; in tg3_tx()
6557 u32 sw_idx = tnapi->tx_cons; in tg3_tx()
6559 int index = tnapi - tp->napi; in tg3_tx()
6563 index--; in tg3_tx()
6565 txq = netdev_get_tx_queue(tp->dev, index); in tg3_tx()
6568 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; in tg3_tx()
6570 struct sk_buff *skb = ri->skb; in tg3_tx()
6578 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { in tg3_tx()
6583 if (hwclock != tp->pre_tx_ts) { in tg3_tx()
6586 tp->pre_tx_ts = 0; in tg3_tx()
6588 tp->tx_tstamp_skb = skb; in tg3_tx()
6593 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), in tg3_tx()
6596 ri->skb = NULL; in tg3_tx()
6598 while (ri->fragmented) { in tg3_tx()
6599 ri->fragmented = false; in tg3_tx()
6601 ri = &tnapi->tx_buffers[sw_idx]; in tg3_tx()
6606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in tg3_tx()
6607 ri = &tnapi->tx_buffers[sw_idx]; in tg3_tx()
6608 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) in tg3_tx()
6611 dma_unmap_page(&tp->pdev->dev, in tg3_tx()
6613 skb_frag_size(&skb_shinfo(skb)->frags[i]), in tg3_tx()
6616 while (ri->fragmented) { in tg3_tx()
6617 ri->fragmented = false; in tg3_tx()
6619 ri = &tnapi->tx_buffers[sw_idx]; in tg3_tx()
6626 bytes_compl += skb->len; in tg3_tx()
6631 ptp_schedule_worker(tp->ptp_clock, 0); in tg3_tx()
6641 tnapi->tx_cons = sw_idx; in tg3_tx()
6673 if (!ri->data) in tg3_rx_data_free()
6676 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, in tg3_rx_data_free()
6678 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); in tg3_rx_data_free()
6679 ri->data = NULL; in tg3_rx_data_free()
6686 /* We only need to fill in the address here; the other members
6689  * of the RX descriptor are invariant, see tg3_init_rings.  Note the
6690  * asymmetry of cpu vs. chip accesses: posting only dirties the first cache
6691  * line of the descriptor (the address), while the status path only reads the last one. */
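/* Note: concretely, the posting code below touches only desc->addr_hi and
 * desc->addr_lo, while the completion path in tg3_rx() reads only the status
 * half of the descriptor (opaque, err_vlan, idx_len), which is what keeps the
 * host and chip out of each other's cache lines.
 */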
6706 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; in tg3_alloc_rx_data()
6707 desc = &tpr->rx_std[dest_idx]; in tg3_alloc_rx_data()
6708 map = &tpr->rx_std_buffers[dest_idx]; in tg3_alloc_rx_data()
6709 data_size = tp->rx_pkt_map_sz; in tg3_alloc_rx_data()
6713 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; in tg3_alloc_rx_data()
6714 desc = &tpr->rx_jmb[dest_idx].std; in tg3_alloc_rx_data()
6715 map = &tpr->rx_jmb_buffers[dest_idx]; in tg3_alloc_rx_data()
6720 return -EINVAL; in tg3_alloc_rx_data()
6739 return -ENOMEM; in tg3_alloc_rx_data()
6741 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), in tg3_alloc_rx_data()
6743 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { in tg3_alloc_rx_data()
6745 return -EIO; in tg3_alloc_rx_data()
6748 map->data = data; in tg3_alloc_rx_data()
6751 desc->addr_hi = ((u64)mapping >> 32); in tg3_alloc_rx_data()
6752 desc->addr_lo = ((u64)mapping & 0xffffffff); in tg3_alloc_rx_data()
6758 * Only the data pointer and DMA address move; the other members of the RX descriptor are invariant.  See the notes above tg3_alloc_rx_data() for full details. */
6766 struct tg3 *tp = tnapi->tp; in tg3_recycle_rx()
6769 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; in tg3_recycle_rx()
6774 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; in tg3_recycle_rx()
6775 dest_desc = &dpr->rx_std[dest_idx]; in tg3_recycle_rx()
6776 dest_map = &dpr->rx_std_buffers[dest_idx]; in tg3_recycle_rx()
6777 src_desc = &spr->rx_std[src_idx]; in tg3_recycle_rx()
6778 src_map = &spr->rx_std_buffers[src_idx]; in tg3_recycle_rx()
6782 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; in tg3_recycle_rx()
6783 dest_desc = &dpr->rx_jmb[dest_idx].std; in tg3_recycle_rx()
6784 dest_map = &dpr->rx_jmb_buffers[dest_idx]; in tg3_recycle_rx()
6785 src_desc = &spr->rx_jmb[src_idx].std; in tg3_recycle_rx()
6786 src_map = &spr->rx_jmb_buffers[src_idx]; in tg3_recycle_rx()
6793 dest_map->data = src_map->data; in tg3_recycle_rx()
6796 dest_desc->addr_hi = src_desc->addr_hi; in tg3_recycle_rx()
6797 dest_desc->addr_lo = src_desc->addr_lo; in tg3_recycle_rx()
6804 src_map->data = NULL; in tg3_recycle_rx()
6807 /* The RX ring scheme is composed of multiple rings that post fresh
6813  * buffers to the chip, plus one ring the chip uses to report status back
6819  * to the host: a received packet is staged in on-chip RAM, and its status
6824  * is written to the status ring rather than back into the posted descriptor.
6826  * This split keeps cache lines in shared state, since only the host writes
6827  * the buffer post rings and only the chip writes the rx status rings. */
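/* Illustrative sketch (guarded out, not driver code): the "opaque" cookie that
 * tg3_rx() decodes below is stamped by the host at posting time with the ring
 * type plus the slot index, so the completion path can locate the buffer
 * without the chip ever writing back into the posted descriptor.
 */
#if 0
static void tg3_example_decode_opaque(u32 opaque)
{
	u32 slot = opaque & RXD_OPAQUE_INDEX_MASK;	/* which descriptor slot */
	u32 ring = opaque & RXD_OPAQUE_RING_MASK;	/* std vs. jumbo posting ring */

	(void)slot;
	(void)ring;
}
#endif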
6833 struct tg3 *tp = tnapi->tp; in tg3_rx()
6836 u32 sw_idx = tnapi->rx_rcb_ptr; in tg3_rx()
6839 struct tg3_rx_prodring_set *tpr = &tnapi->prodring; in tg3_rx()
6841 hw_idx = *(tnapi->rx_rcb_prod_idx); in tg3_rx()
6849 std_prod_idx = tpr->rx_std_prod_idx; in tg3_rx()
6850 jmb_prod_idx = tpr->rx_jmb_prod_idx; in tg3_rx()
6853 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; in tg3_rx()
6861 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; in tg3_rx()
6862 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; in tg3_rx()
6864 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; in tg3_rx()
6866 data = ri->data; in tg3_rx()
6870 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; in tg3_rx()
6872 data = ri->data; in tg3_rx()
6879 if (desc->err_vlan & RXD_ERR_MASK) { in tg3_rx()
6885 tnapi->rx_dropped++; in tg3_rx()
6890 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - in tg3_rx()
6893 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == in tg3_rx()
6895 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == in tg3_rx()
6910 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, in tg3_rx()
6918 ri->data = NULL; in tg3_rx()
6933 skb = netdev_alloc_skb(tp->dev, in tg3_rx()
6939 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, in tg3_rx()
6941 memcpy(skb->data, in tg3_rx()
6944 dma_sync_single_for_device(&tp->pdev->dev, dma_addr, in tg3_rx()
6953 if ((tp->dev->features & NETIF_F_RXCSUM) && in tg3_rx()
6954 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && in tg3_rx()
6955 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) in tg3_rx()
6957 skb->ip_summed = CHECKSUM_UNNECESSARY; in tg3_rx()
6961 skb->protocol = eth_type_trans(skb, tp->dev); in tg3_rx()
6963 if (len > (tp->dev->mtu + ETH_HLEN) && in tg3_rx()
6964 skb->protocol != htons(ETH_P_8021Q) && in tg3_rx()
6965 skb->protocol != htons(ETH_P_8021AD)) { in tg3_rx()
6970 if (desc->type_flags & RXD_FLAG_VLAN && in tg3_rx()
6971 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) in tg3_rx()
6973 desc->err_vlan & RXD_VLAN_MASK); in tg3_rx()
6975 napi_gro_receive(&tnapi->napi, skb); in tg3_rx()
6978 budget--; in tg3_rx()
6983 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { in tg3_rx()
6984 tpr->rx_std_prod_idx = std_prod_idx & in tg3_rx()
6985 tp->rx_std_ring_mask; in tg3_rx()
6987 tpr->rx_std_prod_idx); in tg3_rx()
6993 sw_idx &= tp->rx_ret_ring_mask; in tg3_rx()
6997 hw_idx = *(tnapi->rx_rcb_prod_idx); in tg3_rx()
7003 tnapi->rx_rcb_ptr = sw_idx; in tg3_rx()
7004 tw32_rx_mbox(tnapi->consmbox, sw_idx); in tg3_rx()
7006 /* Refill RX ring(s). */ in tg3_rx()
7012 tpr->rx_std_prod_idx = std_prod_idx & in tg3_rx()
7013 tp->rx_std_ring_mask; in tg3_rx()
7015 tpr->rx_std_prod_idx); in tg3_rx()
7018 tpr->rx_jmb_prod_idx = jmb_prod_idx & in tg3_rx()
7019 tp->rx_jmb_ring_mask; in tg3_rx()
7021 tpr->rx_jmb_prod_idx); in tg3_rx()
7029 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; in tg3_rx()
7030 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; in tg3_rx()
7032 if (tnapi != &tp->napi[1]) { in tg3_rx()
7033 tp->rx_refill = true; in tg3_rx()
7034 napi_schedule(&tp->napi[1].napi); in tg3_rx()
7045 struct tg3_hw_status *sblk = tp->napi[0].hw_status; in tg3_poll_link()
7047 if (sblk->status & SD_STATUS_LINK_CHG) { in tg3_poll_link()
7048 sblk->status = SD_STATUS_UPDATED | in tg3_poll_link()
7049 (sblk->status & ~SD_STATUS_LINK_CHG); in tg3_poll_link()
7050 spin_lock(&tp->lock); in tg3_poll_link()
7060 spin_unlock(&tp->lock); in tg3_poll_link()
7073 src_prod_idx = spr->rx_std_prod_idx; in tg3_rx_prodring_xfer()
7080 if (spr->rx_std_cons_idx == src_prod_idx) in tg3_rx_prodring_xfer()
7083 if (spr->rx_std_cons_idx < src_prod_idx) in tg3_rx_prodring_xfer()
7084 cpycnt = src_prod_idx - spr->rx_std_cons_idx; in tg3_rx_prodring_xfer()
7086 cpycnt = tp->rx_std_ring_mask + 1 - in tg3_rx_prodring_xfer()
7087 spr->rx_std_cons_idx; in tg3_rx_prodring_xfer()
7090 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); in tg3_rx_prodring_xfer()
7092 si = spr->rx_std_cons_idx; in tg3_rx_prodring_xfer()
7093 di = dpr->rx_std_prod_idx; in tg3_rx_prodring_xfer()
7096 if (dpr->rx_std_buffers[i].data) { in tg3_rx_prodring_xfer()
7097 cpycnt = i - di; in tg3_rx_prodring_xfer()
7098 err = -ENOSPC; in tg3_rx_prodring_xfer()
7112 memcpy(&dpr->rx_std_buffers[di], in tg3_rx_prodring_xfer()
7113 &spr->rx_std_buffers[si], in tg3_rx_prodring_xfer()
7118 sbd = &spr->rx_std[si]; in tg3_rx_prodring_xfer()
7119 dbd = &dpr->rx_std[di]; in tg3_rx_prodring_xfer()
7120 dbd->addr_hi = sbd->addr_hi; in tg3_rx_prodring_xfer()
7121 dbd->addr_lo = sbd->addr_lo; in tg3_rx_prodring_xfer()
7124 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & in tg3_rx_prodring_xfer()
7125 tp->rx_std_ring_mask; in tg3_rx_prodring_xfer()
7126 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & in tg3_rx_prodring_xfer()
7127 tp->rx_std_ring_mask; in tg3_rx_prodring_xfer()
7131 src_prod_idx = spr->rx_jmb_prod_idx; in tg3_rx_prodring_xfer()
7138 if (spr->rx_jmb_cons_idx == src_prod_idx) in tg3_rx_prodring_xfer()
7141 if (spr->rx_jmb_cons_idx < src_prod_idx) in tg3_rx_prodring_xfer()
7142 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; in tg3_rx_prodring_xfer()
7144 cpycnt = tp->rx_jmb_ring_mask + 1 - in tg3_rx_prodring_xfer()
7145 spr->rx_jmb_cons_idx; in tg3_rx_prodring_xfer()
7148 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); in tg3_rx_prodring_xfer()
7150 si = spr->rx_jmb_cons_idx; in tg3_rx_prodring_xfer()
7151 di = dpr->rx_jmb_prod_idx; in tg3_rx_prodring_xfer()
7154 if (dpr->rx_jmb_buffers[i].data) { in tg3_rx_prodring_xfer()
7155 cpycnt = i - di; in tg3_rx_prodring_xfer()
7156 err = -ENOSPC; in tg3_rx_prodring_xfer()
7170 memcpy(&dpr->rx_jmb_buffers[di], in tg3_rx_prodring_xfer()
7171 &spr->rx_jmb_buffers[si], in tg3_rx_prodring_xfer()
7176 sbd = &spr->rx_jmb[si].std; in tg3_rx_prodring_xfer()
7177 dbd = &dpr->rx_jmb[di].std; in tg3_rx_prodring_xfer()
7178 dbd->addr_hi = sbd->addr_hi; in tg3_rx_prodring_xfer()
7179 dbd->addr_lo = sbd->addr_lo; in tg3_rx_prodring_xfer()
7182 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & in tg3_rx_prodring_xfer()
7183 tp->rx_jmb_ring_mask; in tg3_rx_prodring_xfer()
7184 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & in tg3_rx_prodring_xfer()
7185 tp->rx_jmb_ring_mask; in tg3_rx_prodring_xfer()
7193 struct tg3 *tp = tnapi->tp; in tg3_poll_work()
7196 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { in tg3_poll_work()
7202 if (!tnapi->rx_rcb_prod_idx) in tg3_poll_work()
7205 /* run RX thread, within the bounds set by NAPI. in tg3_poll_work()
7206 * All RX "locking" is done by ensuring outside in tg3_poll_work()
7207 * code synchronizes with tg3->napi.poll() in tg3_poll_work()
7209 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) in tg3_poll_work()
7210 work_done += tg3_rx(tnapi, budget - work_done); in tg3_poll_work()
7212 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { in tg3_poll_work()
7213 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; in tg3_poll_work()
7215 u32 std_prod_idx = dpr->rx_std_prod_idx; in tg3_poll_work()
7216 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; in tg3_poll_work()
7218 tp->rx_refill = false; in tg3_poll_work()
7219 for (i = 1; i <= tp->rxq_cnt; i++) in tg3_poll_work()
7221 &tp->napi[i].prodring); in tg3_poll_work()
7225 if (std_prod_idx != dpr->rx_std_prod_idx) in tg3_poll_work()
7227 dpr->rx_std_prod_idx); in tg3_poll_work()
7229 if (jmb_prod_idx != dpr->rx_jmb_prod_idx) in tg3_poll_work()
7231 dpr->rx_jmb_prod_idx); in tg3_poll_work()
7234 tw32_f(HOSTCC_MODE, tp->coal_now); in tg3_poll_work()
7242 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) in tg3_reset_task_schedule()
7243 schedule_work(&tp->reset_task); in tg3_reset_task_schedule()
7248 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) in tg3_reset_task_cancel()
7249 cancel_work_sync(&tp->reset_task); in tg3_reset_task_cancel()
7256 struct tg3 *tp = tnapi->tp; in tg3_poll_msix()
7258 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_poll_msix()
7269 /* tp->last_tag is used in tg3_int_reenable() below to tell the hw how much work has been processed, so we must read it before checking for more work. */ in tg3_poll_msix()
7273 tnapi->last_tag = sblk->status_tag; in tg3_poll_msix()
7274 tnapi->last_irq_tag = tnapi->last_tag; in tg3_poll_msix()
7277 /* check for RX/TX work to do */ in tg3_poll_msix()
7278 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && in tg3_poll_msix()
7279 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { in tg3_poll_msix()
7284 if (tnapi == &tp->napi[1] && tp->rx_refill) in tg3_poll_msix()
7289 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); in tg3_poll_msix()
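/* Note: writing the last processed status tag into the upper byte of the
 * interrupt mailbox (the << 24 above) re-enables the interrupt and tells the
 * chip which status block update the host has consumed; if the chip has since
 * produced a newer tag it raises a fresh interrupt.
 */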
7294 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { in tg3_poll_msix()
7295 tw32(HOSTCC_MODE, tp->coalesce_mode | in tg3_poll_msix()
7297 tnapi->coal_now); in tg3_poll_msix()
7324 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); in tg3_process_error()
7329 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); in tg3_process_error()
7334 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); in tg3_process_error()
7350 struct tg3 *tp = tnapi->tp; in tg3_poll()
7352 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_poll()
7355 if (sblk->status & SD_STATUS_ERROR) in tg3_poll()
7369 /* tp->last_tag is used in tg3_int_reenable() below to tell the hw how much work has been processed, so we must read it before checking for more work. */ in tg3_poll()
7373 tnapi->last_tag = sblk->status_tag; in tg3_poll()
7374 tnapi->last_irq_tag = tnapi->last_tag; in tg3_poll()
7377 sblk->status &= ~SD_STATUS_UPDATED; in tg3_poll()
7400 for (i = tp->irq_cnt - 1; i >= 0; i--) in tg3_napi_disable()
7401 napi_disable(&tp->napi[i].napi); in tg3_napi_disable()
7408 for (i = 0; i < tp->irq_cnt; i++) in tg3_napi_enable()
7409 napi_enable(&tp->napi[i].napi); in tg3_napi_enable()
7416 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll); in tg3_napi_init()
7417 for (i = 1; i < tp->irq_cnt; i++) in tg3_napi_init()
7418 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix); in tg3_napi_init()
7425 for (i = 0; i < tp->irq_cnt; i++) in tg3_napi_fini()
7426 netif_napi_del(&tp->napi[i].napi); in tg3_napi_fini()
7431 netif_trans_update(tp->dev); /* prevent tx timeout */ in tg3_netif_stop()
7433 netif_carrier_off(tp->dev); in tg3_netif_stop()
7434 netif_tx_disable(tp->dev); in tg3_netif_stop()
7437 /* tp->lock must be held */
7446 netif_tx_wake_all_queues(tp->dev); in tg3_netif_start()
7448 if (tp->link_up) in tg3_netif_start()
7449 netif_carrier_on(tp->dev); in tg3_netif_start()
7452 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; in tg3_netif_start()
7457 __releases(tp->lock) in tg3_irq_quiesce()
7458 __acquires(tp->lock) in tg3_irq_quiesce()
7462 BUG_ON(tp->irq_sync); in tg3_irq_quiesce()
7464 tp->irq_sync = 1; in tg3_irq_quiesce()
7467 spin_unlock_bh(&tp->lock); in tg3_irq_quiesce()
7469 for (i = 0; i < tp->irq_cnt; i++) in tg3_irq_quiesce()
7470 synchronize_irq(tp->napi[i].irq_vec); in tg3_irq_quiesce()
7472 spin_lock_bh(&tp->lock); in tg3_irq_quiesce()
7476 * If irq_sync is non-zero, the IRQ handlers are quiesced (synchronized via tg3_irq_quiesce()) as part of taking the full lock below.
7482 spin_lock_bh(&tp->lock); in tg3_full_lock()
7489 spin_unlock_bh(&tp->lock); in tg3_full_unlock()
7492 /* One-shot MSI handler - Chip automatically disables the interrupt after sending the MSI, so the driver doesn't have to. */
7498 struct tg3 *tp = tnapi->tp; in tg3_msi_1shot()
7500 prefetch(tnapi->hw_status); in tg3_msi_1shot()
7501 if (tnapi->rx_rcb) in tg3_msi_1shot()
7502 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); in tg3_msi_1shot()
7505 napi_schedule(&tnapi->napi); in tg3_msi_1shot()
7510 /* MSI ISR - No need to check for interrupt sharing and no need to flush the status block: PCI ordering rules guarantee the MSI arrives after the status block write. */
7517 struct tg3 *tp = tnapi->tp; in tg3_msi()
7519 prefetch(tnapi->hw_status); in tg3_msi()
7520 if (tnapi->rx_rcb) in tg3_msi()
7521 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); in tg3_msi()
7523 * Writing any value to intr-mbox-0 clears PCI INTA# and in tg3_msi()
7524 * chip-internal interrupt pending events. in tg3_msi()
7525 * Writing a non-zero value to intr-mbox-0 additionally tells the in tg3_msi()
7526 * NIC to stop sending us irqs, engaging "in-intr-handler" in tg3_msi()
7529 tw32_mailbox(tnapi->int_mbox, 0x00000001); in tg3_msi()
7531 napi_schedule(&tnapi->napi); in tg3_msi()
7539 struct tg3 *tp = tnapi->tp; in tg3_interrupt()
7540 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_interrupt()
7548 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { in tg3_interrupt()
7557 * Writing any value to intr-mbox-0 clears PCI INTA# and in tg3_interrupt()
7558 * chip-internal interrupt pending events. in tg3_interrupt()
7559 * Writing a non-zero value to intr-mbox-0 additionally tells the in tg3_interrupt()
7560 * NIC to stop sending us irqs, engaging "in-intr-handler" in tg3_interrupt()
7563 * Flush the mailbox to de-assert the IRQ immediately and prevent spurious interrupts. in tg3_interrupt()
7570 sblk->status &= ~SD_STATUS_UPDATED; in tg3_interrupt()
7572 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); in tg3_interrupt()
7573 napi_schedule(&tnapi->napi); in tg3_interrupt()
7575 /* No work, shared interrupt perhaps?  Re-enable interrupts and flush that PCI write. in tg3_interrupt()
7588 struct tg3 *tp = tnapi->tp; in tg3_interrupt_tagged()
7589 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_interrupt_tagged()
7597 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { in tg3_interrupt_tagged()
7606 * writing any value to intr-mbox-0 clears PCI INTA# and in tg3_interrupt_tagged()
7607 * chip-internal interrupt pending events. in tg3_interrupt_tagged()
7608 * writing a non-zero value to intr-mbox-0 additionally tells the in tg3_interrupt_tagged()
7609 * NIC to stop sending us irqs, engaging "in-intr-handler" in tg3_interrupt_tagged()
7612 * Flush the mailbox to de-assert the IRQ immediately and prevent spurious interrupts. in tg3_interrupt_tagged()
7624 tnapi->last_irq_tag = sblk->status_tag; in tg3_interrupt_tagged()
7629 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); in tg3_interrupt_tagged()
7631 napi_schedule(&tnapi->napi); in tg3_interrupt_tagged()
7641 struct tg3 *tp = tnapi->tp; in tg3_test_isr()
7642 struct tg3_hw_status *sblk = tnapi->hw_status; in tg3_test_isr()
7644 if ((sblk->status & SD_STATUS_UPDATED) || in tg3_test_isr()
7661 for (i = 0; i < tp->irq_cnt; i++) in tg3_poll_controller()
7662 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); in tg3_poll_controller()
7700 /* Test for DMA addresses > 40-bit */
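/* Illustrative sketch (guarded out, not driver code): helpers in this region
 * test whether a DMA mapping falls outside what the hardware can address.  The
 * function name below is hypothetical; it only shows the shape of a
 * ">40-bit address" check.
 */
#if 0
static bool example_dma_above_40bit(dma_addr_t mapping, unsigned int len)
{
	/* true if any byte of the buffer lies above the 40-bit boundary */
	return ((u64)mapping + len) > DMA_BIT_MASK(40);
}
#endif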
7717 txbd->addr_hi = ((u64) mapping >> 32); in tg3_tx_set_bd()
7718 txbd->addr_lo = ((u64) mapping & 0xffffffff); in tg3_tx_set_bd()
7719 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); in tg3_tx_set_bd()
7720 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); in tg3_tx_set_bd()
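/* Note: a worked example of the packing above, assuming a 1514-byte frame at
 * bus address 0x123456000: addr_hi = 0x1, addr_lo = 0x23456000, and
 * len_flags = (1514 << 16) | flags, i.e. the length sits in the upper 16 bits
 * while the flag bits stay in the low 16 (as the 0x0000ffff mask shows,
 * assuming TXD_LEN_SHIFT is 16).
 */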
7727 struct tg3 *tp = tnapi->tp; in tg3_tx_frag_set()
7742 if (tp->dma_limit) { in tg3_tx_frag_set()
7745 while (len > tp->dma_limit && *budget) { in tg3_tx_frag_set()
7746 u32 frag_len = tp->dma_limit; in tg3_tx_frag_set()
7747 len -= tp->dma_limit; in tg3_tx_frag_set()
7751 len += tp->dma_limit / 2; in tg3_tx_frag_set()
7752 frag_len = tp->dma_limit / 2; in tg3_tx_frag_set()
7755 tnapi->tx_buffers[*entry].fragmented = true; in tg3_tx_frag_set()
7757 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, in tg3_tx_frag_set()
7759 *budget -= 1; in tg3_tx_frag_set()
7768 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, in tg3_tx_frag_set()
7770 *budget -= 1; in tg3_tx_frag_set()
7774 tnapi->tx_buffers[prvidx].fragmented = false; in tg3_tx_frag_set()
7778 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, in tg3_tx_frag_set()
7790 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; in tg3_tx_skb_unmap()
7792 skb = txb->skb; in tg3_tx_skb_unmap()
7793 txb->skb = NULL; in tg3_tx_skb_unmap()
7795 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), in tg3_tx_skb_unmap()
7798 while (txb->fragmented) { in tg3_tx_skb_unmap()
7799 txb->fragmented = false; in tg3_tx_skb_unmap()
7801 txb = &tnapi->tx_buffers[entry]; in tg3_tx_skb_unmap()
7805 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tg3_tx_skb_unmap()
7808 txb = &tnapi->tx_buffers[entry]; in tg3_tx_skb_unmap()
7810 dma_unmap_page(&tnapi->tp->pdev->dev, in tg3_tx_skb_unmap()
7814 while (txb->fragmented) { in tg3_tx_skb_unmap()
7815 txb->fragmented = false; in tg3_tx_skb_unmap()
7817 txb = &tnapi->tx_buffers[entry]; in tg3_tx_skb_unmap()
7822 /* Workaround 4GB and 40-bit hardware DMA bugs. */
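/* Illustrative sketch (guarded out, not driver code): one condition such a
 * workaround must catch is a buffer whose DMA mapping straddles a 4GB
 * boundary.  The helper name below is hypothetical; it merely shows the
 * boundary test.
 */
#if 0
static bool example_crosses_4g_boundary(dma_addr_t mapping, unsigned int len)
{
	/* start and end of the buffer disagree in their upper 32 address bits */
	return ((u64)mapping >> 32) != (((u64)mapping + len - 1) >> 32);
}
#endif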
7828 struct tg3 *tp = tnapi->tp; in tigon3_dma_hwbug_workaround()
7836 int more_headroom = 4 - ((unsigned long)skb->data & 3); in tigon3_dma_hwbug_workaround()
7844 ret = -1; in tigon3_dma_hwbug_workaround()
7847 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, in tigon3_dma_hwbug_workaround()
7848 new_skb->len, DMA_TO_DEVICE); in tigon3_dma_hwbug_workaround()
7850 if (dma_mapping_error(&tp->pdev->dev, new_addr)) { in tigon3_dma_hwbug_workaround()
7852 ret = -1; in tigon3_dma_hwbug_workaround()
7858 tnapi->tx_buffers[*entry].skb = new_skb; in tigon3_dma_hwbug_workaround()
7859 dma_unmap_addr_set(&tnapi->tx_buffers[*entry], in tigon3_dma_hwbug_workaround()
7863 new_skb->len, base_flags, in tigon3_dma_hwbug_workaround()
7865 tg3_tx_skb_unmap(tnapi, save_entry, -1); in tigon3_dma_hwbug_workaround()
7867 ret = -1; in tigon3_dma_hwbug_workaround()
7882 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; in tg3_tso_bug_gso_check()
7893 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; in tg3_tso_bug()
7912 segs = skb_gso_segment(skb, tp->dev->features & in tg3_tso_bug()
7915 tnapi->tx_dropped++; in tg3_tso_bug()
7921 __tg3_start_xmit(seg, tp->dev); in tg3_tso_bug()
7936 int i = -1, would_hit_hwbug; in __tg3_start_xmit()
7947 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; in __tg3_start_xmit()
7954 * and TX reclaim runs via tp->napi.poll inside of a software in __tg3_start_xmit()
7958 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { in __tg3_start_xmit()
7969 entry = tnapi->tx_prod; in __tg3_start_xmit()
7972 mss = skb_shinfo(skb)->gso_size; in __tg3_start_xmit()
7982 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN; in __tg3_start_xmit()
7987 if (skb->protocol == htons(ETH_P_8021Q) || in __tg3_start_xmit()
7988 skb->protocol == htons(ETH_P_8021AD)) { in __tg3_start_xmit()
8001 ip_csum = iph->check; in __tg3_start_xmit()
8002 ip_tot_len = iph->tot_len; in __tg3_start_xmit()
8003 iph->check = 0; in __tg3_start_xmit()
8004 iph->tot_len = htons(mss + hdr_len); in __tg3_start_xmit()
8011 tcp_csum = tcph->check; in __tg3_start_xmit()
8016 tcph->check = 0; in __tg3_start_xmit()
8019 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in __tg3_start_xmit()
8032 if (tcp_opt_len || iph->ihl > 5) { in __tg3_start_xmit()
8035 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); in __tg3_start_xmit()
8039 if (tcp_opt_len || iph->ihl > 5) { in __tg3_start_xmit()
8042 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); in __tg3_start_xmit()
8046 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in __tg3_start_xmit()
8050 if (skb->protocol == htons(ETH_P_8021Q) || in __tg3_start_xmit()
8051 skb->protocol == htons(ETH_P_8021AD)) { in __tg3_start_xmit()
8060 !mss && skb->len > VLAN_ETH_FRAME_LEN) in __tg3_start_xmit()
8068 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && in __tg3_start_xmit()
8071 if (!tp->pre_tx_ts) { in __tg3_start_xmit()
8072 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in __tg3_start_xmit()
8074 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts); in __tg3_start_xmit()
8081 mapping = dma_map_single(&tp->pdev->dev, skb->data, len, in __tg3_start_xmit()
8083 if (dma_mapping_error(&tp->pdev->dev, mapping)) in __tg3_start_xmit()
8087 tnapi->tx_buffers[entry].skb = skb; in __tg3_start_xmit()
8088 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); in __tg3_start_xmit()
8096 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), in __tg3_start_xmit()
8099 } else if (skb_shinfo(skb)->nr_frags > 0) { in __tg3_start_xmit()
8110 last = skb_shinfo(skb)->nr_frags - 1; in __tg3_start_xmit()
8112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __tg3_start_xmit()
8115 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, in __tg3_start_xmit()
8118 tnapi->tx_buffers[entry].skb = NULL; in __tg3_start_xmit()
8119 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, in __tg3_start_xmit()
8121 if (dma_mapping_error(&tp->pdev->dev, mapping)) in __tg3_start_xmit()
8136 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); in __tg3_start_xmit()
8143 iph->check = ip_csum; in __tg3_start_xmit()
8144 iph->tot_len = ip_tot_len; in __tg3_start_xmit()
8146 tcph->check = tcp_csum; in __tg3_start_xmit()
8153 entry = tnapi->tx_prod; in __tg3_start_xmit()
8161 netdev_tx_sent_queue(txq, skb->len); in __tg3_start_xmit()
8166 tnapi->tx_prod = entry; in __tg3_start_xmit()
8183 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); in __tg3_start_xmit()
8184 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; in __tg3_start_xmit()
8188 tnapi->tx_dropped++; in __tg3_start_xmit()
8217 tnapi = &tp->napi[skb_queue_mapping]; in tg3_start_xmit()
8222 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); in tg3_start_xmit()
8231 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | in tg3_mac_loopback()
8234 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; in tg3_mac_loopback()
8237 tp->mac_mode |= MAC_MODE_LINK_POLARITY; in tg3_mac_loopback()
8239 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) in tg3_mac_loopback()
8240 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; in tg3_mac_loopback()
8242 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; in tg3_mac_loopback()
8244 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; in tg3_mac_loopback()
8247 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || in tg3_mac_loopback()
8249 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; in tg3_mac_loopback()
8252 tw32(MAC_MODE, tp->mac_mode); in tg3_mac_loopback()
8264 return -EIO; in tg3_phy_lpbk_set()
8275 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { in tg3_phy_lpbk_set()
8285 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { in tg3_phy_lpbk_set()
8301 if (tp->phy_flags & TG3_PHYFLG_IS_FET) in tg3_phy_lpbk_set()
8306 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && in tg3_phy_lpbk_set()
8316 /* Reset to prevent losing 1st rx packet intermittently */ in tg3_phy_lpbk_set()
8317 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && in tg3_phy_lpbk_set()
8321 tw32_f(MAC_RX_MODE, tp->rx_mode); in tg3_phy_lpbk_set()
8324 mac_mode = tp->mac_mode & in tg3_phy_lpbk_set()
8332 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; in tg3_phy_lpbk_set()
8354 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) in tg3_set_loopback()
8357 spin_lock_bh(&tp->lock); in tg3_set_loopback()
8359 netif_carrier_on(tp->dev); in tg3_set_loopback()
8360 spin_unlock_bh(&tp->lock); in tg3_set_loopback()
8363 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) in tg3_set_loopback()
8366 spin_lock_bh(&tp->lock); in tg3_set_loopback()
8370 spin_unlock_bh(&tp->lock); in tg3_set_loopback()
8380 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) in tg3_fix_features()
8388 netdev_features_t changed = dev->features ^ features; in tg3_set_features()
8401 if (tpr != &tp->napi[0].prodring) { in tg3_rx_prodring_free()
8402 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; in tg3_rx_prodring_free()
8403 i = (i + 1) & tp->rx_std_ring_mask) in tg3_rx_prodring_free()
8404 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], in tg3_rx_prodring_free()
8405 tp->rx_pkt_map_sz); in tg3_rx_prodring_free()
8408 for (i = tpr->rx_jmb_cons_idx; in tg3_rx_prodring_free()
8409 i != tpr->rx_jmb_prod_idx; in tg3_rx_prodring_free()
8410 i = (i + 1) & tp->rx_jmb_ring_mask) { in tg3_rx_prodring_free()
8411 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], in tg3_rx_prodring_free()
8419 for (i = 0; i <= tp->rx_std_ring_mask; i++) in tg3_rx_prodring_free()
8420 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], in tg3_rx_prodring_free()
8421 tp->rx_pkt_map_sz); in tg3_rx_prodring_free()
8424 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) in tg3_rx_prodring_free()
8425 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], in tg3_rx_prodring_free()
8430 /* Initialize rx rings for packet processing.  The chip is shut down, so
8434  * no interrupts arrive and no skb can end up in the driver; tp->{tx,}lock are held and thus we may not sleep. */
8442 tpr->rx_std_cons_idx = 0; in tg3_rx_prodring_alloc()
8443 tpr->rx_std_prod_idx = 0; in tg3_rx_prodring_alloc()
8444 tpr->rx_jmb_cons_idx = 0; in tg3_rx_prodring_alloc()
8445 tpr->rx_jmb_prod_idx = 0; in tg3_rx_prodring_alloc()
8447 if (tpr != &tp->napi[0].prodring) { in tg3_rx_prodring_alloc()
8448 memset(&tpr->rx_std_buffers[0], 0, in tg3_rx_prodring_alloc()
8450 if (tpr->rx_jmb_buffers) in tg3_rx_prodring_alloc()
8451 memset(&tpr->rx_jmb_buffers[0], 0, in tg3_rx_prodring_alloc()
8457 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); in tg3_rx_prodring_alloc()
8461 tp->dev->mtu > ETH_DATA_LEN) in tg3_rx_prodring_alloc()
8463 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); in tg3_rx_prodring_alloc()
8467 /* Initialize the descriptor invariants once here; this works because the card never writes into the rx buffer posting rings. */ in tg3_rx_prodring_alloc()
8469 for (i = 0; i <= tp->rx_std_ring_mask; i++) { in tg3_rx_prodring_alloc()
8472 rxd = &tpr->rx_std[i]; in tg3_rx_prodring_alloc()
8473 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; in tg3_rx_prodring_alloc()
8474 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); in tg3_rx_prodring_alloc()
8475 rxd->opaque = (RXD_OPAQUE_RING_STD | in tg3_rx_prodring_alloc()
8479 /* Now allocate fresh SKBs for each rx ring. */ in tg3_rx_prodring_alloc()
8480 for (i = 0; i < tp->rx_pending; i++) { in tg3_rx_prodring_alloc()
8485 netdev_warn(tp->dev, in tg3_rx_prodring_alloc()
8486 "Using a smaller RX standard ring. Only " in tg3_rx_prodring_alloc()
8488 "successfully\n", i, tp->rx_pending); in tg3_rx_prodring_alloc()
8491 tp->rx_pending = i; in tg3_rx_prodring_alloc()
8499 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); in tg3_rx_prodring_alloc()
8504 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { in tg3_rx_prodring_alloc()
8507 rxd = &tpr->rx_jmb[i].std; in tg3_rx_prodring_alloc()
8508 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; in tg3_rx_prodring_alloc()
8509 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | in tg3_rx_prodring_alloc()
8511 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | in tg3_rx_prodring_alloc()
8515 for (i = 0; i < tp->rx_jumbo_pending; i++) { in tg3_rx_prodring_alloc()
8520 netdev_warn(tp->dev, in tg3_rx_prodring_alloc()
8521 "Using a smaller RX jumbo ring. Only %d " in tg3_rx_prodring_alloc()
8523 "successfully\n", i, tp->rx_jumbo_pending); in tg3_rx_prodring_alloc()
8526 tp->rx_jumbo_pending = i; in tg3_rx_prodring_alloc()
8536 return -ENOMEM; in tg3_rx_prodring_alloc()
8542 kfree(tpr->rx_std_buffers); in tg3_rx_prodring_fini()
8543 tpr->rx_std_buffers = NULL; in tg3_rx_prodring_fini()
8544 kfree(tpr->rx_jmb_buffers); in tg3_rx_prodring_fini()
8545 tpr->rx_jmb_buffers = NULL; in tg3_rx_prodring_fini()
8546 if (tpr->rx_std) { in tg3_rx_prodring_fini()
8547 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), in tg3_rx_prodring_fini()
8548 tpr->rx_std, tpr->rx_std_mapping); in tg3_rx_prodring_fini()
8549 tpr->rx_std = NULL; in tg3_rx_prodring_fini()
8551 if (tpr->rx_jmb) { in tg3_rx_prodring_fini()
8552 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), in tg3_rx_prodring_fini()
8553 tpr->rx_jmb, tpr->rx_jmb_mapping); in tg3_rx_prodring_fini()
8554 tpr->rx_jmb = NULL; in tg3_rx_prodring_fini()
8561 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), in tg3_rx_prodring_init()
8563 if (!tpr->rx_std_buffers) in tg3_rx_prodring_init()
8564 return -ENOMEM; in tg3_rx_prodring_init()
8566 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, in tg3_rx_prodring_init()
8568 &tpr->rx_std_mapping, in tg3_rx_prodring_init()
8570 if (!tpr->rx_std) in tg3_rx_prodring_init()
8574 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), in tg3_rx_prodring_init()
8576 if (!tpr->rx_jmb_buffers) in tg3_rx_prodring_init()
8579 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, in tg3_rx_prodring_init()
8581 &tpr->rx_jmb_mapping, in tg3_rx_prodring_init()
8583 if (!tpr->rx_jmb) in tg3_rx_prodring_init()
8591 return -ENOMEM; in tg3_rx_prodring_init()
8594 /* Free up pending packets in all rx/tx rings.  The chip is shut down, so
8598  * no skb can end up in the driver; tp->{tx,}lock is not held and we are not in interrupt context, so we may sleep. */
8605 for (j = 0; j < tp->irq_cnt; j++) { in tg3_free_rings()
8606 struct tg3_napi *tnapi = &tp->napi[j]; in tg3_free_rings()
8608 tg3_rx_prodring_free(tp, &tnapi->prodring); in tg3_free_rings()
8610 if (!tnapi->tx_buffers) in tg3_free_rings()
8614 struct sk_buff *skb = tnapi->tx_buffers[i].skb; in tg3_free_rings()
8620 skb_shinfo(skb)->nr_frags - 1); in tg3_free_rings()
8624 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); in tg3_free_rings()
8628 /* Initialize tx/rx rings for packet processing.  The chip is shut down, so
8632  * no skb can end up in the driver; tp->{tx,}lock are held and thus we may not sleep. */
8642 for (i = 0; i < tp->irq_cnt; i++) { in tg3_init_rings()
8643 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_init_rings()
8645 tnapi->last_tag = 0; in tg3_init_rings()
8646 tnapi->last_irq_tag = 0; in tg3_init_rings()
8647 tnapi->hw_status->status = 0; in tg3_init_rings()
8648 tnapi->hw_status->status_tag = 0; in tg3_init_rings()
8649 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); in tg3_init_rings()
8651 tnapi->tx_prod = 0; in tg3_init_rings()
8652 tnapi->tx_cons = 0; in tg3_init_rings()
8653 if (tnapi->tx_ring) in tg3_init_rings()
8654 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); in tg3_init_rings()
8656 tnapi->rx_rcb_ptr = 0; in tg3_init_rings()
8657 if (tnapi->rx_rcb) in tg3_init_rings()
8658 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); in tg3_init_rings()
8660 if (tnapi->prodring.rx_std && in tg3_init_rings()
8661 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { in tg3_init_rings()
8663 return -ENOMEM; in tg3_init_rings()
8674 for (i = 0; i < tp->irq_max; i++) { in tg3_mem_tx_release()
8675 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_mem_tx_release()
8677 if (tnapi->tx_ring) { in tg3_mem_tx_release()
8678 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, in tg3_mem_tx_release()
8679 tnapi->tx_ring, tnapi->tx_desc_mapping); in tg3_mem_tx_release()
8680 tnapi->tx_ring = NULL; in tg3_mem_tx_release()
8683 kfree(tnapi->tx_buffers); in tg3_mem_tx_release()
8684 tnapi->tx_buffers = NULL; in tg3_mem_tx_release()
8691 struct tg3_napi *tnapi = &tp->napi[0]; in tg3_mem_tx_acquire()
8699 for (i = 0; i < tp->txq_cnt; i++, tnapi++) { in tg3_mem_tx_acquire()
8700 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE, in tg3_mem_tx_acquire()
8703 if (!tnapi->tx_buffers) in tg3_mem_tx_acquire()
8706 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, in tg3_mem_tx_acquire()
8708 &tnapi->tx_desc_mapping, in tg3_mem_tx_acquire()
8710 if (!tnapi->tx_ring) in tg3_mem_tx_acquire()
8718 return -ENOMEM; in tg3_mem_tx_acquire()
8725 for (i = 0; i < tp->irq_max; i++) { in tg3_mem_rx_release()
8726 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_mem_rx_release()
8728 tg3_rx_prodring_fini(tp, &tnapi->prodring); in tg3_mem_rx_release()
8730 if (!tnapi->rx_rcb) in tg3_mem_rx_release()
8733 dma_free_coherent(&tp->pdev->dev, in tg3_mem_rx_release()
8735 tnapi->rx_rcb, in tg3_mem_rx_release()
8736 tnapi->rx_rcb_mapping); in tg3_mem_rx_release()
8737 tnapi->rx_rcb = NULL; in tg3_mem_rx_release()
8745 limit = tp->rxq_cnt; in tg3_mem_rx_acquire()
8754 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_mem_rx_acquire()
8756 if (tg3_rx_prodring_init(tp, &tnapi->prodring)) in tg3_mem_rx_acquire()
8760 /* If multivector RSS is enabled, vector 0 does not handle rx or tx interrupts, so don't allocate any rx return ring resources for it. */ in tg3_mem_rx_acquire()
8766 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, in tg3_mem_rx_acquire()
8768 &tnapi->rx_rcb_mapping, in tg3_mem_rx_acquire()
8770 if (!tnapi->rx_rcb) in tg3_mem_rx_acquire()
8778 return -ENOMEM; in tg3_mem_rx_acquire()
8789 for (i = 0; i < tp->irq_cnt; i++) { in tg3_free_consistent()
8790 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_free_consistent()
8792 if (tnapi->hw_status) { in tg3_free_consistent()
8793 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, in tg3_free_consistent()
8794 tnapi->hw_status, in tg3_free_consistent()
8795 tnapi->status_mapping); in tg3_free_consistent()
8796 tnapi->hw_status = NULL; in tg3_free_consistent()
8803 /* tp->hw_stats can be referenced safely: in tg3_free_consistent()
8805 * 1. under rtnl_lock, or 2. under tp->lock if TG3_FLAG_INIT_COMPLETE is set. */ in tg3_free_consistent()
8807 if (tp->hw_stats) { in tg3_free_consistent()
8808 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), in tg3_free_consistent()
8809 tp->hw_stats, tp->stats_mapping); in tg3_free_consistent()
8810 tp->hw_stats = NULL; in tg3_free_consistent()
8822 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, in tg3_alloc_consistent()
8824 &tp->stats_mapping, GFP_KERNEL); in tg3_alloc_consistent()
8825 if (!tp->hw_stats) in tg3_alloc_consistent()
8828 for (i = 0; i < tp->irq_cnt; i++) { in tg3_alloc_consistent()
8829 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_alloc_consistent()
8832 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, in tg3_alloc_consistent()
8834 &tnapi->status_mapping, in tg3_alloc_consistent()
8836 if (!tnapi->hw_status) in tg3_alloc_consistent()
8839 sblk = tnapi->hw_status; in tg3_alloc_consistent()
8848 /* When RSS is enabled, the rx_jumbo_consumer, reserved and rx_mini_consumer status block fields are remapped to the other three rx return ring producer indexes. */ in tg3_alloc_consistent()
8852 prodptr = &sblk->idx[0].rx_producer; in tg3_alloc_consistent()
8855 prodptr = &sblk->rx_jumbo_consumer; in tg3_alloc_consistent()
8858 prodptr = &sblk->reserved; in tg3_alloc_consistent()
8861 prodptr = &sblk->rx_mini_consumer; in tg3_alloc_consistent()
8864 tnapi->rx_rcb_prod_idx = prodptr; in tg3_alloc_consistent()
8866 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; in tg3_alloc_consistent()
8877 return -ENOMEM; in tg3_alloc_consistent()
8883 /* To stop a block, clear the enable bit and poll until it clears.  tp->lock is held. */
8912 if (pci_channel_offline(tp->pdev)) { in tg3_stop_block()
8913 dev_err(&tp->pdev->dev, in tg3_stop_block()
8917 return -ENODEV; in tg3_stop_block()
8927 dev_err(&tp->pdev->dev, in tg3_stop_block()
8930 return -ENODEV; in tg3_stop_block()
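/* Illustrative sketch (guarded out, not driver code) of the "clear the enable
 * bit and poll till it clears" pattern described above tg3_stop_block().  The
 * iteration count and delay below are placeholders, not the driver's actual
 * constants.
 */
#if 0
static int example_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
{
	unsigned int i;
	u32 val = tr32(ofs);

	tw32_f(ofs, val & ~enable_bit);		/* ask the engine to stop */
	for (i = 0; i < 1000; i++) {		/* placeholder wait budget */
		udelay(100);
		if (!(tr32(ofs) & enable_bit))	/* engine acknowledged the stop */
			return 0;
	}
	return -ENODEV;			/* never cleared within the budget */
}
#endif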
8936 /* tp->lock is held. */
8943 if (pci_channel_offline(tp->pdev)) { in tg3_abort_hw()
8944 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); in tg3_abort_hw()
8945 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; in tg3_abort_hw()
8946 err = -ENODEV; in tg3_abort_hw()
8950 tp->rx_mode &= ~RX_MODE_ENABLE; in tg3_abort_hw()
8951 tw32_f(MAC_RX_MODE, tp->rx_mode); in tg3_abort_hw()
8969 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; in tg3_abort_hw()
8970 tw32_f(MAC_MODE, tp->mac_mode); in tg3_abort_hw()
8973 tp->tx_mode &= ~TX_MODE_ENABLE; in tg3_abort_hw()
8974 tw32_f(MAC_TX_MODE, tp->tx_mode); in tg3_abort_hw()
8982 dev_err(&tp->pdev->dev, in tg3_abort_hw()
8985 err |= -ENODEV; in tg3_abort_hw()
8999 for (i = 0; i < tp->irq_cnt; i++) { in tg3_abort_hw()
9000 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_abort_hw()
9001 if (tnapi->hw_status) in tg3_abort_hw()
9002 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); in tg3_abort_hw()
9011 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); in tg3_save_pci_state()
9019 /* Re-enable indirect register accesses. */ in tg3_restore_pci_state()
9020 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, in tg3_restore_pci_state()
9021 tp->misc_host_ctrl); in tg3_restore_pci_state()
9033 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); in tg3_restore_pci_state()
9035 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); in tg3_restore_pci_state()
9038 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, in tg3_restore_pci_state()
9039 tp->pci_cacheline_sz); in tg3_restore_pci_state()
9040 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, in tg3_restore_pci_state()
9041 tp->pci_lat_timer); in tg3_restore_pci_state()
9044 /* Make sure PCI-X relaxed ordering bit is clear. */ in tg3_restore_pci_state()
9048 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, in tg3_restore_pci_state()
9051 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, in tg3_restore_pci_state()
9063 pci_read_config_word(tp->pdev, in tg3_restore_pci_state()
9064 tp->msi_cap + PCI_MSI_FLAGS, in tg3_restore_pci_state()
9066 pci_write_config_word(tp->pdev, in tg3_restore_pci_state()
9067 tp->msi_cap + PCI_MSI_FLAGS, in tg3_restore_pci_state()
9118 /* tp->lock is held. */
9120 __releases(tp->lock) in tg3_chip_reset()
9121 __acquires(tp->lock) in tg3_chip_reset()
9127 if (!pci_device_is_present(tp->pdev)) in tg3_chip_reset()
9128 return -ENODEV; in tg3_chip_reset()
9137 tp->nvram_lock_cnt = 0; in tg3_chip_reset()
9155 write_op = tp->write32; in tg3_chip_reset()
9157 tp->write32 = tg3_write32; in tg3_chip_reset()
9166 for (i = 0; i < tp->irq_cnt; i++) { in tg3_chip_reset()
9167 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_chip_reset()
9168 if (tnapi->hw_status) { in tg3_chip_reset()
9169 tnapi->hw_status->status = 0; in tg3_chip_reset()
9170 tnapi->hw_status->status_tag = 0; in tg3_chip_reset()
9172 tnapi->last_tag = 0; in tg3_chip_reset()
9173 tnapi->last_irq_tag = 0; in tg3_chip_reset()
9179 for (i = 0; i < tp->irq_cnt; i++) in tg3_chip_reset()
9180 synchronize_irq(tp->napi[i].irq_vec); in tg3_chip_reset()
9226 tp->write32 = write_op; in tg3_chip_reset()
9249 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); in tg3_chip_reset()
9253 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { in tg3_chip_reset()
9264 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); in tg3_chip_reset()
9265 pci_write_config_dword(tp->pdev, 0xc4, in tg3_chip_reset()
9277 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); in tg3_chip_reset()
9280 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, in tg3_chip_reset()
9305 /* To avoid repercussions from using a potentially defective internal ROM, stop the Rx RISC CPU, which is not required here. */ in tg3_chip_reset()
9316 tw32(GRC_MODE, tp->grc_mode); in tg3_chip_reset()
9324 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && in tg3_chip_reset()
9326 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; in tg3_chip_reset()
9328 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; in tg3_chip_reset()
9329 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); in tg3_chip_reset()
9332 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { in tg3_chip_reset()
9333 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; in tg3_chip_reset()
9334 val = tp->mac_mode; in tg3_chip_reset()
9335 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { in tg3_chip_reset()
9336 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; in tg3_chip_reset()
9337 val = tp->mac_mode; in tg3_chip_reset()
9370 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | in tg3_chip_reset()
9381 tp->last_event_jiffies = jiffies; in tg3_chip_reset()
9387 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; in tg3_chip_reset()
9389 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; in tg3_chip_reset()
9400 /* tp->lock is held. */
9417 if (tp->hw_stats) { in tg3_halt()
9419 tg3_get_nstats(tp, &tp->net_stats_prev); in tg3_halt()
9420 tg3_get_estats(tp, &tp->estats_prev); in tg3_halt()
9423 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); in tg3_halt()
9426 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_halt()
9428 tnapi->rx_dropped = 0; in tg3_halt()
9429 tnapi->tx_dropped = 0; in tg3_halt()
9443 if (!is_valid_ether_addr(addr->sa_data)) in tg3_set_mac_addr()
9444 return -EADDRNOTAVAIL; in tg3_set_mac_addr()
9446 eth_hw_addr_set(dev, addr->sa_data); in tg3_set_mac_addr()
9464 spin_lock_bh(&tp->lock); in tg3_set_mac_addr()
9467 spin_unlock_bh(&tp->lock); in tg3_set_mac_addr()
9472 /* tp->lock is held. */
9499 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); in tg3_coal_tx_init()
9500 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); in tg3_coal_tx_init()
9501 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); in tg3_coal_tx_init()
9507 for (; i < tp->txq_cnt; i++) { in tg3_coal_tx_init()
9511 tw32(reg, ec->tx_coalesce_usecs); in tg3_coal_tx_init()
9513 tw32(reg, ec->tx_max_coalesced_frames); in tg3_coal_tx_init()
9515 tw32(reg, ec->tx_max_coalesced_frames_irq); in tg3_coal_tx_init()
9519 for (; i < tp->irq_max - 1; i++) { in tg3_coal_tx_init()
9529 u32 limit = tp->rxq_cnt; in tg3_coal_rx_init()
9532 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); in tg3_coal_rx_init()
9533 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); in tg3_coal_rx_init()
9534 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); in tg3_coal_rx_init()
9535 limit--; in tg3_coal_rx_init()
9546 tw32(reg, ec->rx_coalesce_usecs); in tg3_coal_rx_init()
9548 tw32(reg, ec->rx_max_coalesced_frames); in tg3_coal_rx_init()
9550 tw32(reg, ec->rx_max_coalesced_frames_irq); in tg3_coal_rx_init()
9553 for (; i < tp->irq_max - 1; i++) { in tg3_coal_rx_init()
9566 u32 val = ec->stats_block_coalesce_usecs; in __tg3_set_coalesce()
9568 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); in __tg3_set_coalesce()
9569 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); in __tg3_set_coalesce()
9571 if (!tp->link_up) in __tg3_set_coalesce()
9578 /* tp->lock is held. */
9600 /* tp->lock is held. */
9609 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { in tg3_tx_rcbs_init()
9610 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_tx_rcbs_init()
9612 if (!tnapi->tx_ring) in tg3_tx_rcbs_init()
9615 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, in tg3_tx_rcbs_init()
9621 /* tp->lock is held. */
9644 /* tp->lock is held. */
9653 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { in tg3_rx_ret_rcbs_init()
9654 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_rx_ret_rcbs_init()
9656 if (!tnapi->rx_rcb) in tg3_rx_ret_rcbs_init()
9659 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, in tg3_rx_ret_rcbs_init()
9660 (tp->rx_ret_ring_mask + 1) << in tg3_rx_ret_rcbs_init()
9665 /* tp->lock is held. */
9670 struct tg3_napi *tnapi = &tp->napi[0]; in tg3_rings_reset()
9677 tw32_mailbox_f(tp->napi[0].int_mbox, 1); in tg3_rings_reset()
9678 tp->napi[0].chk_msi_cnt = 0; in tg3_rings_reset()
9679 tp->napi[0].last_rx_cons = 0; in tg3_rings_reset()
9680 tp->napi[0].last_tx_cons = 0; in tg3_rings_reset()
9684 for (i = 1; i < tp->irq_max; i++) { in tg3_rings_reset()
9685 tp->napi[i].tx_prod = 0; in tg3_rings_reset()
9686 tp->napi[i].tx_cons = 0; in tg3_rings_reset()
9688 tw32_mailbox(tp->napi[i].prodmbox, 0); in tg3_rings_reset()
9689 tw32_rx_mbox(tp->napi[i].consmbox, 0); in tg3_rings_reset()
9690 tw32_mailbox_f(tp->napi[i].int_mbox, 1); in tg3_rings_reset()
9691 tp->napi[i].chk_msi_cnt = 0; in tg3_rings_reset()
9692 tp->napi[i].last_rx_cons = 0; in tg3_rings_reset()
9693 tp->napi[i].last_tx_cons = 0; in tg3_rings_reset()
9696 tw32_mailbox(tp->napi[0].prodmbox, 0); in tg3_rings_reset()
9698 tp->napi[0].tx_prod = 0; in tg3_rings_reset()
9699 tp->napi[0].tx_cons = 0; in tg3_rings_reset()
9700 tw32_mailbox(tp->napi[0].prodmbox, 0); in tg3_rings_reset()
9701 tw32_rx_mbox(tp->napi[0].consmbox, 0); in tg3_rings_reset()
9704 /* Make sure the NIC-based send BD rings are disabled. */ in tg3_rings_reset()
9712 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); in tg3_rings_reset()
9716 ((u64) tnapi->status_mapping >> 32)); in tg3_rings_reset()
9718 ((u64) tnapi->status_mapping & 0xffffffff)); in tg3_rings_reset()
9722 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { in tg3_rings_reset()
9723 u64 mapping = (u64)tnapi->status_mapping; in tg3_rings_reset()
9729 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); in tg3_rings_reset()
9752 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); in tg3_setup_rxbd_thresholds()
9753 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); in tg3_setup_rxbd_thresholds()
9766 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); in tg3_setup_rxbd_thresholds()
9813 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | in __tg3_set_rx_mode()
9824 if (dev->flags & IFF_PROMISC) { in __tg3_set_rx_mode()
9827 } else if (dev->flags & IFF_ALLMULTI) { in __tg3_set_rx_mode()
9842 crc = calc_crc(ha->addr, ETH_ALEN); in __tg3_set_rx_mode()
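/* Note (assumption, not shown in this excerpt): drivers of this family
 * typically fold this CRC into a small hash -- a few bits select one of the
 * MAC hash filter registers and a bit within it -- so multicast filtering is a
 * membership test rather than an exact-match table lookup.
 */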
9857 } else if (!(dev->flags & IFF_PROMISC)) { in __tg3_set_rx_mode()
9863 __tg3_set_one_mac_addr(tp, ha->addr, in __tg3_set_rx_mode()
9869 if (rx_mode != tp->rx_mode) { in __tg3_set_rx_mode()
9870 tp->rx_mode = rx_mode; in __tg3_set_rx_mode()
9881 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); in tg3_rss_init_dflt_indir_tbl()
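/* Note: ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the default
 * indirection table spreads RSS buckets round-robin across the qcnt receive
 * queues.
 */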
9891 if (tp->rxq_cnt == 1) { in tg3_rss_check_indir_tbl()
9892 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); in tg3_rss_check_indir_tbl()
9898 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) in tg3_rss_check_indir_tbl()
9903 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); in tg3_rss_check_indir_tbl()
9912 u32 val = tp->rss_ind_tbl[i]; in tg3_rss_write_indir_tbl()
9916 val |= tp->rss_ind_tbl[i]; in tg3_rss_write_indir_tbl()
9931 /* tp->lock is held. */
9936 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; in tg3_reset_hw()
9947 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && in tg3_reset_hw()
9948 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { in tg3_reset_hw()
9951 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; in tg3_reset_hw()
9955 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) in tg3_reset_hw()
10069 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; in tg3_reset_hw()
10070 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); in tg3_reset_hw()
10116 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); in tg3_reset_hw()
10122 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); in tg3_reset_hw()
10125 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | in tg3_reset_hw()
10129 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; in tg3_reset_hw()
10131 /* Pseudo-header checksum is done by hardware logic and not in tg3_reset_hw()
10132  * the offload processors, so make the chip do the pseudo-header checksums on receive; for transmit it is more in tg3_reset_hw()
10134  * convenient to do the pseudo-header checksum in software. */ in tg3_reset_hw()
10137 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; in tg3_reset_hw()
10140 if (tp->rxptpctl) in tg3_reset_hw()
10142 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); in tg3_reset_hw()
10147 tw32(GRC_MODE, tp->grc_mode | val); in tg3_reset_hw()
10153 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && in tg3_reset_hw()
10154 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { in tg3_reset_hw()
10179 fw_len = tp->fw_len; in tg3_reset_hw()
10180 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); in tg3_reset_hw()
10184 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); in tg3_reset_hw()
10187 if (tp->dev->mtu <= ETH_DATA_LEN) { in tg3_reset_hw()
10189 tp->bufmgr_config.mbuf_read_dma_low_water); in tg3_reset_hw()
10191 tp->bufmgr_config.mbuf_mac_rx_low_water); in tg3_reset_hw()
10193 tp->bufmgr_config.mbuf_high_water); in tg3_reset_hw()
10196 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); in tg3_reset_hw()
10198 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); in tg3_reset_hw()
10200 tp->bufmgr_config.mbuf_high_water_jumbo); in tg3_reset_hw()
10203 tp->bufmgr_config.dma_low_water); in tg3_reset_hw()
10205 tp->bufmgr_config.dma_high_water); in tg3_reset_hw()
10222 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); in tg3_reset_hw()
10223 return -ENODEV; in tg3_reset_hw()
10232 * RCVDBDI_STD_BD: standard eth size rx ring in tg3_reset_hw()
10233 * RCVDBDI_JUMBO_BD: jumbo frame rx ring in tg3_reset_hw()
10234 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) in tg3_reset_hw()
10238 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | ring attribute flags in tg3_reset_hw()
10249 ((u64) tpr->rx_std_mapping >> 32)); in tg3_reset_hw()
10251 ((u64) tpr->rx_std_mapping & 0xffffffff)); in tg3_reset_hw()
10269 ((u64) tpr->rx_jmb_mapping >> 32)); in tg3_reset_hw()
10271 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); in tg3_reset_hw()
10297 tpr->rx_std_prod_idx = tp->rx_pending; in tg3_reset_hw()
10298 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); in tg3_reset_hw()
10300 tpr->rx_jmb_prod_idx = in tg3_reset_hw()
10301 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; in tg3_reset_hw()
10302 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); in tg3_reset_hw()
10311 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); in tg3_reset_hw()
10365 tp->dma_limit = 0; in tg3_reset_hw()
10366 if (tp->dev->mtu <= ETH_DATA_LEN) { in tg3_reset_hw()
10368 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; in tg3_reset_hw()
10454 __tg3_set_coalesce(tp, &tp->coal); in tg3_reset_hw()
10462 ((u64) tp->stats_mapping >> 32)); in tg3_reset_hw()
10464 ((u64) tp->stats_mapping & 0xffffffff)); in tg3_reset_hw()
10478 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); in tg3_reset_hw()
10485 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { in tg3_reset_hw()
10486 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; in tg3_reset_hw()
10487 /* reset to prevent losing 1st rx packet intermittently */ in tg3_reset_hw()
10492 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | in tg3_reset_hw()
10496 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; in tg3_reset_hw()
10498 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && in tg3_reset_hw()
10500 tp->mac_mode |= MAC_MODE_LINK_POLARITY; in tg3_reset_hw()
10501 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); in tg3_reset_hw()
10504 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). in tg3_reset_hw()
10524 tp->grc_local_ctrl &= ~gpio_mask; in tg3_reset_hw()
10525 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; in tg3_reset_hw()
10529 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | in tg3_reset_hw()
10532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); in tg3_reset_hw()
10538 if (tp->irq_cnt > 1) in tg3_reset_hw()
10581 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, in tg3_reset_hw()
10590 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, in tg3_reset_hw()
10657 tp->tx_mode = TX_MODE_ENABLE; in tg3_reset_hw()
10661 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; in tg3_reset_hw()
10666 tp->tx_mode &= ~val; in tg3_reset_hw()
10667 tp->tx_mode |= tr32(MAC_TX_MODE) & val; in tg3_reset_hw()
10670 tw32_f(MAC_TX_MODE, tp->tx_mode); in tg3_reset_hw()
10684 tp->rx_mode = RX_MODE_ENABLE; in tg3_reset_hw()
10686 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; in tg3_reset_hw()
10689 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; in tg3_reset_hw()
10692 tp->rx_mode |= RX_MODE_RSS_ENABLE | in tg3_reset_hw()
10699 tw32_f(MAC_RX_MODE, tp->rx_mode); in tg3_reset_hw()
10702 tw32(MAC_LED_CTRL, tp->led_ctrl); in tg3_reset_hw()
10705 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { in tg3_reset_hw()
10709 tw32_f(MAC_RX_MODE, tp->rx_mode); in tg3_reset_hw()
10712 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { in tg3_reset_hw()
10714 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { in tg3_reset_hw()
10716 /* only if the signal pre-emphasis bit is not set */ in tg3_reset_hw()
10736 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { in tg3_reset_hw()
10737 /* Use hardware link auto-negotiation */ in tg3_reset_hw()
10741 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && in tg3_reset_hw()
10747 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; in tg3_reset_hw()
10748 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; in tg3_reset_hw()
10749 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); in tg3_reset_hw()
10753 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) in tg3_reset_hw()
10754 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; in tg3_reset_hw()
10760 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && in tg3_reset_hw()
10761 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { in tg3_reset_hw()
10773 __tg3_set_rx_mode(tp->dev); in tg3_reset_hw()
10786 limit -= 4; in tg3_reset_hw()
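The MBUF pool sizing a few lines above rounds tp->fw_len up to a 128-byte boundary before carving it, plus a 0xa00 reserve, out of the 5705 SRAM MBUF pool. A minimal user-space sketch of that round-up-and-carve arithmetic; the pool size used here is an assumption for illustration, not the real NIC_SRAM_MBUF_POOL_SIZE5705 value:

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE   0x8000u	/* stand-in for NIC_SRAM_MBUF_POOL_SIZE5705 */
#define FW_RESERVE  0xa00u	/* extra reserve subtracted in tg3_reset_hw() */

/* Round len up to a 128-byte (0x80) boundary, as the listing does. */
static uint32_t round_up_0x80(uint32_t len)
{
	return (len + (0x80u - 1u)) & ~(0x80u - 1u);
}

int main(void)
{
	uint32_t fw_len  = 0x1234;		/* hypothetical firmware size */
	uint32_t aligned = round_up_0x80(fw_len);

	printf("aligned=0x%x, mbuf pool left=0x%x\n",
	       aligned, POOL_SIZE - aligned - FW_RESERVE);
	return 0;
}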
10846 * packet processing. Invoked with tp->lock held.
10873 if (ocir->signature != TG3_OCIR_SIG_MAGIC || in tg3_sd_scan_scratchpad()
10874 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) in tg3_sd_scan_scratchpad()
10887 spin_lock_bh(&tp->lock); in tg3_show_temp()
10888 tg3_ape_scratchpad_read(tp, &temperature, attr->index, in tg3_show_temp()
10890 spin_unlock_bh(&tp->lock); in tg3_show_temp()
10912 if (tp->hwmon_dev) { in tg3_hwmon_close()
10913 hwmon_device_unregister(tp->hwmon_dev); in tg3_hwmon_close()
10914 tp->hwmon_dev = NULL; in tg3_hwmon_close()
10922 struct pci_dev *pdev = tp->pdev; in tg3_hwmon_open()
10938 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", in tg3_hwmon_open()
10940 if (IS_ERR(tp->hwmon_dev)) { in tg3_hwmon_open()
10941 tp->hwmon_dev = NULL; in tg3_hwmon_open()
10942 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); in tg3_hwmon_open()
10953 (PSTAT)->low += __val; \
10954 if ((PSTAT)->low < __val) \
10955 (PSTAT)->high += 1; \
10960 struct tg3_hw_stats *sp = tp->hw_stats; in tg3_periodic_fetch_stats() local
10962 if (!tp->link_up) in tg3_periodic_fetch_stats()
10965 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); in tg3_periodic_fetch_stats()
10966 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); in tg3_periodic_fetch_stats()
10967 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); in tg3_periodic_fetch_stats()
10968 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); in tg3_periodic_fetch_stats()
10969 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); in tg3_periodic_fetch_stats()
10970 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); in tg3_periodic_fetch_stats()
10971 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); in tg3_periodic_fetch_stats()
10972 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); in tg3_periodic_fetch_stats()
10973 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); in tg3_periodic_fetch_stats()
10974 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); in tg3_periodic_fetch_stats()
10975 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); in tg3_periodic_fetch_stats()
10976 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); in tg3_periodic_fetch_stats()
10977 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); in tg3_periodic_fetch_stats()
10979 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + in tg3_periodic_fetch_stats()
10980 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { in tg3_periodic_fetch_stats()
10989 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); in tg3_periodic_fetch_stats()
10990 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); in tg3_periodic_fetch_stats()
10991 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); in tg3_periodic_fetch_stats()
10992 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); in tg3_periodic_fetch_stats()
10993 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); in tg3_periodic_fetch_stats()
10994 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); in tg3_periodic_fetch_stats()
10995 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); in tg3_periodic_fetch_stats()
10996 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); in tg3_periodic_fetch_stats()
10997 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); in tg3_periodic_fetch_stats()
10998 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); in tg3_periodic_fetch_stats()
10999 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); in tg3_periodic_fetch_stats()
11000 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); in tg3_periodic_fetch_stats()
11001 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); in tg3_periodic_fetch_stats()
11002 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); in tg3_periodic_fetch_stats()
11004 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); in tg3_periodic_fetch_stats()
11009 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); in tg3_periodic_fetch_stats()
11015 sp->rx_discards.low += val; in tg3_periodic_fetch_stats()
11016 if (sp->rx_discards.low < val) in tg3_periodic_fetch_stats()
11017 sp->rx_discards.high += 1; in tg3_periodic_fetch_stats()
11019 sp->mbuf_lwm_thresh_hit = sp->rx_discards; in tg3_periodic_fetch_stats()
11021 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); in tg3_periodic_fetch_stats()
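The TG3_STAT_ADD32 lines excerpted above fold a 32-bit hardware counter reading into a 64-bit {high, low} software accumulator, bumping the high word whenever the low word wraps. A standalone sketch of just that carry check; unlike the macro, it takes the increment directly instead of reading a MAC statistics register:

#include <stdint.h>
#include <stdio.h>

struct stat64_pair {		/* mirrors the driver's split 64-bit counter */
	uint32_t high;
	uint32_t low;
};

/* Add a 32-bit increment; if the low word wrapped past 2^32 the sum is
 * smaller than the increment, so carry into the high word. */
static void stat_add32(struct stat64_pair *p, uint32_t val)
{
	p->low += val;
	if (p->low < val)
		p->high += 1;
}

int main(void)
{
	struct stat64_pair s = { .high = 0, .low = 0xffffff00u };

	stat_add32(&s, 0x200);				/* forces the carry path */
	printf("high=%u low=0x%x\n", s.high, s.low);	/* high=1 low=0x100 */
	return 0;
}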
11028 for (i = 0; i < tp->irq_cnt; i++) { in tg3_chk_missed_msi()
11029 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_chk_missed_msi()
11032 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && in tg3_chk_missed_msi()
11033 tnapi->last_tx_cons == tnapi->tx_cons) { in tg3_chk_missed_msi()
11034 if (tnapi->chk_msi_cnt < 1) { in tg3_chk_missed_msi()
11035 tnapi->chk_msi_cnt++; in tg3_chk_missed_msi()
11041 tnapi->chk_msi_cnt = 0; in tg3_chk_missed_msi()
11042 tnapi->last_rx_cons = tnapi->rx_rcb_ptr; in tg3_chk_missed_msi()
11043 tnapi->last_tx_cons = tnapi->tx_cons; in tg3_chk_missed_msi()
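tg3_chk_missed_msi() above treats an interrupt as possibly lost when a vector's RX and TX consumer indices have not moved since the previous timer tick; chk_msi_cnt gives it one grace tick before the interrupt is re-fired. A loose, simplified sketch of that per-vector state machine; the has_work flag stands in for tg3_has_work(), which is not part of the matched lines:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct napi_snapshot {		/* mirrors last_rx_cons/last_tx_cons/chk_msi_cnt */
	uint32_t last_rx_cons, last_tx_cons;
	uint32_t chk_msi_cnt;
};

/* Returns true when the interrupt should be re-fired: work is pending but
 * the consumer indices have been frozen for two consecutive ticks. */
static bool msi_looks_missed(struct napi_snapshot *s, uint32_t rx_cons,
			     uint32_t tx_cons, bool has_work)
{
	bool kick = false;

	if (has_work &&
	    s->last_rx_cons == rx_cons && s->last_tx_cons == tx_cons) {
		if (s->chk_msi_cnt < 1) {
			s->chk_msi_cnt++;	/* first stalled tick: wait */
			return false;
		}
		kick = true;			/* second stalled tick */
	}
	s->chk_msi_cnt = 0;
	s->last_rx_cons = rx_cons;
	s->last_tx_cons = tx_cons;
	return kick;
}

int main(void)
{
	struct napi_snapshot s = { 0, 0, 0 };

	printf("%d\n", msi_looks_missed(&s, 0, 0, true));	/* 0: grace tick */
	printf("%d\n", msi_looks_missed(&s, 0, 0, true));	/* 1: re-fire */
	return 0;
}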
11051 spin_lock(&tp->lock); in tg3_timer()
11053 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { in tg3_timer()
11054 spin_unlock(&tp->lock); in tg3_timer()
11068 /* All of this garbage is because when using non-tagged in tg3_timer()
11072 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { in tg3_timer()
11074 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); in tg3_timer()
11076 tw32(HOSTCC_MODE, tp->coalesce_mode | in tg3_timer()
11081 spin_unlock(&tp->lock); in tg3_timer()
11088 if (!--tp->timer_counter) { in tg3_timer()
11092 if (tp->setlpicnt && !--tp->setlpicnt) in tg3_timer()
11102 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { in tg3_timer()
11114 if (tp->link_up && in tg3_timer()
11118 if (!tp->link_up && in tg3_timer()
11124 if (!tp->serdes_counter) { in tg3_timer()
11126 (tp->mac_mode & in tg3_timer()
11129 tw32_f(MAC_MODE, tp->mac_mode); in tg3_timer()
11134 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && in tg3_timer()
11142 if (link_up != tp->link_up) in tg3_timer()
11146 tp->timer_counter = tp->timer_multiplier; in tg3_timer()
11154 * that may be filled with rx packets destined for the host. in tg3_timer()
11166 if (!--tp->asf_counter) { in tg3_timer()
11178 tp->asf_counter = tp->asf_multiplier; in tg3_timer()
11184 spin_unlock(&tp->lock); in tg3_timer()
11187 tp->timer.expires = jiffies + tp->timer_offset; in tg3_timer()
11188 add_timer(&tp->timer); in tg3_timer()
11196 tp->timer_offset = HZ; in tg3_timer_init()
11198 tp->timer_offset = HZ / 10; in tg3_timer_init()
11200 BUG_ON(tp->timer_offset > HZ); in tg3_timer_init()
11202 tp->timer_multiplier = (HZ / tp->timer_offset); in tg3_timer_init()
11203 tp->asf_multiplier = (HZ / tp->timer_offset) * in tg3_timer_init()
11206 timer_setup(&tp->timer, tg3_timer, 0); in tg3_timer_init()
11211 tp->asf_counter = tp->asf_multiplier; in tg3_timer_start()
11212 tp->timer_counter = tp->timer_multiplier; in tg3_timer_start()
11214 tp->timer.expires = jiffies + tp->timer_offset; in tg3_timer_start()
11215 add_timer(&tp->timer); in tg3_timer_start()
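tg3_timer_init() and tg3_timer_start() above derive the polling cadence from HZ: the timer fires every timer_offset jiffies (HZ in one branch, HZ/10 in the other), timer_multiplier counts how many ticks make one second, and asf_multiplier scales that by the ASF heartbeat interval. A small arithmetic sketch with HZ as a plain constant and the heartbeat period as an assumed stand-in:

#include <stdio.h>

#define HZ 250			/* assumed jiffies-per-second for the sketch */
#define ASF_HEARTBEAT_SECS 5	/* stand-in for the driver's ASF update period */

int main(void)
{
	int tagged_status    = 0;			/* hypothetical flag */
	int timer_offset     = tagged_status ? HZ : HZ / 10;
	int timer_multiplier = HZ / timer_offset;	/* ticks per second */
	int asf_multiplier   = timer_multiplier * ASF_HEARTBEAT_SECS;

	printf("offset=%d jiffies, %d ticks/s, ASF kick every %d ticks\n",
	       timer_offset, timer_multiplier, asf_multiplier);
	return 0;
}

With timer_offset = HZ/10 the once-per-second housekeeping in tg3_timer() runs every tenth tick, which is why timer_counter is reloaded from timer_multiplier each time it counts down to zero.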
11220 del_timer_sync(&tp->timer); in tg3_timer_stop()
11223 /* Restart hardware after configuration changes, self-test, etc.
11224 * Invoked with tp->lock held.
11227 __releases(tp->lock) in tg3_restart_hw()
11228 __acquires(tp->lock) in tg3_restart_hw()
11234 netdev_err(tp->dev, in tg3_restart_hw()
11235 "Failed to re-initialize device, aborting\n"); in tg3_restart_hw()
11239 tp->irq_sync = 0; in tg3_restart_hw()
11241 dev_close(tp->dev); in tg3_restart_hw()
11255 if (tp->pcierr_recovery || !netif_running(tp->dev) || in tg3_reset_task()
11256 tp->pdev->error_state != pci_channel_io_normal) { in tg3_reset_task()
11272 tp->write32_tx_mbox = tg3_write32_tx_mbox; in tg3_reset_task()
11273 tp->write32_rx_mbox = tg3_write_flush_reg32; in tg3_reset_task()
11282 tp->irq_sync = 0; in tg3_reset_task()
11288 dev_close(tp->dev); in tg3_reset_task()
11305 struct tg3_napi *tnapi = &tp->napi[irq_num]; in tg3_request_irq()
11307 if (tp->irq_cnt == 1) in tg3_request_irq()
11308 name = tp->dev->name; in tg3_request_irq()
11310 name = &tnapi->irq_lbl[0]; in tg3_request_irq()
11311 if (tnapi->tx_buffers && tnapi->rx_rcb) in tg3_request_irq()
11313 "%s-txrx-%d", tp->dev->name, irq_num); in tg3_request_irq()
11314 else if (tnapi->tx_buffers) in tg3_request_irq()
11316 "%s-tx-%d", tp->dev->name, irq_num); in tg3_request_irq()
11317 else if (tnapi->rx_rcb) in tg3_request_irq()
11319 "%s-rx-%d", tp->dev->name, irq_num); in tg3_request_irq()
11322 "%s-%d", tp->dev->name, irq_num); in tg3_request_irq()
11323 name[IFNAMSIZ-1] = 0; in tg3_request_irq()
11338 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); in tg3_request_irq()
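tg3_request_irq() labels each vector "<dev>-txrx-<n>", "<dev>-tx-<n>", "<dev>-rx-<n>" or "<dev>-<n>" depending on whether it services TX buffers, an RX return ring, or both. A user-space sketch of the same naming; the buffer size is an arbitrary stand-in for the driver's irq_lbl[] length:

#include <stdbool.h>
#include <stdio.h>

#define IRQ_LBL_SZ 20	/* stand-in for the driver's label buffer size */

static void build_irq_name(char *buf, const char *dev, int idx,
			   bool has_tx, bool has_rx)
{
	if (has_tx && has_rx)
		snprintf(buf, IRQ_LBL_SZ, "%s-txrx-%d", dev, idx);
	else if (has_tx)
		snprintf(buf, IRQ_LBL_SZ, "%s-tx-%d", dev, idx);
	else if (has_rx)
		snprintf(buf, IRQ_LBL_SZ, "%s-rx-%d", dev, idx);
	else
		snprintf(buf, IRQ_LBL_SZ, "%s-%d", dev, idx);
}

int main(void)
{
	char name[IRQ_LBL_SZ];

	build_irq_name(name, "eth0", 1, true, true);
	puts(name);			/* prints "eth0-txrx-1" */
	return 0;
}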
11343 struct tg3_napi *tnapi = &tp->napi[0]; in tg3_test_interrupt()
11344 struct net_device *dev = tp->dev; in tg3_test_interrupt()
11349 return -ENODEV; in tg3_test_interrupt()
11353 free_irq(tnapi->irq_vec, tnapi); in tg3_test_interrupt()
11364 err = request_irq(tnapi->irq_vec, tg3_test_isr, in tg3_test_interrupt()
11365 IRQF_SHARED, dev->name, tnapi); in tg3_test_interrupt()
11369 tnapi->hw_status->status &= ~SD_STATUS_UPDATED; in tg3_test_interrupt()
11372 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | in tg3_test_interrupt()
11373 tnapi->coal_now); in tg3_test_interrupt()
11378 int_mbox = tr32_mailbox(tnapi->int_mbox); in tg3_test_interrupt()
11388 tnapi->hw_status->status_tag != tnapi->last_tag) in tg3_test_interrupt()
11389 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); in tg3_test_interrupt()
11396 free_irq(tnapi->irq_vec, tnapi); in tg3_test_interrupt()
11412 return -EIO; in tg3_test_interrupt()
11429 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); in tg3_test_msi()
11430 pci_write_config_word(tp->pdev, PCI_COMMAND, in tg3_test_msi()
11435 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); in tg3_test_msi()
11441 if (err != -EIO) in tg3_test_msi()
11445 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " in tg3_test_msi()
11449 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); in tg3_test_msi()
11451 pci_disable_msi(tp->pdev); in tg3_test_msi()
11454 tp->napi[0].irq_vec = tp->pdev->irq; in tg3_test_msi()
11471 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); in tg3_test_msi()
11480 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { in tg3_request_firmware()
11481 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", in tg3_request_firmware()
11482 tp->fw_needed); in tg3_request_firmware()
11483 return -ENOENT; in tg3_request_firmware()
11486 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; in tg3_request_firmware()
11493 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ in tg3_request_firmware()
11494 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { in tg3_request_firmware()
11495 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", in tg3_request_firmware()
11496 tp->fw_len, tp->fw_needed); in tg3_request_firmware()
11497 release_firmware(tp->fw); in tg3_request_firmware()
11498 tp->fw = NULL; in tg3_request_firmware()
11499 return -EINVAL; in tg3_request_firmware()
11503 tp->fw_needed = NULL; in tg3_request_firmware()
11509 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); in tg3_irq_count()
11512 /* We want as many rx rings enabled as there are cpus. in tg3_irq_count()
11513 * In multiqueue MSI-X mode, the first MSI-X vector in tg3_irq_count()
11517 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); in tg3_irq_count()
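tg3_irq_count() asks for max(rxq_cnt, txq_cnt) vectors, plus one extra in multiqueue mode because the first MSI-X vector only handles link and error events, and clamps the total to irq_max. A one-function sketch of that clamp; the multiqueue flag here is a generic stand-in for the driver's RSS/multiqueue test:

#include <stdio.h>

static unsigned int irq_count(unsigned int rxq, unsigned int txq,
			      unsigned int irq_max, int multiqueue)
{
	unsigned int irq_cnt = rxq > txq ? rxq : txq;

	if (multiqueue && irq_cnt > 1) {
		/* reserve the first vector for link/error events */
		irq_cnt = irq_cnt + 1 < irq_max ? irq_cnt + 1 : irq_max;
	}
	return irq_cnt;
}

int main(void)
{
	printf("%u\n", irq_count(4, 1, 5, 1));	/* prints 5 */
	return 0;
}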
11528 tp->txq_cnt = tp->txq_req; in tg3_enable_msix()
11529 tp->rxq_cnt = tp->rxq_req; in tg3_enable_msix()
11530 if (!tp->rxq_cnt) in tg3_enable_msix()
11531 tp->rxq_cnt = netif_get_num_default_rss_queues(); in tg3_enable_msix()
11532 if (tp->rxq_cnt > tp->rxq_max) in tg3_enable_msix()
11533 tp->rxq_cnt = tp->rxq_max; in tg3_enable_msix()
11535 /* Disable multiple TX rings by default. Simple round-robin hardware in tg3_enable_msix()
11539 if (!tp->txq_req) in tg3_enable_msix()
11540 tp->txq_cnt = 1; in tg3_enable_msix()
11542 tp->irq_cnt = tg3_irq_count(tp); in tg3_enable_msix()
11544 for (i = 0; i < tp->irq_max; i++) { in tg3_enable_msix()
11549 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); in tg3_enable_msix()
11552 } else if (rc < tp->irq_cnt) { in tg3_enable_msix()
11553 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", in tg3_enable_msix()
11554 tp->irq_cnt, rc); in tg3_enable_msix()
11555 tp->irq_cnt = rc; in tg3_enable_msix()
11556 tp->rxq_cnt = max(rc - 1, 1); in tg3_enable_msix()
11557 if (tp->txq_cnt) in tg3_enable_msix()
11558 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); in tg3_enable_msix()
11561 for (i = 0; i < tp->irq_max; i++) in tg3_enable_msix()
11562 tp->napi[i].irq_vec = msix_ent[i].vector; in tg3_enable_msix()
11564 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { in tg3_enable_msix()
11565 pci_disable_msix(tp->pdev); in tg3_enable_msix()
11569 if (tp->irq_cnt == 1) in tg3_enable_msix()
11574 if (tp->txq_cnt > 1) in tg3_enable_msix()
11577 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); in tg3_enable_msix()
11589 netdev_warn(tp->dev, in tg3_ints_init()
11596 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) in tg3_ints_init()
11601 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) in tg3_ints_init()
11609 tp->irq_cnt = 1; in tg3_ints_init()
11610 tp->napi[0].irq_vec = tp->pdev->irq; in tg3_ints_init()
11613 if (tp->irq_cnt == 1) { in tg3_ints_init()
11614 tp->txq_cnt = 1; in tg3_ints_init()
11615 tp->rxq_cnt = 1; in tg3_ints_init()
11616 netif_set_real_num_tx_queues(tp->dev, 1); in tg3_ints_init()
11617 netif_set_real_num_rx_queues(tp->dev, 1); in tg3_ints_init()
11624 pci_disable_msix(tp->pdev); in tg3_ints_fini()
11626 pci_disable_msi(tp->pdev); in tg3_ints_fini()
11636 struct net_device *dev = tp->dev; in tg3_start()
11658 for (i = 0; i < tp->irq_cnt; i++) { in tg3_start()
11661 for (i--; i >= 0; i--) { in tg3_start()
11662 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_start()
11664 free_irq(tnapi->irq_vec, tnapi); in tg3_start()
11726 if (dev->features & NETIF_F_LOOPBACK) in tg3_start()
11727 tg3_set_loopback(dev, dev->features); in tg3_start()
11732 for (i = tp->irq_cnt - 1; i >= 0; i--) { in tg3_start()
11733 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_start()
11734 free_irq(tnapi->irq_vec, tnapi); in tg3_start()
11771 for (i = tp->irq_cnt - 1; i >= 0; i--) { in tg3_stop()
11772 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_stop()
11773 free_irq(tnapi->irq_vec, tnapi); in tg3_stop()
11788 if (tp->pcierr_recovery) { in tg3_open()
11791 return -EAGAIN; in tg3_open()
11794 if (tp->fw_needed) { in tg3_open()
11798 netdev_warn(tp->dev, "EEE capability disabled\n"); in tg3_open()
11799 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; in tg3_open()
11800 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { in tg3_open()
11801 netdev_warn(tp->dev, "EEE capability restored\n"); in tg3_open()
11802 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; in tg3_open()
11808 netdev_warn(tp->dev, "TSO capability disabled\n"); in tg3_open()
11811 netdev_notice(tp->dev, "TSO capability restored\n"); in tg3_open()
11830 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), in tg3_open()
11834 pci_set_power_state(tp->pdev, PCI_D3hot); in tg3_open()
11844 if (tp->pcierr_recovery) { in tg3_close()
11847 return -EAGAIN; in tg3_close()
11852 if (pci_device_is_present(tp->pdev)) { in tg3_close()
11862 return ((u64)val->high << 32) | ((u64)val->low); in get_stat64()
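get_stat64() above is the read side of the split counters maintained by TG3_STAT_ADD32: it glues the {high, low} halves back into one 64-bit value. Standalone sketch:

#include <stdint.h>
#include <stdio.h>

struct stat64_pair { uint32_t high, low; };

static uint64_t get_stat64(const struct stat64_pair *v)
{
	return ((uint64_t)v->high << 32) | (uint64_t)v->low;
}

int main(void)
{
	struct stat64_pair v = { .high = 1, .low = 0x200 };

	printf("0x%llx\n", (unsigned long long)get_stat64(&v));	/* 0x100000200 */
	return 0;
}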
11867 struct tg3_hw_stats *hw_stats = tp->hw_stats; in tg3_calc_crc_errors()
11869 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && in tg3_calc_crc_errors()
11881 tp->phy_crc_errors += val; in tg3_calc_crc_errors()
11883 return tp->phy_crc_errors; in tg3_calc_crc_errors()
11886 return get_stat64(&hw_stats->rx_fcs_errors); in tg3_calc_crc_errors()
11890 estats->member = old_estats->member + \
11891 get_stat64(&hw_stats->member)
11895 struct tg3_ethtool_stats *old_estats = &tp->estats_prev; in tg3_get_estats()
11896 struct tg3_hw_stats *hw_stats = tp->hw_stats; in tg3_get_estats()
11979 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; in tg3_get_nstats()
11980 struct tg3_hw_stats *hw_stats = tp->hw_stats; in tg3_get_nstats()
11985 stats->rx_packets = old_stats->rx_packets + in tg3_get_nstats()
11986 get_stat64(&hw_stats->rx_ucast_packets) + in tg3_get_nstats()
11987 get_stat64(&hw_stats->rx_mcast_packets) + in tg3_get_nstats()
11988 get_stat64(&hw_stats->rx_bcast_packets); in tg3_get_nstats()
11990 stats->tx_packets = old_stats->tx_packets + in tg3_get_nstats()
11991 get_stat64(&hw_stats->tx_ucast_packets) + in tg3_get_nstats()
11992 get_stat64(&hw_stats->tx_mcast_packets) + in tg3_get_nstats()
11993 get_stat64(&hw_stats->tx_bcast_packets); in tg3_get_nstats()
11995 stats->rx_bytes = old_stats->rx_bytes + in tg3_get_nstats()
11996 get_stat64(&hw_stats->rx_octets); in tg3_get_nstats()
11997 stats->tx_bytes = old_stats->tx_bytes + in tg3_get_nstats()
11998 get_stat64(&hw_stats->tx_octets); in tg3_get_nstats()
12000 stats->rx_errors = old_stats->rx_errors + in tg3_get_nstats()
12001 get_stat64(&hw_stats->rx_errors); in tg3_get_nstats()
12002 stats->tx_errors = old_stats->tx_errors + in tg3_get_nstats()
12003 get_stat64(&hw_stats->tx_errors) + in tg3_get_nstats()
12004 get_stat64(&hw_stats->tx_mac_errors) + in tg3_get_nstats()
12005 get_stat64(&hw_stats->tx_carrier_sense_errors) + in tg3_get_nstats()
12006 get_stat64(&hw_stats->tx_discards); in tg3_get_nstats()
12008 stats->multicast = old_stats->multicast + in tg3_get_nstats()
12009 get_stat64(&hw_stats->rx_mcast_packets); in tg3_get_nstats()
12010 stats->collisions = old_stats->collisions + in tg3_get_nstats()
12011 get_stat64(&hw_stats->tx_collisions); in tg3_get_nstats()
12013 stats->rx_length_errors = old_stats->rx_length_errors + in tg3_get_nstats()
12014 get_stat64(&hw_stats->rx_frame_too_long_errors) + in tg3_get_nstats()
12015 get_stat64(&hw_stats->rx_undersize_packets); in tg3_get_nstats()
12017 stats->rx_frame_errors = old_stats->rx_frame_errors + in tg3_get_nstats()
12018 get_stat64(&hw_stats->rx_align_errors); in tg3_get_nstats()
12019 stats->tx_aborted_errors = old_stats->tx_aborted_errors + in tg3_get_nstats()
12020 get_stat64(&hw_stats->tx_discards); in tg3_get_nstats()
12021 stats->tx_carrier_errors = old_stats->tx_carrier_errors + in tg3_get_nstats()
12022 get_stat64(&hw_stats->tx_carrier_sense_errors); in tg3_get_nstats()
12024 stats->rx_crc_errors = old_stats->rx_crc_errors + in tg3_get_nstats()
12027 stats->rx_missed_errors = old_stats->rx_missed_errors + in tg3_get_nstats()
12028 get_stat64(&hw_stats->rx_discards); in tg3_get_nstats()
12030 /* Aggregate per-queue counters. The per-queue counters are updated in tg3_get_nstats()
12031 * by a single writer, race-free. The result computed by this loop in tg3_get_nstats()
12038 rx_dropped = (unsigned long)(old_stats->rx_dropped); in tg3_get_nstats()
12039 tx_dropped = (unsigned long)(old_stats->tx_dropped); in tg3_get_nstats()
12041 for (i = 0; i < tp->irq_cnt; i++) { in tg3_get_nstats()
12042 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_get_nstats()
12044 rx_dropped += tnapi->rx_dropped; in tg3_get_nstats()
12045 tx_dropped += tnapi->tx_dropped; in tg3_get_nstats()
12048 stats->rx_dropped = rx_dropped; in tg3_get_nstats()
12049 stats->tx_dropped = tx_dropped; in tg3_get_nstats()
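Per the comment above, the rx_dropped/tx_dropped totals are built by seeding from the previous snapshot (net_stats_prev) and then summing the per-queue counters, each of which has a single writer. A compact sketch of that aggregation:

#include <stdio.h>

struct queue_drops { unsigned long rx_dropped, tx_dropped; };

int main(void)
{
	struct queue_drops per_q[3] = { {1, 0}, {4, 2}, {0, 7} };
	unsigned long rx = 10, tx = 20;		/* previous snapshot */

	for (int i = 0; i < 3; i++) {
		rx += per_q[i].rx_dropped;
		tx += per_q[i].tx_dropped;
	}
	printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx);	/* 15 and 29 */
	return 0;
}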
12062 regs->version = 0; in tg3_get_regs()
12066 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) in tg3_get_regs()
12080 return tp->nvram_size; in tg3_get_eeprom_len()
12092 return -EINVAL; in tg3_get_eeprom()
12094 offset = eeprom->offset; in tg3_get_eeprom()
12095 len = eeprom->len; in tg3_get_eeprom()
12096 eeprom->len = 0; in tg3_get_eeprom()
12098 eeprom->magic = TG3_EEPROM_MAGIC; in tg3_get_eeprom()
12116 b_count = 4 - b_offset; in tg3_get_eeprom()
12121 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); in tg3_get_eeprom()
12125 len -= b_count; in tg3_get_eeprom()
12127 eeprom->len += b_count; in tg3_get_eeprom()
12131 pd = &data[eeprom->len]; in tg3_get_eeprom()
12132 for (i = 0; i < (len - (len & 3)); i += 4) { in tg3_get_eeprom()
12136 i -= 4; in tg3_get_eeprom()
12137 eeprom->len += i; in tg3_get_eeprom()
12143 eeprom->len += i; in tg3_get_eeprom()
12144 ret = -EINTR; in tg3_get_eeprom()
12150 eeprom->len += i; in tg3_get_eeprom()
12154 pd = &data[eeprom->len]; in tg3_get_eeprom()
12156 b_offset = offset + len - b_count; in tg3_get_eeprom()
12161 eeprom->len += b_count; in tg3_get_eeprom()
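tg3_get_eeprom() reads NVRAM in 4-byte words, so an arbitrary (offset, len) request is split into an unaligned head (b_offset/b_count), a run of whole words, and an unaligned tail. A user-space sketch of just that splitting arithmetic, with a dummy word reader in place of tg3_nvram_read_be32() and without the byte-order handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Dummy 4-byte reader: byte at NVRAM offset n is just (uint8_t)n. */
static void read_word(uint32_t off, uint8_t out[4])
{
	for (int i = 0; i < 4; i++)
		out[i] = (uint8_t)(off + i);
}

static void read_span(uint32_t offset, uint32_t len, uint8_t *dst)
{
	uint8_t word[4];
	uint32_t done = 0;
	uint32_t b_offset = offset & 3;

	if (b_offset) {				/* unaligned head */
		uint32_t b_count = 4 - b_offset;

		if (b_count > len)
			b_count = len;
		read_word(offset - b_offset, word);
		memcpy(dst, word + b_offset, b_count);
		done += b_count;
		len -= b_count;
		offset += b_count;
	}
	for (uint32_t i = 0; i + 4 <= len; i += 4) {	/* whole words */
		read_word(offset + i, word);
		memcpy(dst + done + i, word, 4);
	}
	done += len & ~3u;
	if (len & 3) {				/* unaligned tail */
		read_word(offset + (len & ~3u), word);
		memcpy(dst + done, word, len & 3);
	}
}

int main(void)
{
	uint8_t buf[16];

	read_span(6, 11, buf);			/* bytes at offsets 6..16 */
	for (int i = 0; i < 11; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}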
12183 eeprom->magic != TG3_EEPROM_MAGIC) in tg3_set_eeprom()
12184 return -EINVAL; in tg3_set_eeprom()
12186 offset = eeprom->offset; in tg3_set_eeprom()
12187 len = eeprom->len; in tg3_set_eeprom()
12191 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); in tg3_set_eeprom()
12205 ret = tg3_nvram_read_be32(tp, offset+len-4, &end); in tg3_set_eeprom()
12214 return -ENOMEM; in tg3_set_eeprom()
12218 memcpy(buf+len-4, &end, 4); in tg3_set_eeprom()
12219 memcpy(buf + b_offset, data, eeprom->len); in tg3_set_eeprom()
12238 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_get_link_ksettings()
12239 return -EAGAIN; in tg3_get_link_ksettings()
12240 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_get_link_ksettings()
12248 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) in tg3_get_link_ksettings()
12252 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { in tg3_get_link_ksettings()
12258 cmd->base.port = PORT_TP; in tg3_get_link_ksettings()
12261 cmd->base.port = PORT_FIBRE; in tg3_get_link_ksettings()
12263 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in tg3_get_link_ksettings()
12266 advertising = tp->link_config.advertising; in tg3_get_link_ksettings()
12268 if (tp->link_config.flowctrl & FLOW_CTRL_RX) { in tg3_get_link_ksettings()
12269 if (tp->link_config.flowctrl & FLOW_CTRL_TX) { in tg3_get_link_ksettings()
12275 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { in tg3_get_link_ksettings()
12279 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in tg3_get_link_ksettings()
12282 if (netif_running(dev) && tp->link_up) { in tg3_get_link_ksettings()
12283 cmd->base.speed = tp->link_config.active_speed; in tg3_get_link_ksettings()
12284 cmd->base.duplex = tp->link_config.active_duplex; in tg3_get_link_ksettings()
12286 cmd->link_modes.lp_advertising, in tg3_get_link_ksettings()
12287 tp->link_config.rmt_adv); in tg3_get_link_ksettings()
12289 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { in tg3_get_link_ksettings()
12290 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) in tg3_get_link_ksettings()
12291 cmd->base.eth_tp_mdix = ETH_TP_MDI_X; in tg3_get_link_ksettings()
12293 cmd->base.eth_tp_mdix = ETH_TP_MDI; in tg3_get_link_ksettings()
12296 cmd->base.speed = SPEED_UNKNOWN; in tg3_get_link_ksettings()
12297 cmd->base.duplex = DUPLEX_UNKNOWN; in tg3_get_link_ksettings()
12298 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; in tg3_get_link_ksettings()
12300 cmd->base.phy_address = tp->phy_addr; in tg3_get_link_ksettings()
12301 cmd->base.autoneg = tp->link_config.autoneg; in tg3_get_link_ksettings()
12309 u32 speed = cmd->base.speed; in tg3_set_link_ksettings()
12314 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_set_link_ksettings()
12315 return -EAGAIN; in tg3_set_link_ksettings()
12316 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_set_link_ksettings()
12320 if (cmd->base.autoneg != AUTONEG_ENABLE && in tg3_set_link_ksettings()
12321 cmd->base.autoneg != AUTONEG_DISABLE) in tg3_set_link_ksettings()
12322 return -EINVAL; in tg3_set_link_ksettings()
12324 if (cmd->base.autoneg == AUTONEG_DISABLE && in tg3_set_link_ksettings()
12325 cmd->base.duplex != DUPLEX_FULL && in tg3_set_link_ksettings()
12326 cmd->base.duplex != DUPLEX_HALF) in tg3_set_link_ksettings()
12327 return -EINVAL; in tg3_set_link_ksettings()
12330 cmd->link_modes.advertising); in tg3_set_link_ksettings()
12332 if (cmd->base.autoneg == AUTONEG_ENABLE) { in tg3_set_link_ksettings()
12337 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) in tg3_set_link_ksettings()
12341 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) in tg3_set_link_ksettings()
12351 return -EINVAL; in tg3_set_link_ksettings()
12362 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { in tg3_set_link_ksettings()
12364 return -EINVAL; in tg3_set_link_ksettings()
12366 if (cmd->base.duplex != DUPLEX_FULL) in tg3_set_link_ksettings()
12367 return -EINVAL; in tg3_set_link_ksettings()
12371 return -EINVAL; in tg3_set_link_ksettings()
12377 tp->link_config.autoneg = cmd->base.autoneg; in tg3_set_link_ksettings()
12378 if (cmd->base.autoneg == AUTONEG_ENABLE) { in tg3_set_link_ksettings()
12379 tp->link_config.advertising = (advertising | in tg3_set_link_ksettings()
12381 tp->link_config.speed = SPEED_UNKNOWN; in tg3_set_link_ksettings()
12382 tp->link_config.duplex = DUPLEX_UNKNOWN; in tg3_set_link_ksettings()
12384 tp->link_config.advertising = 0; in tg3_set_link_ksettings()
12385 tp->link_config.speed = speed; in tg3_set_link_ksettings()
12386 tp->link_config.duplex = cmd->base.duplex; in tg3_set_link_ksettings()
12389 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; in tg3_set_link_ksettings()
12405 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); in tg3_get_drvinfo()
12406 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); in tg3_get_drvinfo()
12407 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); in tg3_get_drvinfo()
12414 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) in tg3_get_wol()
12415 wol->supported = WAKE_MAGIC; in tg3_get_wol()
12417 wol->supported = 0; in tg3_get_wol()
12418 wol->wolopts = 0; in tg3_get_wol()
12419 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) in tg3_get_wol()
12420 wol->wolopts = WAKE_MAGIC; in tg3_get_wol()
12421 memset(&wol->sopass, 0, sizeof(wol->sopass)); in tg3_get_wol()
12427 struct device *dp = &tp->pdev->dev; in tg3_set_wol()
12429 if (wol->wolopts & ~WAKE_MAGIC) in tg3_set_wol()
12430 return -EINVAL; in tg3_set_wol()
12431 if ((wol->wolopts & WAKE_MAGIC) && in tg3_set_wol()
12433 return -EINVAL; in tg3_set_wol()
12435 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); in tg3_set_wol()
12448 return tp->msg_enable; in tg3_get_msglevel()
12454 tp->msg_enable = value; in tg3_set_msglevel()
12463 return -EAGAIN; in tg3_nway_reset()
12465 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) in tg3_nway_reset()
12466 return -EINVAL; in tg3_nway_reset()
12471 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_nway_reset()
12472 return -EAGAIN; in tg3_nway_reset()
12473 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); in tg3_nway_reset()
12477 spin_lock_bh(&tp->lock); in tg3_nway_reset()
12478 r = -EINVAL; in tg3_nway_reset()
12482 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { in tg3_nway_reset()
12487 spin_unlock_bh(&tp->lock); in tg3_nway_reset()
12500 ering->rx_max_pending = tp->rx_std_ring_mask; in tg3_get_ringparam()
12502 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; in tg3_get_ringparam()
12504 ering->rx_jumbo_max_pending = 0; in tg3_get_ringparam()
12506 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; in tg3_get_ringparam()
12508 ering->rx_pending = tp->rx_pending; in tg3_get_ringparam()
12510 ering->rx_jumbo_pending = tp->rx_jumbo_pending; in tg3_get_ringparam()
12512 ering->rx_jumbo_pending = 0; in tg3_get_ringparam()
12514 ering->tx_pending = tp->napi[0].tx_pending; in tg3_get_ringparam()
12526 if ((ering->rx_pending > tp->rx_std_ring_mask) || in tg3_set_ringparam()
12527 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || in tg3_set_ringparam()
12528 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || in tg3_set_ringparam()
12529 (ering->tx_pending <= MAX_SKB_FRAGS) || in tg3_set_ringparam()
12531 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) in tg3_set_ringparam()
12532 return -EINVAL; in tg3_set_ringparam()
12542 tp->rx_pending = ering->rx_pending; in tg3_set_ringparam()
12545 tp->rx_pending > 63) in tg3_set_ringparam()
12546 tp->rx_pending = 63; in tg3_set_ringparam()
12549 tp->rx_jumbo_pending = ering->rx_jumbo_pending; in tg3_set_ringparam()
12551 for (i = 0; i < tp->irq_max; i++) in tg3_set_ringparam()
12552 tp->napi[i].tx_pending = ering->tx_pending; in tg3_set_ringparam()
12579 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); in tg3_get_pauseparam()
12581 if (tp->link_config.flowctrl & FLOW_CTRL_RX) in tg3_get_pauseparam()
12582 epause->rx_pause = 1; in tg3_get_pauseparam()
12584 epause->rx_pause = 0; in tg3_get_pauseparam()
12586 if (tp->link_config.flowctrl & FLOW_CTRL_TX) in tg3_get_pauseparam()
12587 epause->tx_pause = 1; in tg3_get_pauseparam()
12589 epause->tx_pause = 0; in tg3_get_pauseparam()
12598 if (tp->link_config.autoneg == AUTONEG_ENABLE) in tg3_set_pauseparam()
12604 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_set_pauseparam()
12607 return -EINVAL; in tg3_set_pauseparam()
12609 tp->link_config.flowctrl = 0; in tg3_set_pauseparam()
12610 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); in tg3_set_pauseparam()
12611 if (epause->rx_pause) { in tg3_set_pauseparam()
12612 tp->link_config.flowctrl |= FLOW_CTRL_RX; in tg3_set_pauseparam()
12614 if (epause->tx_pause) { in tg3_set_pauseparam()
12615 tp->link_config.flowctrl |= FLOW_CTRL_TX; in tg3_set_pauseparam()
12617 } else if (epause->tx_pause) { in tg3_set_pauseparam()
12618 tp->link_config.flowctrl |= FLOW_CTRL_TX; in tg3_set_pauseparam()
12621 if (epause->autoneg) in tg3_set_pauseparam()
12626 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { in tg3_set_pauseparam()
12627 if (phydev->autoneg) { in tg3_set_pauseparam()
12638 if (!epause->autoneg) in tg3_set_pauseparam()
12651 if (epause->autoneg) in tg3_set_pauseparam()
12655 if (epause->rx_pause) in tg3_set_pauseparam()
12656 tp->link_config.flowctrl |= FLOW_CTRL_RX; in tg3_set_pauseparam()
12658 tp->link_config.flowctrl &= ~FLOW_CTRL_RX; in tg3_set_pauseparam()
12659 if (epause->tx_pause) in tg3_set_pauseparam()
12660 tp->link_config.flowctrl |= FLOW_CTRL_TX; in tg3_set_pauseparam()
12662 tp->link_config.flowctrl &= ~FLOW_CTRL_TX; in tg3_set_pauseparam()
12680 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; in tg3_set_pauseparam()
12693 return -EOPNOTSUPP; in tg3_get_sset_count()
12703 return -EOPNOTSUPP; in tg3_get_rxnfc()
12705 switch (info->cmd) { in tg3_get_rxnfc()
12707 if (netif_running(tp->dev)) in tg3_get_rxnfc()
12708 info->data = tp->rxq_cnt; in tg3_get_rxnfc()
12710 info->data = num_online_cpus(); in tg3_get_rxnfc()
12711 if (info->data > TG3_RSS_MAX_NUM_QS) in tg3_get_rxnfc()
12712 info->data = TG3_RSS_MAX_NUM_QS; in tg3_get_rxnfc()
12718 return -EOPNOTSUPP; in tg3_get_rxnfc()
12738 rxfh->hfunc = ETH_RSS_HASH_TOP; in tg3_get_rxfh()
12739 if (!rxfh->indir) in tg3_get_rxfh()
12743 rxfh->indir[i] = tp->rss_ind_tbl[i]; in tg3_get_rxfh()
12757 if (rxfh->key || in tg3_set_rxfh()
12758 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in tg3_set_rxfh()
12759 rxfh->hfunc != ETH_RSS_HASH_TOP)) in tg3_set_rxfh()
12760 return -EOPNOTSUPP; in tg3_set_rxfh()
12762 if (!rxfh->indir) in tg3_set_rxfh()
12766 tp->rss_ind_tbl[i] = rxfh->indir[i]; in tg3_set_rxfh()
12787 channel->max_rx = tp->rxq_max; in tg3_get_channels()
12788 channel->max_tx = tp->txq_max; in tg3_get_channels()
12791 channel->rx_count = tp->rxq_cnt; in tg3_get_channels()
12792 channel->tx_count = tp->txq_cnt; in tg3_get_channels()
12794 if (tp->rxq_req) in tg3_get_channels()
12795 channel->rx_count = tp->rxq_req; in tg3_get_channels()
12797 channel->rx_count = min(deflt_qs, tp->rxq_max); in tg3_get_channels()
12799 if (tp->txq_req) in tg3_get_channels()
12800 channel->tx_count = tp->txq_req; in tg3_get_channels()
12802 channel->tx_count = min(deflt_qs, tp->txq_max); in tg3_get_channels()
12812 return -EOPNOTSUPP; in tg3_set_channels()
12814 if (channel->rx_count > tp->rxq_max || in tg3_set_channels()
12815 channel->tx_count > tp->txq_max) in tg3_set_channels()
12816 return -EINVAL; in tg3_set_channels()
12818 tp->rxq_req = channel->rx_count; in tg3_set_channels()
12819 tp->txq_req = channel->tx_count; in tg3_set_channels()
12873 tw32(MAC_LED_CTRL, tp->led_ctrl); in tg3_set_phys_id()
12885 if (tp->hw_stats) in tg3_get_ethtool_stats()
12931 /* The data is in little-endian format in NVRAM. in tg3_vpd_readblock()
12932 * Use the big-endian read routines to preserve in tg3_vpd_readblock()
12940 buf = pci_vpd_alloc(tp->pdev, vpdlen); in tg3_vpd_readblock()
12973 return -EIO; in tg3_test_nvram()
13000 return -EIO; in tg3_test_nvram()
13007 return -EIO; in tg3_test_nvram()
13011 return -ENOMEM; in tg3_test_nvram()
13013 err = -EIO; in tg3_test_nvram()
13045 err = -EIO; in tg3_test_nvram()
13079 err = -EIO; in tg3_test_nvram()
13092 err = -EIO; in tg3_test_nvram()
13108 return -ENOMEM; in tg3_test_nvram()
13126 if (!netif_running(tp->dev)) in tg3_test_link()
13127 return -ENODEV; in tg3_test_link()
13129 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) in tg3_test_link()
13135 if (tp->link_up) in tg3_test_link()
13142 return -EIO; in tg3_test_link()
13323 /* Determine the read-only value. */ in tg3_test_registers()
13326 /* Write zero to the register, then make sure the read-only bits in tg3_test_registers()
13333 /* Test the read-only and read/write bits. */ in tg3_test_registers()
13338 * make sure the read-only bits are not changed and the in tg3_test_registers()
13345 /* Test the read-only bits. */ in tg3_test_registers()
13360 netdev_err(tp->dev, in tg3_test_registers()
13363 return -EIO; in tg3_test_registers()
13379 return -EIO; in tg3_do_mem_test()
13489 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; in tg3_run_loopback()
13491 tnapi = &tp->napi[0]; in tg3_run_loopback()
13492 rnapi = &tp->napi[0]; in tg3_run_loopback()
13493 if (tp->irq_cnt > 1) { in tg3_run_loopback()
13495 rnapi = &tp->napi[1]; in tg3_run_loopback()
13497 tnapi = &tp->napi[1]; in tg3_run_loopback()
13499 coal_now = tnapi->coal_now | rnapi->coal_now; in tg3_run_loopback()
13501 err = -EIO; in tg3_run_loopback()
13504 skb = netdev_alloc_skb(tp->dev, tx_len); in tg3_run_loopback()
13506 return -ENOMEM; in tg3_run_loopback()
13509 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); in tg3_run_loopback()
13524 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); in tg3_run_loopback()
13528 iph->tot_len = htons((u16)(mss + hdr_len)); in tg3_run_loopback()
13539 th->check = 0; in tg3_run_loopback()
13570 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); in tg3_run_loopback()
13571 if (dma_mapping_error(&tp->pdev->dev, map)) { in tg3_run_loopback()
13573 return -EIO; in tg3_run_loopback()
13576 val = tnapi->tx_prod; in tg3_run_loopback()
13577 tnapi->tx_buffers[val].skb = skb; in tg3_run_loopback()
13578 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); in tg3_run_loopback()
13580 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | in tg3_run_loopback()
13581 rnapi->coal_now); in tg3_run_loopback()
13585 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; in tg3_run_loopback()
13590 tnapi->tx_buffers[val].skb = NULL; in tg3_run_loopback()
13592 return -EIO; in tg3_run_loopback()
13595 tnapi->tx_prod++; in tg3_run_loopback()
13600 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); in tg3_run_loopback()
13601 tr32_mailbox(tnapi->prodmbox); in tg3_run_loopback()
13607 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | in tg3_run_loopback()
13612 tx_idx = tnapi->hw_status->idx[0].tx_consumer; in tg3_run_loopback()
13613 rx_idx = rnapi->hw_status->idx[0].rx_producer; in tg3_run_loopback()
13614 if ((tx_idx == tnapi->tx_prod) && in tg3_run_loopback()
13619 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); in tg3_run_loopback()
13622 if (tx_idx != tnapi->tx_prod) in tg3_run_loopback()
13630 desc = &rnapi->rx_rcb[rx_start_idx++]; in tg3_run_loopback()
13631 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; in tg3_run_loopback()
13632 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; in tg3_run_loopback()
13634 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && in tg3_run_loopback()
13635 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) in tg3_run_loopback()
13638 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) in tg3_run_loopback()
13639 - ETH_FCS_LEN; in tg3_run_loopback()
13645 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { in tg3_run_loopback()
13652 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && in tg3_run_loopback()
13653 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) in tg3_run_loopback()
13659 rx_data = tpr->rx_std_buffers[desc_idx].data; in tg3_run_loopback()
13660 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], in tg3_run_loopback()
13663 rx_data = tpr->rx_jmb_buffers[desc_idx].data; in tg3_run_loopback()
13664 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], in tg3_run_loopback()
13669 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, in tg3_run_loopback()
13696 int err = -EIO; in tg3_test_loopback()
13700 if (tp->dma_limit) in tg3_test_loopback()
13701 jmb_pkt_sz = tp->dma_limit - ETH_HLEN; in tg3_test_loopback()
13703 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; in tg3_test_loopback()
13704 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; in tg3_test_loopback()
13706 if (!netif_running(tp->dev)) { in tg3_test_loopback()
13726 /* Reroute all rx packets to the 1st queue */ in tg3_test_loopback()
13732 /* HW errata - mac loopback fails in some cases on 5780. in tg3_test_loopback()
13751 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && in tg3_test_loopback()
13795 /* Re-enable gphy autopowerdown. */ in tg3_test_loopback()
13796 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) in tg3_test_loopback()
13801 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0; in tg3_test_loopback()
13804 tp->phy_flags |= eee_cap; in tg3_test_loopback()
13813 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; in tg3_self_test()
13815 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { in tg3_self_test()
13817 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13827 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13831 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13834 if (etest->flags & ETH_TEST_FL_OFFLINE) { in tg3_self_test()
13852 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) in tg3_self_test()
13856 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13861 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13866 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; in tg3_self_test()
13869 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13874 etest->flags |= ETH_TEST_FL_FAILED; in tg3_self_test()
13893 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) in tg3_self_test()
13904 return -EOPNOTSUPP; in tg3_hwtstamp_set()
13906 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) in tg3_hwtstamp_set()
13907 return -EFAULT; in tg3_hwtstamp_set()
13911 return -ERANGE; in tg3_hwtstamp_set()
13915 tp->rxptpctl = 0; in tg3_hwtstamp_set()
13918 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | in tg3_hwtstamp_set()
13922 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | in tg3_hwtstamp_set()
13926 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | in tg3_hwtstamp_set()
13930 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | in tg3_hwtstamp_set()
13934 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | in tg3_hwtstamp_set()
13938 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | in tg3_hwtstamp_set()
13942 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | in tg3_hwtstamp_set()
13946 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | in tg3_hwtstamp_set()
13950 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | in tg3_hwtstamp_set()
13954 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | in tg3_hwtstamp_set()
13958 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | in tg3_hwtstamp_set()
13962 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | in tg3_hwtstamp_set()
13966 return -ERANGE; in tg3_hwtstamp_set()
13969 if (netif_running(dev) && tp->rxptpctl) in tg3_hwtstamp_set()
13971 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); in tg3_hwtstamp_set()
13978 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? in tg3_hwtstamp_set()
13979 -EFAULT : 0; in tg3_hwtstamp_set()
13988 return -EOPNOTSUPP; in tg3_hwtstamp_get()
13994 switch (tp->rxptpctl) { in tg3_hwtstamp_get()
14036 return -ERANGE; in tg3_hwtstamp_get()
14039 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? in tg3_hwtstamp_get()
14040 -EFAULT : 0; in tg3_hwtstamp_get()
14051 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) in tg3_ioctl()
14052 return -EAGAIN; in tg3_ioctl()
14053 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); in tg3_ioctl()
14059 data->phy_id = tp->phy_addr; in tg3_ioctl()
14065 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) in tg3_ioctl()
14069 return -EAGAIN; in tg3_ioctl()
14071 spin_lock_bh(&tp->lock); in tg3_ioctl()
14072 err = __tg3_readphy(tp, data->phy_id & 0x1f, in tg3_ioctl()
14073 data->reg_num & 0x1f, &mii_regval); in tg3_ioctl()
14074 spin_unlock_bh(&tp->lock); in tg3_ioctl()
14076 data->val_out = mii_regval; in tg3_ioctl()
14082 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) in tg3_ioctl()
14086 return -EAGAIN; in tg3_ioctl()
14088 spin_lock_bh(&tp->lock); in tg3_ioctl()
14089 err = __tg3_writephy(tp, data->phy_id & 0x1f, in tg3_ioctl()
14090 data->reg_num & 0x1f, data->val_in); in tg3_ioctl()
14091 spin_unlock_bh(&tp->lock); in tg3_ioctl()
14105 return -EOPNOTSUPP; in tg3_ioctl()
14115 memcpy(ec, &tp->coal, sizeof(*ec)); in tg3_get_coalesce()
14135 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || in tg3_set_coalesce()
14136 (!ec->rx_coalesce_usecs) || in tg3_set_coalesce()
14137 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || in tg3_set_coalesce()
14138 (!ec->tx_coalesce_usecs) || in tg3_set_coalesce()
14139 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || in tg3_set_coalesce()
14140 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || in tg3_set_coalesce()
14141 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || in tg3_set_coalesce()
14142 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || in tg3_set_coalesce()
14143 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || in tg3_set_coalesce()
14144 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || in tg3_set_coalesce()
14145 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || in tg3_set_coalesce()
14146 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) in tg3_set_coalesce()
14147 return -EINVAL; in tg3_set_coalesce()
14150 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; in tg3_set_coalesce()
14151 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; in tg3_set_coalesce()
14152 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; in tg3_set_coalesce()
14153 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; in tg3_set_coalesce()
14154 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; in tg3_set_coalesce()
14155 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; in tg3_set_coalesce()
14156 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; in tg3_set_coalesce()
14157 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; in tg3_set_coalesce()
14158 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; in tg3_set_coalesce()
14162 __tg3_set_coalesce(tp, &tp->coal); in tg3_set_coalesce()
14172 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { in tg3_set_eee()
14173 netdev_warn(tp->dev, "Board does not support EEE!\n"); in tg3_set_eee()
14174 return -EOPNOTSUPP; in tg3_set_eee()
14177 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) { in tg3_set_eee()
14178 netdev_warn(tp->dev, in tg3_set_eee()
14180 return -EINVAL; in tg3_set_eee()
14183 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { in tg3_set_eee()
14184 netdev_warn(tp->dev, in tg3_set_eee()
14187 return -EINVAL; in tg3_set_eee()
14190 tp->eee.eee_enabled = edata->eee_enabled; in tg3_set_eee()
14191 tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled; in tg3_set_eee()
14192 tp->eee.tx_lpi_timer = edata->tx_lpi_timer; in tg3_set_eee()
14194 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; in tg3_set_eee()
14197 if (netif_running(tp->dev)) { in tg3_set_eee()
14211 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { in tg3_get_eee()
14212 netdev_warn(tp->dev, in tg3_get_eee()
14214 return -EOPNOTSUPP; in tg3_get_eee()
14217 *edata = tp->eee; in tg3_get_eee()
14268 spin_lock_bh(&tp->lock); in tg3_get_stats64()
14269 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { in tg3_get_stats64()
14270 *stats = tp->net_stats_prev; in tg3_get_stats64()
14271 spin_unlock_bh(&tp->lock); in tg3_get_stats64()
14276 spin_unlock_bh(&tp->lock); in tg3_get_stats64()
14294 WRITE_ONCE(dev->mtu, new_mtu); in tg3_set_mtu()
14380 tp->nvram_size = EEPROM_CHIP_SIZE; in tg3_get_eeprom_size()
14397 while (cursize < tp->nvram_size) { in tg3_get_eeprom_size()
14407 tp->nvram_size = cursize; in tg3_get_eeprom_size()
14426 * 16-bit value at offset 0xf2. The tg3_nvram_read() in tg3_get_nvram_size()
14430 * want will always reside in the lower 16-bits. in tg3_get_nvram_size()
14433 * opposite the endianness of the CPU. The 16-bit in tg3_get_nvram_size()
14436 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; in tg3_get_nvram_size()
14440 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_nvram_size()
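The comment excerpted above explains the swab16() in tg3_get_nvram_size(): the size lives as a 16-bit kilobyte count at NVRAM offset 0xf2, and because the read helper returns data in the opposite byte order it lands byte-swapped in the low half of the 32-bit word. A sketch of that recovery with a made-up word value:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	/* Hypothetical 32-bit word read back from NVRAM: the kilobyte count
	 * sits byte-swapped in the lower 16 bits. */
	uint32_t val = 0x00000002;	/* 0x0002 -> swab -> 0x0200 = 512 KB */
	uint32_t nvram_size = swab16((uint16_t)(val & 0xffff)) * 1024;

	printf("nvram size = %u bytes\n", nvram_size);	/* 524288 */
	return 0;
}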
14459 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_nvram_info()
14460 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; in tg3_get_nvram_info()
14464 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_nvram_info()
14465 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; in tg3_get_nvram_info()
14468 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_nvram_info()
14469 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_nvram_info()
14473 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_nvram_info()
14474 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; in tg3_get_nvram_info()
14478 tp->nvram_jedecnum = JEDEC_SAIFUN; in tg3_get_nvram_info()
14479 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; in tg3_get_nvram_info()
14483 tp->nvram_jedecnum = JEDEC_SST; in tg3_get_nvram_info()
14484 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; in tg3_get_nvram_info()
14488 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_nvram_info()
14489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; in tg3_get_nvram_info()
14498 tp->nvram_pagesize = 256; in tg3_nvram_get_pagesize()
14501 tp->nvram_pagesize = 512; in tg3_nvram_get_pagesize()
14504 tp->nvram_pagesize = 1024; in tg3_nvram_get_pagesize()
14507 tp->nvram_pagesize = 2048; in tg3_nvram_get_pagesize()
14510 tp->nvram_pagesize = 4096; in tg3_nvram_get_pagesize()
14513 tp->nvram_pagesize = 264; in tg3_nvram_get_pagesize()
14516 tp->nvram_pagesize = 528; in tg3_nvram_get_pagesize()
14534 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5752_nvram_info()
14538 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5752_nvram_info()
14545 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5752_nvram_info()
14555 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_5752_nvram_info()
14580 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5755_nvram_info()
14583 tp->nvram_pagesize = 264; in tg3_get_5755_nvram_info()
14586 tp->nvram_size = (protect ? 0x3e200 : in tg3_get_5755_nvram_info()
14589 tp->nvram_size = (protect ? 0x1f200 : in tg3_get_5755_nvram_info()
14592 tp->nvram_size = (protect ? 0x1f200 : in tg3_get_5755_nvram_info()
14598 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5755_nvram_info()
14601 tp->nvram_pagesize = 256; in tg3_get_5755_nvram_info()
14603 tp->nvram_size = (protect ? in tg3_get_5755_nvram_info()
14607 tp->nvram_size = (protect ? in tg3_get_5755_nvram_info()
14611 tp->nvram_size = (protect ? in tg3_get_5755_nvram_info()
14629 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5787_nvram_info()
14631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_5787_nvram_info()
14640 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5787_nvram_info()
14643 tp->nvram_pagesize = 264; in tg3_get_5787_nvram_info()
14648 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5787_nvram_info()
14651 tp->nvram_pagesize = 256; in tg3_get_5787_nvram_info()
14678 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5761_nvram_info()
14682 tp->nvram_pagesize = 256; in tg3_get_5761_nvram_info()
14692 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5761_nvram_info()
14695 tp->nvram_pagesize = 256; in tg3_get_5761_nvram_info()
14700 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); in tg3_get_5761_nvram_info()
14707 tp->nvram_size = TG3_NVRAM_SIZE_2MB; in tg3_get_5761_nvram_info()
14713 tp->nvram_size = TG3_NVRAM_SIZE_1MB; in tg3_get_5761_nvram_info()
14719 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_5761_nvram_info()
14725 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_5761_nvram_info()
14733 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5906_nvram_info()
14735 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_5906_nvram_info()
14747 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_57780_nvram_info()
14749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_57780_nvram_info()
14761 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_57780_nvram_info()
14769 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_57780_nvram_info()
14773 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_57780_nvram_info()
14777 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_57780_nvram_info()
14784 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_57780_nvram_info()
14790 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_57780_nvram_info()
14793 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_57780_nvram_info()
14796 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_57780_nvram_info()
14806 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) in tg3_get_57780_nvram_info()
14820 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5717_nvram_info()
14822 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_5717_nvram_info()
14834 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5717_nvram_info()
14844 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_5717_nvram_info()
14847 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_5717_nvram_info()
14861 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5717_nvram_info()
14872 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_5717_nvram_info()
14875 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_5717_nvram_info()
14885 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) in tg3_get_5717_nvram_info()
14908 tp->nvram_pagesize = 4096; in tg3_get_5720_nvram_info()
14909 tp->nvram_jedecnum = JEDEC_MACRONIX; in tg3_get_5720_nvram_info()
14914 tp->nvram_size = in tg3_get_5720_nvram_info()
14938 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5720_nvram_info()
14944 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; in tg3_get_5720_nvram_info()
14946 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; in tg3_get_5720_nvram_info()
14960 tp->nvram_jedecnum = JEDEC_ATMEL; in tg3_get_5720_nvram_info()
14968 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_5720_nvram_info()
14973 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_5720_nvram_info()
14977 tp->nvram_size = TG3_NVRAM_SIZE_1MB; in tg3_get_5720_nvram_info()
14981 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_5720_nvram_info()
15003 tp->nvram_jedecnum = JEDEC_ST; in tg3_get_5720_nvram_info()
15012 tp->nvram_size = TG3_NVRAM_SIZE_256KB; in tg3_get_5720_nvram_info()
15018 tp->nvram_size = TG3_NVRAM_SIZE_512KB; in tg3_get_5720_nvram_info()
15024 tp->nvram_size = TG3_NVRAM_SIZE_1MB; in tg3_get_5720_nvram_info()
15028 tp->nvram_size = TG3_NVRAM_SIZE_128KB; in tg3_get_5720_nvram_info()
15038 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) in tg3_get_5720_nvram_info()
15081 netdev_warn(tp->dev, in tg3_nvram_init()
15088 tp->nvram_size = 0; in tg3_nvram_init()
15114 if (tp->nvram_size == 0) in tg3_nvram_init()
15203 tp->pdev->subsystem_vendor) && in tg3_lookup_by_subsys()
15205 tp->pdev->subsystem_device)) in tg3_lookup_by_subsys()
15215 tp->phy_id = TG3_PHY_ID_INVALID; in tg3_get_eeprom_hw_cfg()
15216 tp->led_ctrl = LED_CTRL_MODE_PHY_1; in tg3_get_eeprom_hw_cfg()
15233 device_set_wakeup_enable(&tp->pdev->dev, true); in tg3_get_eeprom_hw_cfg()
15246 tp->nic_sram_data_cfg = nic_cfg; in tg3_get_eeprom_hw_cfg()
15279 tp->phy_id = eeprom_phy_id; in tg3_get_eeprom_hw_cfg()
15282 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; in tg3_get_eeprom_hw_cfg()
15284 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; in tg3_get_eeprom_hw_cfg()
15296 tp->led_ctrl = LED_CTRL_MODE_PHY_1; in tg3_get_eeprom_hw_cfg()
15300 tp->led_ctrl = LED_CTRL_MODE_PHY_2; in tg3_get_eeprom_hw_cfg()
15304 tp->led_ctrl = LED_CTRL_MODE_MAC; in tg3_get_eeprom_hw_cfg()
15311 tp->led_ctrl = LED_CTRL_MODE_PHY_1; in tg3_get_eeprom_hw_cfg()
15316 tp->led_ctrl = LED_CTRL_MODE_SHARED; in tg3_get_eeprom_hw_cfg()
15319 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | in tg3_get_eeprom_hw_cfg()
15324 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | in tg3_get_eeprom_hw_cfg()
15330 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; in tg3_get_eeprom_hw_cfg()
15334 tp->led_ctrl = LED_CTRL_MODE_COMBO; in tg3_get_eeprom_hw_cfg()
15336 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | in tg3_get_eeprom_hw_cfg()
15344 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) in tg3_get_eeprom_hw_cfg()
15345 tp->led_ctrl = LED_CTRL_MODE_PHY_2; in tg3_get_eeprom_hw_cfg()
15348 tp->led_ctrl = LED_CTRL_MODE_PHY_1; in tg3_get_eeprom_hw_cfg()
15352 if ((tp->pdev->subsystem_vendor == in tg3_get_eeprom_hw_cfg()
15354 (tp->pdev->subsystem_device == 0x205a || in tg3_get_eeprom_hw_cfg()
15355 tp->pdev->subsystem_device == 0x2063)) in tg3_get_eeprom_hw_cfg()
15372 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && in tg3_get_eeprom_hw_cfg()
15379 device_set_wakeup_enable(&tp->pdev->dev, true); in tg3_get_eeprom_hw_cfg()
15383 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; in tg3_get_eeprom_hw_cfg()
15385 /* serdes signal pre-emphasis in register 0x590 set by */ in tg3_get_eeprom_hw_cfg()
15388 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; in tg3_get_eeprom_hw_cfg()
15394 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; in tg3_get_eeprom_hw_cfg()
15405 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; in tg3_get_eeprom_hw_cfg()
15407 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; in tg3_get_eeprom_hw_cfg()
15418 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; in tg3_get_eeprom_hw_cfg()
15422 device_set_wakeup_enable(&tp->pdev->dev, in tg3_get_eeprom_hw_cfg()
15425 device_set_wakeup_capable(&tp->pdev->dev, false); in tg3_get_eeprom_hw_cfg()
15458 return -EBUSY; in tg3_ape_otp_read()
15477 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; in tg3_issue_otp_command()
15481 * configuration is a 32-bit value that straddles the alignment boundary.
15482 * We do two 32-bit reads and then shift and merge the results.
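The two comment lines above describe the OTP PHY-configuration read: the 32-bit field straddles a 32-bit alignment boundary, so the driver reads the two neighbouring words and shift-merges them. A generic sketch of that merge; the particular half selection (low 16 bits of the first word joined with the high 16 bits of the second) and the word values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Merge a 32-bit field whose upper half sits in the low 16 bits of the
 * first aligned word and whose lower half sits in the high 16 bits of
 * the following word. */
static uint32_t merge_straddled(uint32_t first_word, uint32_t second_word)
{
	return ((first_word & 0x0000ffffu) << 16) | (second_word >> 16);
}

int main(void)
{
	uint32_t w0 = 0xdeadbeefu;	/* ...beef is the field's upper half */
	uint32_t w1 = 0xcafef00du;	/* cafe... is the field's lower half */

	printf("0x%08x\n", merge_straddled(w0, w1));	/* 0xbeefcafe */
	return 0;
}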
15514 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { in tg3_phy_init_link_config()
15515 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) in tg3_phy_init_link_config()
15520 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) in tg3_phy_init_link_config()
15529 tp->link_config.advertising = adv; in tg3_phy_init_link_config()
15530 tp->link_config.speed = SPEED_UNKNOWN; in tg3_phy_init_link_config()
15531 tp->link_config.duplex = DUPLEX_UNKNOWN; in tg3_phy_init_link_config()
15532 tp->link_config.autoneg = AUTONEG_ENABLE; in tg3_phy_init_link_config()
15533 tp->link_config.active_speed = SPEED_UNKNOWN; in tg3_phy_init_link_config()
15534 tp->link_config.active_duplex = DUPLEX_UNKNOWN; in tg3_phy_init_link_config()
15536 tp->old_link = -1; in tg3_phy_init_link_config()
15547 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; in tg3_phy_probe()
15550 switch (tp->pci_fn) { in tg3_phy_probe()
15552 tp->phy_ape_lock = TG3_APE_LOCK_PHY0; in tg3_phy_probe()
15555 tp->phy_ape_lock = TG3_APE_LOCK_PHY1; in tg3_phy_probe()
15558 tp->phy_ape_lock = TG3_APE_LOCK_PHY2; in tg3_phy_probe()
15561 tp->phy_ape_lock = TG3_APE_LOCK_PHY3; in tg3_phy_probe()
15567 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && in tg3_phy_probe()
15568 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) in tg3_phy_probe()
15569 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | in tg3_phy_probe()
15584 * to either the hard-coded table based PHY_ID and failing in tg3_phy_probe()
15598 tp->phy_id = hw_phy_id; in tg3_phy_probe()
15600 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; in tg3_phy_probe()
15602 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; in tg3_phy_probe()
15604 if (tp->phy_id != TG3_PHY_ID_INVALID) { in tg3_phy_probe()
15616 tp->phy_id = p->phy_id; in tg3_phy_probe()
15625 return -ENODEV; in tg3_phy_probe()
15628 if (!tp->phy_id || in tg3_phy_probe()
15629 tp->phy_id == TG3_PHY_ID_BCM8002) in tg3_phy_probe()
15630 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; in tg3_phy_probe()
15634 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && in tg3_phy_probe()
15643 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; in tg3_phy_probe()
15645 linkmode_zero(tp->eee.supported); in tg3_phy_probe()
15647 tp->eee.supported); in tg3_phy_probe()
15649 tp->eee.supported); in tg3_phy_probe()
15650 linkmode_copy(tp->eee.advertised, tp->eee.supported); in tg3_phy_probe()
15652 tp->eee.eee_enabled = 1; in tg3_phy_probe()
15653 tp->eee.tx_lpi_enabled = 1; in tg3_phy_probe()
15654 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; in tg3_phy_probe()
15659 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && in tg3_phy_probe()
15660 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && in tg3_phy_probe()
15677 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, in tg3_phy_probe()
15678 tp->link_config.flowctrl); in tg3_phy_probe()
15686 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { in tg3_phy_probe()
15720 memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); in tg3_read_vpd()
15721 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i); in tg3_read_vpd()
15732 memcpy(tp->board_part_number, &vpd_data[i], len); in tg3_read_vpd()
15736 if (tp->board_part_number[0]) in tg3_read_vpd()
15741 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || in tg3_read_vpd()
15742 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) in tg3_read_vpd()
15743 strcpy(tp->board_part_number, "BCM5717"); in tg3_read_vpd()
15744 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) in tg3_read_vpd()
15745 strcpy(tp->board_part_number, "BCM5718"); in tg3_read_vpd()
15749 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) in tg3_read_vpd()
15750 strcpy(tp->board_part_number, "BCM57780"); in tg3_read_vpd()
15751 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) in tg3_read_vpd()
15752 strcpy(tp->board_part_number, "BCM57760"); in tg3_read_vpd()
15753 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) in tg3_read_vpd()
15754 strcpy(tp->board_part_number, "BCM57790"); in tg3_read_vpd()
15755 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) in tg3_read_vpd()
15756 strcpy(tp->board_part_number, "BCM57788"); in tg3_read_vpd()
15760 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) in tg3_read_vpd()
15761 strcpy(tp->board_part_number, "BCM57761"); in tg3_read_vpd()
15762 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) in tg3_read_vpd()
15763 strcpy(tp->board_part_number, "BCM57765"); in tg3_read_vpd()
15764 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) in tg3_read_vpd()
15765 strcpy(tp->board_part_number, "BCM57781"); in tg3_read_vpd()
15766 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) in tg3_read_vpd()
15767 strcpy(tp->board_part_number, "BCM57785"); in tg3_read_vpd()
15768 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) in tg3_read_vpd()
15769 strcpy(tp->board_part_number, "BCM57791"); in tg3_read_vpd()
15770 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) in tg3_read_vpd()
15771 strcpy(tp->board_part_number, "BCM57795"); in tg3_read_vpd()
15775 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) in tg3_read_vpd()
15776 strcpy(tp->board_part_number, "BCM57762"); in tg3_read_vpd()
15777 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) in tg3_read_vpd()
15778 strcpy(tp->board_part_number, "BCM57766"); in tg3_read_vpd()
15779 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) in tg3_read_vpd()
15780 strcpy(tp->board_part_number, "BCM57782"); in tg3_read_vpd()
15781 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) in tg3_read_vpd()
15782 strcpy(tp->board_part_number, "BCM57786"); in tg3_read_vpd()
15786 strcpy(tp->board_part_number, "BCM95906"); in tg3_read_vpd()
15789 strcpy(tp->board_part_number, "none"); in tg3_read_vpd()
15829 dst_off = strlen(tp->fw_ver); in tg3_read_bc_ver()
15832 if (TG3_VER_SIZE - dst_off < 16 || in tg3_read_bc_ver()
15836 offset = offset + ver_offset - start; in tg3_read_bc_ver()
15842 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); in tg3_read_bc_ver()
15853 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, in tg3_read_bc_ver()
15871 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); in tg3_read_hwsb_ver()
15878 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); in tg3_read_sb_ver()
15918 offset = strlen(tp->fw_ver); in tg3_read_sb_ver()
15919 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, in tg3_read_sb_ver()
15923 offset = strlen(tp->fw_ver); in tg3_read_sb_ver()
15924 if (offset < TG3_VER_SIZE - 1) in tg3_read_sb_ver()
15925 tp->fw_ver[offset] = 'a' + build - 1; in tg3_read_sb_ver()
15949 else if (tg3_nvram_read(tp, offset - 4, &start)) in tg3_read_mgmtfw_ver()
15957 offset += val - start; in tg3_read_mgmtfw_ver()
15959 vlen = strlen(tp->fw_ver); in tg3_read_mgmtfw_ver()
15961 tp->fw_ver[vlen++] = ','; in tg3_read_mgmtfw_ver()
15962 tp->fw_ver[vlen++] = ' '; in tg3_read_mgmtfw_ver()
15971 if (vlen > TG3_VER_SIZE - sizeof(v)) { in tg3_read_mgmtfw_ver()
15972 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); in tg3_read_mgmtfw_ver()
15976 memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); in tg3_read_mgmtfw_ver()
16007 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) in tg3_read_dash_ver()
16012 vlen = strlen(tp->fw_ver); in tg3_read_dash_ver()
16014 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", in tg3_read_dash_ver()
16042 vlen = strlen(tp->fw_ver); in tg3_read_otp_ver()
16043 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); in tg3_read_otp_ver()
16052 if (tp->fw_ver[0] != 0) in tg3_read_fw_ver()
16056 strcat(tp->fw_ver, "sb"); in tg3_read_fw_ver()
16081 tp->fw_ver[TG3_VER_SIZE - 1] = 0; in tg3_read_fw_ver()
16104 unsigned int func, devnr = tp->pdev->devfn & ~7; in tg3_find_peer()
16107 peer = pci_get_slot(tp->pdev->bus, devnr | func); in tg3_find_peer()
16108 if (peer && peer != tp->pdev) in tg3_find_peer()
16112 /* 5704 can be configured in single-port mode, set peer to in tg3_find_peer()
16113 * tp->pdev in that case. in tg3_find_peer()
16116 peer = tp->pdev; in tg3_find_peer()
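The fragment above locates the other function of a dual-port 5704 in the same PCI slot and falls back to the device itself when the board is wired single-port. A minimal sketch of that lookup pattern, assuming only standard PCI core helpers; the function name is hypothetical, and reference counting of the returned peer is left to the caller, as in the driver.

#include <linux/pci.h>

static struct pci_dev *find_slot_peer(struct pci_dev *pdev)
{
	unsigned int devnr = pdev->devfn & ~7;	/* function 0 of this slot */
	struct pci_dev *peer = NULL;
	unsigned int func;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(pdev->bus, devnr | func);
		if (peer && peer != pdev)
			break;		/* found another function: the peer */
		pci_dev_put(peer);	/* NULL-safe; drop the ref on self/none */
		peer = NULL;
	}

	/* single-port configuration: treat the device as its own peer */
	return peer ? peer : pdev;
}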
16131 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; in tg3_detect_asic_rev()
16140 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || in tg3_detect_asic_rev()
16141 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || in tg3_detect_asic_rev()
16142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || in tg3_detect_asic_rev()
16143 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || in tg3_detect_asic_rev()
16144 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || in tg3_detect_asic_rev()
16145 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || in tg3_detect_asic_rev()
16146 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || in tg3_detect_asic_rev()
16147 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || in tg3_detect_asic_rev()
16148 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || in tg3_detect_asic_rev()
16149 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || in tg3_detect_asic_rev()
16150 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) in tg3_detect_asic_rev()
16152 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || in tg3_detect_asic_rev()
16153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || in tg3_detect_asic_rev()
16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || in tg3_detect_asic_rev()
16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || in tg3_detect_asic_rev()
16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || in tg3_detect_asic_rev()
16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || in tg3_detect_asic_rev()
16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || in tg3_detect_asic_rev()
16159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || in tg3_detect_asic_rev()
16160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || in tg3_detect_asic_rev()
16161 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) in tg3_detect_asic_rev()
16166 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); in tg3_detect_asic_rev()
16173 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; in tg3_detect_asic_rev()
16176 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; in tg3_detect_asic_rev()
16224 (tp->phy_flags & TG3_PHYFLG_IS_FET)) in tg3_10_100_only_device()
16227 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { in tg3_10_100_only_device()
16229 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) in tg3_10_100_only_device()
16254 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); in tg3_get_invariants()
16256 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); in tg3_get_invariants()
16258 /* Important! -- Make sure register accesses are byteswapped in tg3_get_invariants()
16263 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, in tg3_get_invariants()
16265 tp->misc_host_ctrl |= (misc_ctrl_reg & in tg3_get_invariants()
16267 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, in tg3_get_invariants()
16268 tp->misc_host_ctrl); in tg3_get_invariants()
16278 * will drive special cycles with non-zero data during the in tg3_get_invariants()
16281 * non-zero address during special cycles. However, only in tg3_get_invariants()
16282 * these ICH bridges are known to drive non-zero addresses in tg3_get_invariants()
16309 while (pci_id->vendor != 0) { in tg3_get_invariants()
16310 bridge = pci_get_device(pci_id->vendor, pci_id->device, in tg3_get_invariants()
16316 if (pci_id->rev != PCI_ANY_ID) { in tg3_get_invariants()
16317 if (bridge->revision > pci_id->rev) in tg3_get_invariants()
16320 if (bridge->subordinate && in tg3_get_invariants()
16321 (bridge->subordinate->number == in tg3_get_invariants()
16322 tp->pdev->bus->number)) { in tg3_get_invariants()
16342 while (pci_id->vendor != 0) { in tg3_get_invariants()
16343 bridge = pci_get_device(pci_id->vendor, in tg3_get_invariants()
16344 pci_id->device, in tg3_get_invariants()
16350 if (bridge->subordinate && in tg3_get_invariants()
16351 (bridge->subordinate->number <= in tg3_get_invariants()
16352 tp->pdev->bus->number) && in tg3_get_invariants()
16353 (bridge->subordinate->busn_res.end >= in tg3_get_invariants()
16354 tp->pdev->bus->number)) { in tg3_get_invariants()
16363 * DMA addresses > 40-bit. This bridge may have additional in tg3_get_invariants()
16364 * 57xx devices behind it in some 4-port NIC designs for example. in tg3_get_invariants()
16365 * Any tg3 device found behind the bridge will also need the 40-bit in tg3_get_invariants()
16370 tp->msi_cap = tp->pdev->msi_cap; in tg3_get_invariants()
16378 if (bridge && bridge->subordinate && in tg3_get_invariants()
16379 (bridge->subordinate->number <= in tg3_get_invariants()
16380 tp->pdev->bus->number) && in tg3_get_invariants()
16381 (bridge->subordinate->busn_res.end >= in tg3_get_invariants()
16382 tp->pdev->bus->number)) { in tg3_get_invariants()
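The checks above decide whether this NIC sits behind the bridge described in the comment earlier (which limits DMA addresses to 40 bits) by testing whether the device's bus number falls inside the bridge's subordinate bus range. The same containment test, isolated into a sketch with a hypothetical helper name:

#include <linux/pci.h>

static bool device_is_behind_bridge(struct pci_dev *dev,
				    struct pci_dev *bridge)
{
	/* a device is downstream of a bridge when its bus number lies
	 * within the bridge's subordinate bus number range
	 */
	return bridge->subordinate &&
	       bridge->subordinate->number <= dev->bus->number &&
	       bridge->subordinate->busn_res.end >= dev->bus->number;
}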
16392 tp->pdev_peer = tg3_find_peer(tp); in tg3_get_invariants()
16414 tp->fw_needed = FIRMWARE_TG3TSO5; in tg3_get_invariants()
16416 tp->fw_needed = FIRMWARE_TG3TSO; in tg3_get_invariants()
16432 tp->fw_needed = NULL; in tg3_get_invariants()
16436 tp->fw_needed = FIRMWARE_TG3; in tg3_get_invariants()
16439 tp->fw_needed = FIRMWARE_TG357766; in tg3_get_invariants()
16441 tp->irq_max = 1; in tg3_get_invariants()
16449 tp->pdev_peer == tp->pdev)) in tg3_get_invariants()
16459 tp->irq_max = TG3_IRQ_MAX_VECS; in tg3_get_invariants()
16463 tp->txq_max = 1; in tg3_get_invariants()
16464 tp->rxq_max = 1; in tg3_get_invariants()
16465 if (tp->irq_max > 1) { in tg3_get_invariants()
16466 tp->rxq_max = TG3_RSS_MAX_NUM_QS; in tg3_get_invariants()
16471 tp->txq_max = tp->irq_max - 1; in tg3_get_invariants()
16479 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; in tg3_get_invariants()
16496 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, in tg3_get_invariants()
16499 if (pci_is_pcie(tp->pdev)) { in tg3_get_invariants()
16504 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); in tg3_get_invariants()
16526 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); in tg3_get_invariants()
16527 if (!tp->pcix_cap) { in tg3_get_invariants()
16528 dev_err(&tp->pdev->dev, in tg3_get_invariants()
16529 "Cannot find PCI-X capability, aborting\n"); in tg3_get_invariants()
16530 return -EIO; in tg3_get_invariants()
16547 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, in tg3_get_invariants()
16548 &tp->pci_cacheline_sz); in tg3_get_invariants()
16549 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, in tg3_get_invariants()
16550 &tp->pci_lat_timer); in tg3_get_invariants()
16552 tp->pci_lat_timer < 64) { in tg3_get_invariants()
16553 tp->pci_lat_timer = 64; in tg3_get_invariants()
16554 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, in tg3_get_invariants()
16555 tp->pci_lat_timer); in tg3_get_invariants()
16558 /* Important! -- It is critical that the PCI-X hw workaround in tg3_get_invariants()
16567 /* If we are in PCI-X mode, enable register write workaround. in tg3_get_invariants()
16581 pci_read_config_dword(tp->pdev, in tg3_get_invariants()
16582 tp->pdev->pm_cap + PCI_PM_CTRL, in tg3_get_invariants()
16586 pci_write_config_dword(tp->pdev, in tg3_get_invariants()
16587 tp->pdev->pm_cap + PCI_PM_CTRL, in tg3_get_invariants()
16591 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); in tg3_get_invariants()
16593 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); in tg3_get_invariants()
16602 /* Chip-specific fixup from Broadcom driver */ in tg3_get_invariants()
16606 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); in tg3_get_invariants()
16610 tp->read32 = tg3_read32; in tg3_get_invariants()
16611 tp->write32 = tg3_write32; in tg3_get_invariants()
16612 tp->read32_mbox = tg3_read32; in tg3_get_invariants()
16613 tp->write32_mbox = tg3_write32; in tg3_get_invariants()
16614 tp->write32_tx_mbox = tg3_write32; in tg3_get_invariants()
16615 tp->write32_rx_mbox = tg3_write32; in tg3_get_invariants()
16619 tp->write32 = tg3_write_indirect_reg32; in tg3_get_invariants()
16630 tp->write32 = tg3_write_flush_reg32; in tg3_get_invariants()
16634 tp->write32_tx_mbox = tg3_write32_tx_mbox; in tg3_get_invariants()
16636 tp->write32_rx_mbox = tg3_write_flush_reg32; in tg3_get_invariants()
16640 tp->read32 = tg3_read_indirect_reg32; in tg3_get_invariants()
16641 tp->write32 = tg3_write_indirect_reg32; in tg3_get_invariants()
16642 tp->read32_mbox = tg3_read_indirect_mbox; in tg3_get_invariants()
16643 tp->write32_mbox = tg3_write_indirect_mbox; in tg3_get_invariants()
16644 tp->write32_tx_mbox = tg3_write_indirect_mbox; in tg3_get_invariants()
16645 tp->write32_rx_mbox = tg3_write_indirect_mbox; in tg3_get_invariants()
16647 iounmap(tp->regs); in tg3_get_invariants()
16648 tp->regs = NULL; in tg3_get_invariants()
16650 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); in tg3_get_invariants()
16652 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); in tg3_get_invariants()
16655 tp->read32_mbox = tg3_read32_mbox_5906; in tg3_get_invariants()
16656 tp->write32_mbox = tg3_write32_mbox_5906; in tg3_get_invariants()
16657 tp->write32_tx_mbox = tg3_write32_mbox_5906; in tg3_get_invariants()
16658 tp->write32_rx_mbox = tg3_write32_mbox_5906; in tg3_get_invariants()
16661 if (tp->write32 == tg3_write_indirect_reg32 || in tg3_get_invariants()
16675 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; in tg3_get_invariants()
16679 pci_read_config_dword(tp->pdev, in tg3_get_invariants()
16680 tp->pcix_cap + PCI_X_STATUS, in tg3_get_invariants()
16682 tp->pci_fn = val & 0x7; in tg3_get_invariants()
16692 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; in tg3_get_invariants()
16694 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> in tg3_get_invariants()
16699 tp->write32_tx_mbox = tg3_write_flush_reg32; in tg3_get_invariants()
16700 tp->write32_rx_mbox = tg3_write_flush_reg32; in tg3_get_invariants()
16716 tp->fw_needed = NULL; in tg3_get_invariants()
16726 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, in tg3_get_invariants()
16730 tp->ape_hb_interval = in tg3_get_invariants()
16734 /* Set up tp->grc_local_ctrl before calling in tg3_get_invariants()
16739 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; in tg3_get_invariants()
16742 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | in tg3_get_invariants()
16745 * are no pull-up resistors on unused GPIO pins. in tg3_get_invariants()
16748 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; in tg3_get_invariants()
16753 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; in tg3_get_invariants()
16755 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || in tg3_get_invariants()
16756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { in tg3_get_invariants()
16758 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; in tg3_get_invariants()
16761 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | in tg3_get_invariants()
16766 tp->grc_local_ctrl |= in tg3_get_invariants()
16775 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) in tg3_get_invariants()
16789 tp->phy_flags |= TG3_PHYFLG_IS_FET; in tg3_get_invariants()
16796 (tp->phy_flags & TG3_PHYFLG_IS_FET) || in tg3_get_invariants()
16797 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) in tg3_get_invariants()
16798 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; in tg3_get_invariants()
16802 tp->phy_flags |= TG3_PHYFLG_ADC_BUG; in tg3_get_invariants()
16804 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; in tg3_get_invariants()
16807 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && in tg3_get_invariants()
16815 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && in tg3_get_invariants()
16816 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) in tg3_get_invariants()
16817 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; in tg3_get_invariants()
16818 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) in tg3_get_invariants()
16819 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; in tg3_get_invariants()
16821 tp->phy_flags |= TG3_PHYFLG_BER_BUG; in tg3_get_invariants()
16826 tp->phy_otp = tg3_read_otp_phycfg(tp); in tg3_get_invariants()
16827 if (tp->phy_otp == 0) in tg3_get_invariants()
16828 tp->phy_otp = TG3_OTP_DEFAULT; in tg3_get_invariants()
16832 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; in tg3_get_invariants()
16834 tp->mi_mode = MAC_MI_MODE_BASE; in tg3_get_invariants()
16836 tp->coalesce_mode = 0; in tg3_get_invariants()
16839 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; in tg3_get_invariants()
16846 tp->coalesce_mode |= HOSTCC_MODE_ATTN; in tg3_get_invariants()
16847 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; in tg3_get_invariants()
16870 tw32(GRC_MODE, val | tp->grc_mode); in tg3_get_invariants()
16880 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, in tg3_get_invariants()
16894 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; in tg3_get_invariants()
16910 tp->fw_needed = NULL; in tg3_get_invariants()
16924 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | in tg3_get_invariants()
16927 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; in tg3_get_invariants()
16928 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, in tg3_get_invariants()
16929 tp->misc_host_ctrl); in tg3_get_invariants()
16934 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; in tg3_get_invariants()
16936 tp->mac_mode = 0; in tg3_get_invariants()
16939 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; in tg3_get_invariants()
16943 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); in tg3_get_invariants()
16951 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { in tg3_get_invariants()
16952 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; in tg3_get_invariants()
16955 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; in tg3_get_invariants()
16957 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; in tg3_get_invariants()
16973 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && in tg3_get_invariants()
16975 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { in tg3_get_invariants()
16976 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; in tg3_get_invariants()
16981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) in tg3_get_invariants()
16989 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; in tg3_get_invariants()
16990 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; in tg3_get_invariants()
16993 tp->rx_offset = NET_SKB_PAD; in tg3_get_invariants()
16995 tp->rx_copy_thresh = ~(u16)0; in tg3_get_invariants()
16999 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; in tg3_get_invariants()
17000 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; in tg3_get_invariants()
17001 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; in tg3_get_invariants()
17003 tp->rx_std_max_post = tp->rx_std_ring_mask + 1; in tg3_get_invariants()
17005 /* Increment the rx prod index on the rx std ring by at most in tg3_get_invariants()
17011 tp->rx_std_max_post = 8; in tg3_get_invariants()
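The comment fragment above caps how far the standard rx ring's producer index may be advanced per mailbox write (tp->rx_std_max_post, forced down to 8 on some chips). A tiny illustrative sketch of refilling under that rule; the function name and the post_batch callback are hypothetical.

static void refill_in_batches(unsigned int needed, unsigned int max_post,
			      void (*post_batch)(unsigned int count))
{
	while (needed) {
		unsigned int count = needed > max_post ? max_post : needed;

		post_batch(count);	/* one producer-index mailbox write */
		needed -= count;
	}
}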
17014 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & in tg3_get_invariants()
17026 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr)) in tg3_get_device_address()
17030 err = ssb_gige_get_macaddr(tp->pdev, addr); in tg3_get_device_address()
17045 if (tp->pci_fn & 1) in tg3_get_device_address()
17047 if (tp->pci_fn > 1) in tg3_get_device_address()
17090 return -EINVAL; in tg3_get_device_address()
17103 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); in tg3_calc_dma_bndry()
17136 * when a device tries to burst across a cache-line boundary. in tg3_calc_dma_bndry()
17139 * Unfortunately, for PCI-E there are only limited in tg3_calc_dma_bndry()
17140 * write-side controls for this, and thus for reads in tg3_calc_dma_bndry()
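tg3_calc_dma_bndry reads PCI_CACHE_LINE_SIZE because, as the comments above note, bursting across a cache-line boundary is costly and PCI-E offers only limited write-side control over it. A small sketch of turning that config register into a byte count; the mapping from bytes to the chip's DMA boundary bits is chip-specific and not reproduced here.

#include <linux/pci.h>

static unsigned int pci_cacheline_bytes(struct pci_dev *pdev)
{
	u8 words = 0;

	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &words);
	return (unsigned int)words * 4;
}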
17270 * Broadcom noted the GRC reset will also reset all sub-components. in tg3_do_test_dma()
17289 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, in tg3_do_test_dma()
17291 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); in tg3_do_test_dma()
17293 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); in tg3_do_test_dma()
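The three writes above use the chip's config-space memory window: point TG3PCI_MEM_WIN_BASE_ADDR at the target offset, write TG3PCI_MEM_WIN_DATA, then park the window back at 0. The same pattern wrapped in a sketch helper; the register offsets come from the driver's tg3.h, the helper name is illustrative, and the tp->indirect_lock serialization the driver normally wraps around such accesses is omitted.

#include <linux/pci.h>

static void mem_win_write32(struct pci_dev *pdev, u32 off, u32 val)
{
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, val);
	/* leave the window parked at offset 0 when done */
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}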
17300 ret = -ENODEV; in tg3_do_test_dma()
17332 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, in tg3_test_dma()
17335 ret = -ENOMEM; in tg3_test_dma()
17339 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | in tg3_test_dma()
17342 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); in tg3_test_dma()
17349 tp->dma_rwctrl |= 0x00180000; in tg3_test_dma()
17353 tp->dma_rwctrl |= 0x003f0000; in tg3_test_dma()
17355 tp->dma_rwctrl |= 0x003f000f; in tg3_test_dma()
17368 tp->dma_rwctrl |= 0x8000; in tg3_test_dma()
17370 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; in tg3_test_dma()
17375 tp->dma_rwctrl |= in tg3_test_dma()
17381 tp->dma_rwctrl |= 0x00144000; in tg3_test_dma()
17384 tp->dma_rwctrl |= 0x00148000; in tg3_test_dma()
17386 tp->dma_rwctrl |= 0x001b000f; in tg3_test_dma()
17390 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; in tg3_test_dma()
17394 tp->dma_rwctrl &= 0xfffffff0; in tg3_test_dma()
17399 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; in tg3_test_dma()
17409 * on those chips to enable a PCI-X workaround. in tg3_test_dma()
17411 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; in tg3_test_dma()
17414 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); in tg3_test_dma()
17424 saved_dma_rwctrl = tp->dma_rwctrl; in tg3_test_dma()
17425 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; in tg3_test_dma()
17426 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); in tg3_test_dma()
17437 dev_err(&tp->pdev->dev, in tg3_test_dma()
17446 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " in tg3_test_dma()
17456 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != in tg3_test_dma()
17458 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; in tg3_test_dma()
17459 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; in tg3_test_dma()
17460 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); in tg3_test_dma()
17463 dev_err(&tp->pdev->dev, in tg3_test_dma()
17466 ret = -ENODEV; in tg3_test_dma()
17477 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != in tg3_test_dma()
17484 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; in tg3_test_dma()
17485 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; in tg3_test_dma()
17488 tp->dma_rwctrl = saved_dma_rwctrl; in tg3_test_dma()
17491 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); in tg3_test_dma()
17495 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); in tg3_test_dma()
17503 tp->bufmgr_config.mbuf_read_dma_low_water = in tg3_init_bufmgr_config()
17505 tp->bufmgr_config.mbuf_mac_rx_low_water = in tg3_init_bufmgr_config()
17507 tp->bufmgr_config.mbuf_high_water = in tg3_init_bufmgr_config()
17510 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = in tg3_init_bufmgr_config()
17512 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = in tg3_init_bufmgr_config()
17514 tp->bufmgr_config.mbuf_high_water_jumbo = in tg3_init_bufmgr_config()
17517 tp->bufmgr_config.mbuf_read_dma_low_water = in tg3_init_bufmgr_config()
17519 tp->bufmgr_config.mbuf_mac_rx_low_water = in tg3_init_bufmgr_config()
17521 tp->bufmgr_config.mbuf_high_water = in tg3_init_bufmgr_config()
17524 tp->bufmgr_config.mbuf_mac_rx_low_water = in tg3_init_bufmgr_config()
17526 tp->bufmgr_config.mbuf_high_water = in tg3_init_bufmgr_config()
17530 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = in tg3_init_bufmgr_config()
17532 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = in tg3_init_bufmgr_config()
17534 tp->bufmgr_config.mbuf_high_water_jumbo = in tg3_init_bufmgr_config()
17537 tp->bufmgr_config.mbuf_read_dma_low_water = in tg3_init_bufmgr_config()
17539 tp->bufmgr_config.mbuf_mac_rx_low_water = in tg3_init_bufmgr_config()
17541 tp->bufmgr_config.mbuf_high_water = in tg3_init_bufmgr_config()
17544 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = in tg3_init_bufmgr_config()
17546 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = in tg3_init_bufmgr_config()
17548 tp->bufmgr_config.mbuf_high_water_jumbo = in tg3_init_bufmgr_config()
17552 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; in tg3_init_bufmgr_config()
17553 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; in tg3_init_bufmgr_config()
17558 switch (tp->phy_id & TG3_PHY_ID_MASK) { in tg3_phy_string()
17618 strcat(str, ":32-bit"); in tg3_bus_string()
17620 strcat(str, ":64-bit"); in tg3_bus_string()
17626 struct ethtool_coalesce *ec = &tp->coal; in tg3_init_coal()
17629 ec->cmd = ETHTOOL_GCOALESCE; in tg3_init_coal()
17630 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; in tg3_init_coal()
17631 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; in tg3_init_coal()
17632 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; in tg3_init_coal()
17633 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; in tg3_init_coal()
17634 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; in tg3_init_coal()
17635 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; in tg3_init_coal()
17636 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; in tg3_init_coal()
17637 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; in tg3_init_coal()
17638 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; in tg3_init_coal()
17640 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | in tg3_init_coal()
17642 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; in tg3_init_coal()
17643 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; in tg3_init_coal()
17644 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; in tg3_init_coal()
17645 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; in tg3_init_coal()
17649 ec->rx_coalesce_usecs_irq = 0; in tg3_init_coal()
17650 ec->tx_coalesce_usecs_irq = 0; in tg3_init_coal()
17651 ec->stats_block_coalesce_usecs = 0; in tg3_init_coal()
17669 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); in tg3_init_one()
17675 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); in tg3_init_one()
17683 err = -ENOMEM; in tg3_init_one()
17687 SET_NETDEV_DEV(dev, &pdev->dev); in tg3_init_one()
17690 tp->pdev = pdev; in tg3_init_one()
17691 tp->dev = dev; in tg3_init_one()
17692 tp->rx_mode = TG3_DEF_RX_MODE; in tg3_init_one()
17693 tp->tx_mode = TG3_DEF_TX_MODE; in tg3_init_one()
17694 tp->irq_sync = 1; in tg3_init_one()
17695 tp->pcierr_recovery = false; in tg3_init_one()
17698 tp->msg_enable = tg3_debug; in tg3_init_one()
17700 tp->msg_enable = TG3_DEF_MSG_ENABLE; in tg3_init_one()
17720 tp->misc_host_ctrl = in tg3_init_one()
17726 /* The NONFRM (non-frame) byte/word swap controls take effect in tg3_init_one()
17729 * The StrongARM chips on the board (one for tx, one for rx) in tg3_init_one()
17730 * are running in big-endian mode. in tg3_init_one()
17732 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | in tg3_init_one()
17735 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; in tg3_init_one()
17737 spin_lock_init(&tp->lock); in tg3_init_one()
17738 spin_lock_init(&tp->indirect_lock); in tg3_init_one()
17739 INIT_WORK(&tp->reset_task, tg3_reset_task); in tg3_init_one()
17741 tp->regs = pci_ioremap_bar(pdev, BAR_0); in tg3_init_one()
17742 if (!tp->regs) { in tg3_init_one()
17743 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); in tg3_init_one()
17744 err = -ENOMEM; in tg3_init_one()
17748 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || in tg3_init_one()
17749 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || in tg3_init_one()
17750 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || in tg3_init_one()
17751 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || in tg3_init_one()
17752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || in tg3_init_one()
17753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || in tg3_init_one()
17754 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || in tg3_init_one()
17755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || in tg3_init_one()
17756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || in tg3_init_one()
17757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || in tg3_init_one()
17758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || in tg3_init_one()
17759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || in tg3_init_one()
17760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || in tg3_init_one()
17761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || in tg3_init_one()
17762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { in tg3_init_one()
17764 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); in tg3_init_one()
17765 if (!tp->aperegs) { in tg3_init_one()
17766 dev_err(&pdev->dev, in tg3_init_one()
17768 err = -ENOMEM; in tg3_init_one()
17773 tp->rx_pending = TG3_DEF_RX_RING_PENDING; in tg3_init_one()
17774 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; in tg3_init_one()
17776 dev->ethtool_ops = &tg3_ethtool_ops; in tg3_init_one()
17777 dev->watchdog_timeo = TG3_TX_TIMEOUT; in tg3_init_one()
17778 dev->netdev_ops = &tg3_netdev_ops; in tg3_init_one()
17779 dev->irq = pdev->irq; in tg3_init_one()
17783 dev_err(&pdev->dev, in tg3_init_one()
17789 * device behind the EPB cannot support DMA addresses > 40-bit. in tg3_init_one()
17790 * On 64-bit systems with IOMMU, use 40-bit dma_mask. in tg3_init_one()
17791 * On 64-bit systems without IOMMU, use 64-bit dma_mask and in tg3_init_one()
17806 err = dma_set_mask(&pdev->dev, dma_mask); in tg3_init_one()
17809 err = dma_set_coherent_mask(&pdev->dev, in tg3_init_one()
17812 dev_err(&pdev->dev, "Unable to obtain 64 bit " in tg3_init_one()
17819 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); in tg3_init_one()
17821 dev_err(&pdev->dev, in tg3_init_one()
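The comment and calls above pick a 40-bit DMA mask when the NIC sits behind the EPB bridge and 64-bit otherwise, dropping to 32-bit if the wider mask cannot be installed. A minimal sketch of that fallback using the combined streaming/coherent helper; the function name and the boolean parameter are assumptions.

#include <linux/dma-mapping.h>

static int set_tg3_style_dma_masks(struct device *dev, bool behind_epb)
{
	u64 wide = behind_epb ? DMA_BIT_MASK(40) : DMA_BIT_MASK(64);

	if (!dma_set_mask_and_coherent(dev, wide))
		return 0;

	/* wide mask rejected: fall back to the universally safe 32-bit mask */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}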
17860 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | in tg3_init_one()
17862 dev->vlan_features |= features; in tg3_init_one()
17866 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY in tg3_init_one()
17874 dev->hw_features |= features; in tg3_init_one()
17875 dev->priv_flags |= IFF_UNICAST_FLT; in tg3_init_one()
17877 /* MTU range: 60 - 9000 or 1500, depending on hardware */ in tg3_init_one()
17878 dev->min_mtu = TG3_MIN_MTU; in tg3_init_one()
17879 dev->max_mtu = TG3_MAX_MTU(tp); in tg3_init_one()
17885 tp->rx_pending = 63; in tg3_init_one()
17890 dev_err(&pdev->dev, in tg3_init_one()
17899 for (i = 0; i < tp->irq_max; i++) { in tg3_init_one()
17900 struct tg3_napi *tnapi = &tp->napi[i]; in tg3_init_one()
17902 tnapi->tp = tp; in tg3_init_one()
17903 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; in tg3_init_one()
17905 tnapi->int_mbox = intmbx; in tg3_init_one()
17908 tnapi->consmbox = rcvmbx; in tg3_init_one()
17909 tnapi->prodmbox = sndmbx; in tg3_init_one()
17912 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); in tg3_init_one()
17914 tnapi->coal_now = HOSTCC_MODE_NOW; in tg3_init_one()
17922 * remaining vectors handle rx and tx interrupts. Reuse the in tg3_init_one()
17932 sndmbx -= 0x4; in tg3_init_one()
17952 dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); in tg3_init_one()
17971 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); in tg3_init_one()
17977 tp->ptp_clock = ptp_clock_register(&tp->ptp_info, in tg3_init_one()
17978 &tp->pdev->dev); in tg3_init_one()
17979 if (IS_ERR(tp->ptp_clock)) in tg3_init_one()
17980 tp->ptp_clock = NULL; in tg3_init_one()
17984 tp->board_part_number, in tg3_init_one()
17987 dev->dev_addr); in tg3_init_one()
17989 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) { in tg3_init_one()
17992 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) in tg3_init_one()
17993 ethtype = "10/100Base-TX"; in tg3_init_one()
17994 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) in tg3_init_one()
17995 ethtype = "1000Base-SX"; in tg3_init_one()
17997 ethtype = "10/100/1000Base-T"; in tg3_init_one()
18002 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, in tg3_init_one()
18003 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); in tg3_init_one()
18007 (dev->features & NETIF_F_RXCSUM) != 0, in tg3_init_one()
18009 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, in tg3_init_one()
18012 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", in tg3_init_one()
18013 tp->dma_rwctrl, in tg3_init_one()
18014 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : in tg3_init_one()
18015 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); in tg3_init_one()
18022 if (tp->aperegs) { in tg3_init_one()
18023 iounmap(tp->aperegs); in tg3_init_one()
18024 tp->aperegs = NULL; in tg3_init_one()
18028 if (tp->regs) { in tg3_init_one()
18029 iounmap(tp->regs); in tg3_init_one()
18030 tp->regs = NULL; in tg3_init_one()
18054 release_firmware(tp->fw); in tg3_remove_one()
18064 if (tp->aperegs) { in tg3_remove_one()
18065 iounmap(tp->aperegs); in tg3_remove_one()
18066 tp->aperegs = NULL; in tg3_remove_one()
18068 if (tp->regs) { in tg3_remove_one()
18069 iounmap(tp->regs); in tg3_remove_one()
18070 tp->regs = NULL; in tg3_remove_one()
18132 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); in tg3_resume()
18177 * tg3_io_error_detected - called when PCI error is detected
18199 if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) in tg3_io_error_detected()
18204 tp->pcierr_recovery = true; in tg3_io_error_detected()
18236 * tg3_io_slot_reset - called after the pci bus has been reset.
18239 * Restart the card from scratch, as if from a cold-boot.
18254 dev_err(&pdev->dev, in tg3_io_slot_reset()
18255 "Cannot re-enable PCI device after reset.\n"); in tg3_io_slot_reset()
18285 * tg3_io_resume - called when traffic can start flowing again.
18323 tp->pcierr_recovery = false; in tg3_io_resume()
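The kernel-doc fragments above name the three PCI error-recovery callbacks: error_detected when an error is reported, slot_reset after the bus has been reset, and resume when traffic may flow again. A sketch of how such callbacks are hooked into the PCI core through a pci_error_handlers table; the table and driver identifiers here are illustrative, while the callback names match the declarations above.

#include <linux/pci.h>

static const struct pci_error_handlers tg3_err_handler_sketch = {
	.error_detected	= tg3_io_error_detected,	/* error reported on the link */
	.slot_reset	= tg3_io_slot_reset,		/* bus reset done, re-init the card */
	.resume		= tg3_io_resume,		/* recovery complete, restart traffic */
};

/* wired into the driver registration, e.g.:
 *	static struct pci_driver tg3_driver = {
 *		...
 *		.err_handler	= &tg3_err_handler_sketch,
 *	};
 */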