Lines Matching +full:ether-link-active-low
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
6 * Copyright (C) 2008-2014 Renesas Solutions Corp.
7 * Copyright (C) 2013-2017 Cogent Embedded, Inc.
15 #include <linux/dma-mapping.h>
19 #include <linux/mdio-bitbang.h>
44 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
53 __diag_ignore_all("-Woverride-init",
350 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
355 iowrite32(data, mdp->addr + offset); in sh_eth_write()
361 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
366 return ioread32(mdp->addr + offset); in sh_eth_read()
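/* The search tool elided the offset validation between the fragments above;
 * given the SH_ETH_OFFSET_INVALID table fill near the top, the guard
 * plausibly looks like this minimal sketch (the WARN_ON form and the
 * _sketch name are assumptions, not the file's actual code):
 */
static u32 sh_eth_read_sketch(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;	/* register not mapped on this SoC variant */

	return ioread32(mdp->addr + offset);
}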
378 return mdp->reg_offset[enum_index]; in sh_eth_tsu_get_offset()
389 iowrite32(data, mdp->tsu_addr + offset); in sh_eth_tsu_write()
399 return ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read()
418 switch (mdp->phy_interface) { in sh_eth_select_mii()
445 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0); in sh_eth_set_duplex()
470 for (cnt = 100; cnt > 0; cnt--) { in sh_eth_check_soft_reset()
477 return -ETIMEDOUT; in sh_eth_check_soft_reset()
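/* A minimal sketch of the poll whose head and exit lines appear above,
 * assuming a self-clearing soft-reset flag in EDMR polled once per ms
 * (the EDMR_SRST_GETHER bit name and the _sketch name are assumptions):
 */
static int sh_eth_check_soft_reset_sketch(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;	/* reset bit self-cleared: done */
		mdelay(1);
	}
	return -ETIMEDOUT;	/* hardware never came out of reset */
}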
503 if (mdp->cd->csmr) in sh_eth_soft_reset_gether()
507 if (mdp->cd->select_mii) in sh_eth_soft_reset_gether()
517 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_gether()
520 switch (mdp->speed) { in sh_eth_set_rate_gether()
636 switch (mdp->speed) { in sh_eth_set_rate_rcar()
646 /* R-Car Gen1 */
677 /* R-Car Gen2 and RZ/G1 */
799 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
843 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
914 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_giga()
917 switch (mdp->speed) { in sh_eth_set_rate_giga()
1103 if (!cd->ecsr_value) in sh_eth_set_default_cpu_data()
1104 cd->ecsr_value = DEFAULT_ECSR_INIT; in sh_eth_set_default_cpu_data()
1106 if (!cd->ecsipr_value) in sh_eth_set_default_cpu_data()
1107 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; in sh_eth_set_default_cpu_data()
1109 if (!cd->fcftr_value) in sh_eth_set_default_cpu_data()
1110 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | in sh_eth_set_default_cpu_data()
1113 if (!cd->fdr_value) in sh_eth_set_default_cpu_data()
1114 cd->fdr_value = DEFAULT_FDR_INIT; in sh_eth_set_default_cpu_data()
1116 if (!cd->tx_check) in sh_eth_set_default_cpu_data()
1117 cd->tx_check = DEFAULT_TX_CHECK; in sh_eth_set_default_cpu_data()
1119 if (!cd->eesr_err_check) in sh_eth_set_default_cpu_data()
1120 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; in sh_eth_set_default_cpu_data()
1122 if (!cd->trscer_err_mask) in sh_eth_set_default_cpu_data()
1123 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; in sh_eth_set_default_cpu_data()
1128 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); in sh_eth_set_receive_align()
1130 if (reserve) in sh_eth_set_receive_align()
1131 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); in sh_eth_set_receive_align()
1134 /* Program the hardware MAC address from dev->dev_addr. */
1138 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in update_mac_address()
1139 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in update_mac_address()
1141 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in update_mac_address()
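/* Worked example: with dev_addr 00:11:22:33:44:55 the two writes above
 * yield MAHR = 0x00112233 (octets 0-3) and MALR = 0x00004455 (octets 4-5
 * in the low 16 bits).
 */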
1147 * This driver gets the MAC address that is used by the bootloader (U-Boot or sh-ipl+g).
1181 if (bitbang->set_gate) in sh_mdio_ctrl()
1182 bitbang->set_gate(bitbang->addr); in sh_mdio_ctrl()
1184 pir = ioread32(bitbang->addr); in sh_mdio_ctrl()
1189 iowrite32(pir, bitbang->addr); in sh_mdio_ctrl()
1209 if (bitbang->set_gate) in sh_get_mdio()
1210 bitbang->set_gate(bitbang->addr); in sh_get_mdio()
1212 return (ioread32(bitbang->addr) & PIR_MDI) != 0; in sh_get_mdio()
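/* Hedged sketch of how these accessors plug into the generic bit-banging
 * MDIO layer; bb_ops is referenced later in sh_mdio_init(), but the exact
 * member wiring and the sh_mdc_ctrl/sh_mmd_ctrl names are assumptions:
 */
static const struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sh_mdc_ctrl,	/* drive the PIR_MDC clock bit */
	.set_mdio_dir	= sh_mmd_ctrl,	/* drive the PIR_MMD direction bit */
	.set_mdio_data	= sh_set_mdio,	/* drive PIR_MDO */
	.get_mdio_data	= sh_get_mdio,	/* sample PIR_MDI, as shown above */
};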
1239 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_tx_free()
1240 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_tx_free()
1241 txdesc = &mdp->tx_ring[entry]; in sh_eth_tx_free()
1242 sent = !(txdesc->status & cpu_to_le32(TD_TACT)); in sh_eth_tx_free()
1249 entry, le32_to_cpu(txdesc->status)); in sh_eth_tx_free()
1251 if (mdp->tx_skbuff[entry]) { in sh_eth_tx_free()
1252 dma_unmap_single(&mdp->pdev->dev, in sh_eth_tx_free()
1253 le32_to_cpu(txdesc->addr), in sh_eth_tx_free()
1254 le32_to_cpu(txdesc->len) >> 16, in sh_eth_tx_free()
1256 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_tx_free()
1257 mdp->tx_skbuff[entry] = NULL; in sh_eth_tx_free()
1260 txdesc->status = cpu_to_le32(TD_TFP); in sh_eth_tx_free()
1261 if (entry >= mdp->num_tx_ring - 1) in sh_eth_tx_free()
1262 txdesc->status |= cpu_to_le32(TD_TDLE); in sh_eth_tx_free()
1265 ndev->stats.tx_packets++; in sh_eth_tx_free()
1266 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; in sh_eth_tx_free()
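/* Note: the descriptor 'len' field carries the byte count in its upper
 * 16 bits, hence the '>> 16' in both the dma_unmap_single() length and
 * the tx_bytes accounting above.
 */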
1278 if (mdp->rx_ring) { in sh_eth_ring_free()
1279 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_free()
1280 if (mdp->rx_skbuff[i]) { in sh_eth_ring_free()
1281 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_free()
1283 dma_unmap_single(&mdp->pdev->dev, in sh_eth_ring_free()
1284 le32_to_cpu(rxdesc->addr), in sh_eth_ring_free()
1285 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_ring_free()
1289 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1290 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1291 mdp->rx_desc_dma); in sh_eth_ring_free()
1292 mdp->rx_ring = NULL; in sh_eth_ring_free()
1296 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1297 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1298 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1300 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1301 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1303 if (mdp->tx_ring) { in sh_eth_ring_free()
1306 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1307 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1308 mdp->tx_desc_dma); in sh_eth_ring_free()
1309 mdp->tx_ring = NULL; in sh_eth_ring_free()
1313 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1314 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1325 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1326 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1327 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1331 mdp->cur_rx = 0; in sh_eth_ring_format()
1332 mdp->cur_tx = 0; in sh_eth_ring_format()
1333 mdp->dirty_rx = 0; in sh_eth_ring_format()
1334 mdp->dirty_tx = 0; in sh_eth_ring_format()
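/* cur_rx/cur_tx and dirty_rx/dirty_tx free-run as 32-bit counters;
 * consumers reduce them modulo num_rx_ring/num_tx_ring (as in
 * sh_eth_tx_free() earlier), so 'cur - dirty' remains the in-flight
 * descriptor count even across wraparound.
 */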
1336 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1339 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1341 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1348 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1349 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, in sh_eth_ring_format()
1351 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_ring_format()
1355 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1358 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1359 rxdesc->len = cpu_to_le32(buf_len << 16); in sh_eth_ring_format()
1360 rxdesc->addr = cpu_to_le32(dma_addr); in sh_eth_ring_format()
1361 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); in sh_eth_ring_format()
1365 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1366 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1371 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1375 rxdesc->status |= cpu_to_le32(RD_RDLE); in sh_eth_ring_format()
1377 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1380 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1381 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1382 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1383 txdesc->status = cpu_to_le32(TD_TFP); in sh_eth_ring_format()
1384 txdesc->len = cpu_to_le32(0); in sh_eth_ring_format()
1387 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1388 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
1393 txdesc->status |= cpu_to_le32(TD_TDLE); in sh_eth_ring_format()
1407 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1408 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); in sh_eth_ring_init()
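/* Worked example: for the default MTU of 1500 the expression above gives
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per buffer,
 * i.e. frame plus header/FCS overhead rounded up to 8 bytes, plus slack.
 */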
1409 if (mdp->cd->rpadir) in sh_eth_ring_init()
1410 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1413 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1415 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1416 return -ENOMEM; in sh_eth_ring_init()
1418 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1420 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1424 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1425 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, in sh_eth_ring_init()
1426 &mdp->rx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1427 if (!mdp->rx_ring) in sh_eth_ring_init()
1430 mdp->dirty_rx = 0; in sh_eth_ring_init()
1433 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1434 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, in sh_eth_ring_init()
1435 &mdp->tx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1436 if (!mdp->tx_ring) in sh_eth_ring_init()
1444 return -ENOMEM; in sh_eth_ring_init()
1453 ret = mdp->cd->soft_reset(ndev); in sh_eth_dev_init()
1457 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1462 if (mdp->cd->rpadir) in sh_eth_dev_init()
1469 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1476 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1479 /* Frame recv control (enable multiple-packets per rx irq) */ in sh_eth_dev_init()
1482 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1485 if (mdp->cd->nbst) in sh_eth_dev_init()
1488 /* Burst cycle count upper-limit */ in sh_eth_dev_init()
1489 if (mdp->cd->bculr) in sh_eth_dev_init()
1492 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1494 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1498 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, in sh_eth_dev_init()
1502 mdp->irq_enabled = true; in sh_eth_dev_init()
1503 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1506 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | in sh_eth_dev_init()
1507 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in sh_eth_dev_init()
1510 if (mdp->cd->set_rate) in sh_eth_dev_init()
1511 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1513 /* E-MAC Status Register clear */ in sh_eth_dev_init()
1514 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1516 /* E-MAC Interrupt Enable register */ in sh_eth_dev_init()
1517 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1523 if (mdp->cd->apr) in sh_eth_dev_init()
1525 if (mdp->cd->mpr) in sh_eth_dev_init()
1527 if (mdp->cd->tpauser) in sh_eth_dev_init()
1544 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1545 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); in sh_eth_dev_exit()
1560 mdp->cd->soft_reset(ndev); in sh_eth_dev_exit()
1563 if (mdp->cd->rmiimode) in sh_eth_dev_exit()
1575 if (unlikely(skb->len < sizeof(__sum16))) in sh_eth_rx_csum()
1577 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in sh_eth_rx_csum()
1578 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in sh_eth_rx_csum()
1579 skb->ip_summed = CHECKSUM_COMPLETE; in sh_eth_rx_csum()
1580 skb_trim(skb, skb->len - sizeof(__sum16)); in sh_eth_rx_csum()
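/* The hardware appends the 16-bit checksum it computed after the payload;
 * the code above lifts it into skb->csum, marks the skb CHECKSUM_COMPLETE,
 * and trims the trailing two bytes so the stack never sees them.
 */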
1589 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1590 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1594 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1601 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1602 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { in sh_eth_rx()
1605 desc_status = le32_to_cpu(rxdesc->status); in sh_eth_rx()
1606 pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; in sh_eth_rx()
1608 if (--boguscnt < 0) in sh_eth_rx()
1616 ndev->stats.rx_length_errors++; in sh_eth_rx()
1624 if (mdp->cd->csmr) in sh_eth_rx()
1627 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1630 ndev->stats.rx_errors++; in sh_eth_rx()
1632 ndev->stats.rx_crc_errors++; in sh_eth_rx()
1634 ndev->stats.rx_frame_errors++; in sh_eth_rx()
1636 ndev->stats.rx_length_errors++; in sh_eth_rx()
1638 ndev->stats.rx_length_errors++; in sh_eth_rx()
1640 ndev->stats.rx_missed_errors++; in sh_eth_rx()
1642 ndev->stats.rx_over_errors++; in sh_eth_rx()
1644 dma_addr = le32_to_cpu(rxdesc->addr); in sh_eth_rx()
1645 if (!mdp->cd->hw_swap) in sh_eth_rx()
1649 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1650 if (mdp->cd->rpadir) in sh_eth_rx()
1652 dma_unmap_single(&mdp->pdev->dev, dma_addr, in sh_eth_rx()
1653 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1656 skb->protocol = eth_type_trans(skb, ndev); in sh_eth_rx()
1657 if (ndev->features & NETIF_F_RXCSUM) in sh_eth_rx()
1660 ndev->stats.rx_packets++; in sh_eth_rx()
1661 ndev->stats.rx_bytes += pkt_len; in sh_eth_rx()
1663 ndev->stats.multicast++; in sh_eth_rx()
1665 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1666 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1670 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1671 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1672 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1674 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1675 rxdesc->len = cpu_to_le32(buf_len << 16); in sh_eth_rx()
1677 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1682 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, in sh_eth_rx()
1684 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_rx()
1688 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1691 rxdesc->addr = cpu_to_le32(dma_addr); in sh_eth_rx()
1694 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1695 rxdesc->status |= in sh_eth_rx()
1698 rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); in sh_eth_rx()
1702 /* If we don't need to check status, don't. -KDU */ in sh_eth_rx()
1705 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { in sh_eth_rx()
1706 u32 count = (sh_eth_read(ndev, RDFAR) - in sh_eth_rx()
1709 mdp->cur_rx = count; in sh_eth_rx()
1710 mdp->dirty_rx = count; in sh_eth_rx()
1715 *quota -= limit - boguscnt - 1; in sh_eth_rx()
1732 /* E-MAC interrupt handler */
1742 ndev->stats.tx_carrier_errors++; in sh_eth_emac_interrupt()
1744 pm_wakeup_event(&mdp->pdev->dev, 0); in sh_eth_emac_interrupt()
1746 /* Link Changed */ in sh_eth_emac_interrupt()
1747 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_emac_interrupt()
1750 if (mdp->ether_link_active_low) in sh_eth_emac_interrupt()
1755 /* Link Up */ in sh_eth_emac_interrupt()
1775 ndev->stats.tx_aborted_errors++; in sh_eth_error()
1784 ndev->stats.rx_frame_errors++; in sh_eth_error()
1790 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1796 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1802 ndev->stats.rx_over_errors++; in sh_eth_error()
1807 ndev->stats.rx_fifo_errors++; in sh_eth_error()
1810 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1812 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1817 if (mdp->cd->no_ade) in sh_eth_error()
1825 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1826 (u32)ndev->state, edtrr); in sh_eth_error()
1831 if (edtrr ^ mdp->cd->edtrr_trns) { in sh_eth_error()
1833 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_error()
1844 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1848 spin_lock(&mdp->lock); in sh_eth_interrupt()
1860 if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | in sh_eth_interrupt()
1861 cd->eesr_err_check)) in sh_eth_interrupt()
1866 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1872 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1876 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1885 if (intr_status & cd->tx_check) { in sh_eth_interrupt()
1887 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); in sh_eth_interrupt()
1893 /* E-MAC interrupt */ in sh_eth_interrupt()
1897 if (intr_status & cd->eesr_err_check) { in sh_eth_interrupt()
1899 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); in sh_eth_interrupt()
1905 spin_unlock(&mdp->lock); in sh_eth_interrupt()
1914 struct net_device *ndev = napi->dev; in sh_eth_poll()
1932 if (mdp->irq_enabled) in sh_eth_poll()
1933 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1935 return budget - quota; in sh_eth_poll()
1942 struct phy_device *phydev = ndev->phydev; in sh_eth_adjust_link()
1946 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_adjust_link()
1948 /* Disable TX and RX here if the E-MAC link change is ignored */ in sh_eth_adjust_link()
1949 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1952 if (phydev->link) { in sh_eth_adjust_link()
1953 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1955 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1956 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1957 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1960 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1962 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1963 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1964 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1966 if (!mdp->link) { in sh_eth_adjust_link()
1969 mdp->link = phydev->link; in sh_eth_adjust_link()
1971 } else if (mdp->link) { in sh_eth_adjust_link()
1973 mdp->link = 0; in sh_eth_adjust_link()
1974 mdp->speed = 0; in sh_eth_adjust_link()
1975 mdp->duplex = -1; in sh_eth_adjust_link()
1978 /* Re-enable TX and RX here if the E-MAC link change is ignored */ in sh_eth_adjust_link()
1979 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) in sh_eth_adjust_link()
1982 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_adjust_link()
1991 struct device_node *np = ndev->dev.parent->of_node; in sh_eth_phy_init()
1995 mdp->link = 0; in sh_eth_phy_init()
1996 mdp->speed = 0; in sh_eth_phy_init()
1997 mdp->duplex = -1; in sh_eth_phy_init()
2003 pn = of_parse_phandle(np, "phy-handle", 0); in sh_eth_phy_init()
2006 mdp->phy_interface); in sh_eth_phy_init()
2010 phydev = ERR_PTR(-ENOENT); in sh_eth_phy_init()
2015 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
2018 mdp->phy_interface); in sh_eth_phy_init()
2027 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) in sh_eth_phy_init()
2044 phy_start(ndev->phydev); in sh_eth_phy_start()
2060 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
2084 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2102 if (!cd->no_xdfar) in __sh_eth_get_regs()
2107 if (!cd->no_xdfar) in __sh_eth_get_regs()
2118 if (cd->rmiimode) in __sh_eth_get_regs()
2121 if (cd->rpadir) in __sh_eth_get_regs()
2123 if (!cd->no_trimd) in __sh_eth_get_regs()
2129 if (!cd->no_psr) in __sh_eth_get_regs()
2134 if (cd->apr) in __sh_eth_get_regs()
2136 if (cd->mpr) in __sh_eth_get_regs()
2140 if (cd->tpauser) in __sh_eth_get_regs()
2143 if (cd->gecmr) in __sh_eth_get_regs()
2145 if (cd->bculr) in __sh_eth_get_regs()
2149 if (!cd->no_tx_cntrs) { in __sh_eth_get_regs()
2159 if (cd->cexcr) { in __sh_eth_get_regs()
2164 if (cd->rtrate) in __sh_eth_get_regs()
2166 if (cd->csmr) in __sh_eth_get_regs()
2168 if (cd->select_mii) in __sh_eth_get_regs()
2170 if (cd->tsu) { in __sh_eth_get_regs()
2173 if (cd->dual_port) { in __sh_eth_get_regs()
2185 if (cd->dual_port) { in __sh_eth_get_regs()
2207 *buf++ = ioread32(mdp->tsu_addr + in __sh_eth_get_regs()
2208 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2232 regs->version = SH_ETH_REG_DUMP_VERSION; in sh_eth_get_regs()
2234 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2236 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2242 return mdp->msg_enable; in sh_eth_get_msglevel()
2248 mdp->msg_enable = value; in sh_eth_set_msglevel()
2263 return -EOPNOTSUPP; in sh_eth_get_sset_count()
2273 /* device-specific stats */ in sh_eth_get_ethtool_stats()
2274 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2275 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2276 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2277 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2297 ring->rx_max_pending = RX_RING_MAX; in sh_eth_get_ringparam()
2298 ring->tx_max_pending = TX_RING_MAX; in sh_eth_get_ringparam()
2299 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2300 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2311 if (ring->tx_pending > TX_RING_MAX || in sh_eth_set_ringparam()
2312 ring->rx_pending > RX_RING_MAX || in sh_eth_set_ringparam()
2313 ring->tx_pending < TX_RING_MIN || in sh_eth_set_ringparam()
2314 ring->rx_pending < RX_RING_MIN) in sh_eth_set_ringparam()
2315 return -EINVAL; in sh_eth_set_ringparam()
2316 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in sh_eth_set_ringparam()
2317 return -EINVAL; in sh_eth_set_ringparam()
2326 * won't be re-enabled. in sh_eth_set_ringparam()
2328 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2329 synchronize_irq(ndev->irq); in sh_eth_set_ringparam()
2330 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2340 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2341 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2367 wol->supported = 0; in sh_eth_get_wol()
2368 wol->wolopts = 0; in sh_eth_get_wol()
2370 if (mdp->cd->magic) { in sh_eth_get_wol()
2371 wol->supported = WAKE_MAGIC; in sh_eth_get_wol()
2372 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; in sh_eth_get_wol()
2380 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) in sh_eth_set_wol()
2381 return -EOPNOTSUPP; in sh_eth_set_wol()
2383 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in sh_eth_set_wol()
2385 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); in sh_eth_set_wol()
2414 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2416 napi_enable(&mdp->napi); in sh_eth_open()
2418 ret = request_irq(ndev->irq, sh_eth_interrupt, in sh_eth_open()
2419 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2442 mdp->is_opened = 1; in sh_eth_open()
2447 free_irq(ndev->irq, ndev); in sh_eth_open()
2449 napi_disable(&mdp->napi); in sh_eth_open()
2450 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2468 ndev->stats.tx_errors++; in sh_eth_tx_timeout()
2471 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2472 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2473 rxdesc->status = cpu_to_le32(0); in sh_eth_tx_timeout()
2474 rxdesc->addr = cpu_to_le32(0xBADF00D0); in sh_eth_tx_timeout()
2475 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2476 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2478 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2479 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2480 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2499 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2500 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2504 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2508 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2513 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2514 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2515 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2517 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2518 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); in sh_eth_start_xmit()
2519 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, in sh_eth_start_xmit()
2521 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_start_xmit()
2525 txdesc->addr = cpu_to_le32(dma_addr); in sh_eth_start_xmit()
2526 txdesc->len = cpu_to_le32(skb->len << 16); in sh_eth_start_xmit()
2529 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2530 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); in sh_eth_start_xmit()
2532 txdesc->status |= cpu_to_le32(TD_TACT); in sh_eth_start_xmit()
2535 mdp->cur_tx++; in sh_eth_start_xmit()
2537 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) in sh_eth_start_xmit()
2538 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_start_xmit()
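/* If the TX DMA engine has gone idle (EDTRR no longer holds the per-SoC
 * transmit-request value), the write above restarts it so the descriptor
 * just queued gets fetched.
 */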
2543 /* The statistics registers have write-clear behaviour, which means we
2544  * will lose any increment between the read and write.  We mitigate
2545  * this by only clearing when we read a non-zero value, so we will
2546  * never falsely report a total of zero.
2547  */
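/* Minimal sketch of the read-then-clear pattern described above, matching
 * the sh_eth_update_stat() calls below (the body is elided in this excerpt,
 * so its exact shape is an assumption):
 */
static void sh_eth_update_stat(struct net_device *ndev, unsigned long *stat,
			       int reg)
{
	unsigned long delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);	/* clear only after a non-zero read */
	}
}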
2563 if (mdp->cd->no_tx_cntrs) in sh_eth_get_stats()
2564 return &ndev->stats; in sh_eth_get_stats()
2566 if (!mdp->is_opened) in sh_eth_get_stats()
2567 return &ndev->stats; in sh_eth_get_stats()
2569 sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); in sh_eth_get_stats()
2570 sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); in sh_eth_get_stats()
2571 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); in sh_eth_get_stats()
2573 if (mdp->cd->cexcr) { in sh_eth_get_stats()
2574 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2576 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2579 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2583 return &ndev->stats; in sh_eth_get_stats()
2595 * ensure that interrupts won't be re-enabled. in sh_eth_close()
2597 mdp->irq_enabled = false; in sh_eth_close()
2598 synchronize_irq(ndev->irq); in sh_eth_close()
2599 napi_disable(&mdp->napi); in sh_eth_close()
2605 if (ndev->phydev) { in sh_eth_close()
2606 phy_stop(ndev->phydev); in sh_eth_close()
2607 phy_disconnect(ndev->phydev); in sh_eth_close()
2610 free_irq(ndev->irq, ndev); in sh_eth_close()
2615 mdp->is_opened = 0; in sh_eth_close()
2617 pm_runtime_put(&mdp->pdev->dev); in sh_eth_close()
2625 return -EBUSY; in sh_eth_change_mtu()
2627 WRITE_ONCE(ndev->mtu, new_mtu); in sh_eth_change_mtu()
2636 return 0x0f << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_mask()
2641 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
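/* Worked example: for CAM entry 1 the mask above is 0x0f << 24, i.e. one
 * nibble per entry packed MSB-first, eight entries per POST register; the
 * per-port enable bit within that nibble is 0x08 for port 0 and 0x02 for
 * port 1 (0x08 >> (port << 1)).
 */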
2679 timeout--; in sh_eth_tsu_busy()
2682 return -ETIMEDOUT; in sh_eth_tsu_busy()
2696 iowrite32(val, mdp->tsu_addr + offset); in sh_eth_tsu_write_entry()
2698 return -EBUSY; in sh_eth_tsu_write_entry()
2701 iowrite32(val, mdp->tsu_addr + offset + 4); in sh_eth_tsu_write_entry()
2703 return -EBUSY; in sh_eth_tsu_write_entry()
2713 val = ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read_entry()
2718 val = ioread32(mdp->tsu_addr + offset + 4); in sh_eth_tsu_read_entry()
2737 return -ENOENT; in sh_eth_tsu_find_entry()
2747 return (entry < 0) ? -ENOMEM : entry; in sh_eth_tsu_find_empty()
2759 ~(1 << (31 - entry)), TSU_TEN); in sh_eth_tsu_disable_cam_entry_table()
2774 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2782 return -ENOMEM; in sh_eth_tsu_add_entry()
2789 (1 << (31 - i)), TSU_TEN); in sh_eth_tsu_add_entry()
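/* TSU_TEN holds one enable bit per CAM entry, MSB-first: entry i maps to
 * bit (31 - i), which is why the disable path above clears and this enable
 * path sets 1 << (31 - entry).
 */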
2803 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2826 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2849 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2867 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2869 * Depending on ndev->flags, set PRM or clear MCT in sh_eth_set_rx_mode()
2872 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2875 if (!(ndev->flags & IFF_MULTICAST)) { in sh_eth_set_rx_mode()
2879 if (ndev->flags & IFF_ALLMULTI) { in sh_eth_set_rx_mode()
2885 if (ndev->flags & IFF_PROMISC) { in sh_eth_set_rx_mode()
2888 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2891 if (mcast_all && is_multicast_ether_addr(ha->addr)) in sh_eth_set_rx_mode()
2894 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { in sh_eth_set_rx_mode()
2907 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2915 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_csum()
2926 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_csum()
2932 netdev_features_t changed = ndev->features ^ features; in sh_eth_set_features()
2935 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) in sh_eth_set_features()
2938 ndev->features = features; in sh_eth_set_features()
2945 if (!mdp->port) in sh_eth_get_vtag_index()
2957 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2958 return -EPERM; in sh_eth_vlan_rx_add_vid()
2964 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2969 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2987 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2988 return -EPERM; in sh_eth_vlan_rx_kill_vid()
2994 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
3003 if (!mdp->cd->dual_port) { in sh_eth_tsu_init()
3010 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
3011 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
3012 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
3020 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
3021 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
3025 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
3026 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
3027 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
3028 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
3035 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
3038 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
3047 pm_runtime_get_sync(bus->parent); in sh_mdiobb_read_c22()
3049 pm_runtime_put(bus->parent); in sh_mdiobb_read_c22()
3058 pm_runtime_get_sync(bus->parent); in sh_mdiobb_write_c22()
3060 pm_runtime_put(bus->parent); in sh_mdiobb_write_c22()
3069 pm_runtime_get_sync(bus->parent); in sh_mdiobb_read_c45()
3071 pm_runtime_put(bus->parent); in sh_mdiobb_read_c45()
3081 pm_runtime_get_sync(bus->parent); in sh_mdiobb_write_c45()
3083 pm_runtime_put(bus->parent); in sh_mdiobb_write_c45()
3094 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
3095 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
3102 return -ENOMEM; in sh_mdio_init()
3105 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
3106 bitbang->set_gate = pd->set_mdio_gate; in sh_mdio_init()
3107 bitbang->ctrl.ops = &bb_ops; in sh_mdio_init()
3110 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
3111 if (!mdp->mii_bus) in sh_mdio_init()
3112 return -ENOMEM; in sh_mdio_init()
3114 /* Wrap accessors with Runtime PM-aware ops */ in sh_mdio_init()
3115 mdp->mii_bus->read = sh_mdiobb_read_c22; in sh_mdio_init()
3116 mdp->mii_bus->write = sh_mdiobb_write_c22; in sh_mdio_init()
3117 mdp->mii_bus->read_c45 = sh_mdiobb_read_c45; in sh_mdio_init()
3118 mdp->mii_bus->write_c45 = sh_mdiobb_write_c45; in sh_mdio_init()
3121 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
3122 mdp->mii_bus->parent = dev; in sh_mdio_init()
3123 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
3124 pdev->name, pdev->id); in sh_mdio_init()
3127 if (pd->phy_irq > 0) in sh_mdio_init()
3128 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
3130 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
3134 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); in sh_mdio_init()
3137 phydev->mac_managed_pm = true; in sh_mdio_init()
3138 put_device(&phydev->mdio.dev); in sh_mdio_init()
3145 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
3204 struct device_node *np = dev->of_node; in sh_eth_parse_dt()
3216 pdata->phy_interface = interface; in sh_eth_parse_dt()
3218 of_get_mac_address(np, pdata->mac_addr); in sh_eth_parse_dt()
3220 pdata->no_ether_link = in sh_eth_parse_dt()
3221 of_property_read_bool(np, "renesas,no-ether-link"); in sh_eth_parse_dt()
3222 pdata->ether_link_active_low = in sh_eth_parse_dt()
3223 of_property_read_bool(np, "renesas,ether-link-active-low"); in sh_eth_parse_dt()
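/* For illustration, a device-tree node exercising the properties parsed
 * above might look like this (unit address and phandle are placeholders;
 * the compatible string is one of those matched below):
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7790";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 */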
3229 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3230 { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3231 { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3232 { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3233 { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3234 { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3235 { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3236 { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3237 { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3238 { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3239 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3240 { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3241 { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3242 { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3256 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); in sh_eth_drv_probe()
3264 return -ENOMEM; in sh_eth_drv_probe()
3266 pm_runtime_enable(&pdev->dev); in sh_eth_drv_probe()
3267 pm_runtime_get_sync(&pdev->dev); in sh_eth_drv_probe()
3272 ndev->irq = ret; in sh_eth_drv_probe()
3274 SET_NETDEV_DEV(ndev, &pdev->dev); in sh_eth_drv_probe()
3277 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3278 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3279 mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in sh_eth_drv_probe()
3280 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3281 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3285 ndev->base_addr = res->start; in sh_eth_drv_probe()
3287 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3288 mdp->pdev = pdev; in sh_eth_drv_probe()
3290 if (pdev->dev.of_node) in sh_eth_drv_probe()
3291 pd = sh_eth_parse_dt(&pdev->dev); in sh_eth_drv_probe()
3293 dev_err(&pdev->dev, "no platform data\n"); in sh_eth_drv_probe()
3294 ret = -EINVAL; in sh_eth_drv_probe()
3299 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3300 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3301 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3302 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3306 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3308 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); in sh_eth_drv_probe()
3310 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3311 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3312 dev_err(&pdev->dev, "Unknown register type (%d)\n", in sh_eth_drv_probe()
3313 mdp->cd->register_type); in sh_eth_drv_probe()
3314 ret = -EINVAL; in sh_eth_drv_probe()
3317 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3323 ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); in sh_eth_drv_probe()
3324 ndev->min_mtu = ETH_MIN_MTU; in sh_eth_drv_probe()
3326 if (mdp->cd->rx_csum) { in sh_eth_drv_probe()
3327 ndev->features = NETIF_F_RXCSUM; in sh_eth_drv_probe()
3328 ndev->hw_features = NETIF_F_RXCSUM; in sh_eth_drv_probe()
3332 if (mdp->cd->tsu) in sh_eth_drv_probe()
3333 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; in sh_eth_drv_probe()
3335 ndev->netdev_ops = &sh_eth_netdev_ops; in sh_eth_drv_probe()
3336 ndev->ethtool_ops = &sh_eth_ethtool_ops; in sh_eth_drv_probe()
3337 ndev->watchdog_timeo = TX_TIMEOUT; in sh_eth_drv_probe()
3340 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3343 read_mac_address(ndev, pd->mac_addr); in sh_eth_drv_probe()
3344 if (!is_valid_ether_addr(ndev->dev_addr)) { in sh_eth_drv_probe()
3345 dev_warn(&pdev->dev, in sh_eth_drv_probe()
3350 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3351 int port = pdev->id < 0 ? 0 : pdev->id % 2; in sh_eth_drv_probe()
3356 dev_err(&pdev->dev, "no TSU resource\n"); in sh_eth_drv_probe()
3357 ret = -ENODEV; in sh_eth_drv_probe()
3364 !devm_request_mem_region(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3366 dev_name(&pdev->dev))) { in sh_eth_drv_probe()
3367 dev_err(&pdev->dev, "can't request TSU resource.\n"); in sh_eth_drv_probe()
3368 ret = -EBUSY; in sh_eth_drv_probe()
3372 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3374 if (!mdp->tsu_addr) { in sh_eth_drv_probe()
3375 dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); in sh_eth_drv_probe()
3376 ret = -ENOMEM; in sh_eth_drv_probe()
3379 mdp->port = port; in sh_eth_drv_probe()
3380 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in sh_eth_drv_probe()
3384 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3385 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3392 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3398 dev_err_probe(&pdev->dev, ret, "MDIO init failed\n"); in sh_eth_drv_probe()
3402 netif_napi_add(ndev, &mdp->napi, sh_eth_poll); in sh_eth_drv_probe()
3409 if (mdp->cd->magic) in sh_eth_drv_probe()
3410 device_set_wakeup_capable(&pdev->dev, 1); in sh_eth_drv_probe()
3414 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in sh_eth_drv_probe()
3416 pm_runtime_put(&pdev->dev); in sh_eth_drv_probe()
3422 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3429 pm_runtime_put(&pdev->dev); in sh_eth_drv_probe()
3430 pm_runtime_disable(&pdev->dev); in sh_eth_drv_probe()
3440 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3442 pm_runtime_disable(&pdev->dev); in sh_eth_drv_remove()
3453 synchronize_irq(ndev->irq); in sh_eth_wol_setup()
3454 napi_disable(&mdp->napi); in sh_eth_wol_setup()
3460 return enable_irq_wake(ndev->irq); in sh_eth_wol_setup()
3468 napi_enable(&mdp->napi); in sh_eth_wol_restore()
3483 return disable_irq_wake(ndev->irq); in sh_eth_wol_restore()
3497 if (mdp->wol_enabled) in sh_eth_suspend()
3514 if (mdp->wol_enabled) in sh_eth_resume()
3530 /* Runtime PM callback shared between ->runtime_suspend() in sh_eth_runtime_nop()
3531  * and ->runtime_resume(). Simply returns success. in sh_eth_runtime_nop()
3532  * in sh_eth_runtime_nop()
3533  * This driver re-initializes all registers after in sh_eth_runtime_nop()
3534  * pm_runtime_put_sync() anyway so there is no need in sh_eth_runtime_nop()
3535  * to save and restore registers here. in sh_eth_runtime_nop()
3536  */ in sh_eth_runtime_nop()
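/* Hedged sketch of how these callbacks are typically wired up; the actual
 * dev_pm_ops definition falls outside this excerpt, so the exact macros
 * used are an assumption:
 */
static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};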
3550 { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3551 { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3552 { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3553 { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3554 { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3555 { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3556 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },