Lines Matching +full:sense +full:- +full:gain +full:- +full:div

1 // SPDX-License-Identifier: GPL-2.0
2 /* Atheros AR71xx built-in ethernet mac driver
11 * David Bauer <mail@david-bauer.net>
14 * Hauke Mehrtens <hauke@hauke-m.de>
15 * Johann Neuhauser <johann@it-neuhauser.de>
17 * Jo-Philipp Wich <jo@mein.io>
43 /* For our NAPI weight, bigger does *NOT* mean better - it means more
44 * D-cache misses and far more wasted cycles than we'll ever gain
45 * from saving instructions.
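
The weight in question is the budget handed to the NAPI core at registration
time. A minimal sketch of that registration, assuming the driver's
AG71XX_NAPI_WEIGHT constant (32 upstream; stated here as an assumption):

	#include <linux/netdevice.h>

	#define AG71XX_NAPI_WEIGHT	32

	/* Register the poll callback with a deliberately small budget so
	 * each poll pass touches little data and stays in the D-cache.
	 */
	static void example_napi_setup(struct net_device *ndev,
				       struct ag71xx *ag)
	{
		netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
				      AG71XX_NAPI_WEIGHT);
	}
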
246 { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
247 { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
248 { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
249 { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
250 { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
251 { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
263 { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
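
Each row above pairs a MAC statistics register offset with a GENMASK() of its
valid bits and an ethtool label. A hedged sketch of how such a table can be
walked for get_ethtool_stats(); the struct and field names here are inferred
from the listing, not quoted from the driver:

	struct ag71xx_statistic {
		unsigned short offset;	/* register offset from mac_base */
		u32 mask;		/* valid counter bits */
		const char *name;	/* ethtool string */
	};

	/* Read every counter, masking off undefined high bits. */
	static void example_read_stats(struct ag71xx *ag,
				       const struct ag71xx_statistic *st,
				       int n, u64 *data)
	{
		int i;

		for (i = 0; i < n; i++)
			data[i] = ag71xx_rr(ag, st[i].offset) & st[i].mask;
	}
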
322 /* "Cold" fields - not used in the data path. */
349 /* Critical per-packet data path fields are clustered early in this
350 * structure to keep the D-cache footprint small.
364 /* From this point onwards we're not looking at per-packet fields. */
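
The two comments above describe a deliberate struct layout: per-packet state
first, management state later. An illustrative (not verbatim) arrangement:

	/* Hot fields first so the data path touches few cache lines;
	 * ____cacheline_aligned keeps each ring on its own line(s).
	 */
	struct example_mac {
		/* Critical, per-packet data path state. */
		void __iomem *mac_base;
		struct ag71xx_ring rx_ring ____cacheline_aligned;
		struct ag71xx_ring tx_ring ____cacheline_aligned;

		/* "Cold" fields - not used in the data path. */
		struct platform_device *pdev;
		struct reset_control *mac_reset;
		struct delayed_work restart_work;
	};
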
387 return (desc->ctrl & DESC_EMPTY) != 0; in ag71xx_desc_empty()
392 return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE]; in ag71xx_ring_desc()
397 return fls(size - 1); in ag71xx_ring_size_order()
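
fls(size - 1) converts a ring size into its power-of-two "order", rounding up
for sizes that are not powers of two. A worked example:

	int order = ag71xx_ring_size_order(256);	/* fls(255) = 8 */
	int ring_size = BIT(order);			/* 256 entries */
	int ring_mask = ring_size - 1;			/* 0xff index mask */
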
402 return ag->dcfg->type == type; in ag71xx_is()
407 iowrite32(value, ag->mac_base + reg); in ag71xx_wr()
409 (void)ioread32(ag->mac_base + reg); in ag71xx_wr()
414 return ioread32(ag->mac_base + reg); in ag71xx_rr()
421 r = ag->mac_base + reg; in ag71xx_sb()
431 r = ag->mac_base + reg; in ag71xx_cb()
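
The two fragments above come from the driver's set-bits and clear-bits
register helpers. A plausible reconstruction, mirroring the flushing read
that ag71xx_wr() performs; treat it as a sketch rather than the verbatim
source:

	static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
	{
		void __iomem *r = ag->mac_base + reg;

		iowrite32(ioread32(r) | mask, r);
		(void)ioread32(r);	/* flush the posted write */
	}

	static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
	{
		void __iomem *r = ag->mac_base + reg;

		iowrite32(ioread32(r) & ~mask, r);
		(void)ioread32(r);
	}
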
451 return phylink_mii_ioctl(ag->phylink, ifr, cmd); in ag71xx_do_ioctl()
459 strscpy(info->driver, "ag71xx", sizeof(info->driver)); in ag71xx_get_drvinfo()
460 strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node), in ag71xx_get_drvinfo()
461 sizeof(info->bus_info)); in ag71xx_get_drvinfo()
469 return phylink_ethtool_ksettings_get(ag->phylink, kset); in ag71xx_get_link_ksettings()
477 return phylink_ethtool_ksettings_set(ag->phylink, kset); in ag71xx_set_link_ksettings()
484 return phylink_ethtool_nway_reset(ag->phylink); in ag71xx_ethtool_nway_reset()
492 phylink_ethtool_get_pauseparam(ag->phylink, pause); in ag71xx_ethtool_get_pauseparam()
500 return phylink_ethtool_set_pauseparam(ag->phylink, pause); in ag71xx_ethtool_set_pauseparam()
538 return -EOPNOTSUPP; in ag71xx_ethtool_get_sset_count()
559 struct net_device *ndev = ag->ndev; in ag71xx_mdio_wait_busy()
576 return -ETIMEDOUT; in ag71xx_mdio_wait_busy()
581 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_read()
601 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_read()
610 struct ag71xx *ag = bus->priv; in ag71xx_mdio_mii_write()
612 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n", in ag71xx_mdio_mii_write()
634 static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div) in ag71xx_mdio_get_divider() argument
640 ref_clock = clk_get_rate(ag->clk_mdio); in ag71xx_mdio_get_divider()
642 return -EINVAL; in ag71xx_mdio_get_divider()
660 *div = i; in ag71xx_mdio_get_divider()
665 return -ENOENT; in ag71xx_mdio_get_divider()
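
The divider search walks a per-SoC table and returns the index of the first
divider that brings the MDIO clock under the bus limit. A hedged sketch; the
table contents and the 5 MHz ceiling are assumptions, not quotes from the
driver:

	static const u32 example_mdio_div_table[] = { 4, 6, 8, 10, 14, 20, 28 };
	#define EXAMPLE_MDIO_MAX_CLK	5000000UL

	static int example_get_divider(unsigned long ref_clock, u32 *div)
	{
		int i;

		if (!ref_clock)
			return -EINVAL;

		for (i = 0; i < ARRAY_SIZE(example_mdio_div_table); i++) {
			if (ref_clock / example_mdio_div_table[i] <=
			    EXAMPLE_MDIO_MAX_CLK) {
				*div = i;	/* register encodes the index */
				return 0;
			}
		}

		return -ENOENT;
	}
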
670 struct ag71xx *ag = bus->priv; in ag71xx_mdio_reset()
689 struct device *dev = &ag->pdev->dev; in ag71xx_mdio_probe()
690 struct net_device *ndev = ag->ndev; in ag71xx_mdio_probe()
696 np = dev->of_node; in ag71xx_mdio_probe()
698 ag->clk_mdio = devm_clk_get_enabled(dev, "mdio"); in ag71xx_mdio_probe()
699 if (IS_ERR(ag->clk_mdio)) { in ag71xx_mdio_probe()
701 return PTR_ERR(ag->clk_mdio); in ag71xx_mdio_probe()
706 return -ENOMEM; in ag71xx_mdio_probe()
714 mii_bus->name = "ag71xx_mdio"; in ag71xx_mdio_probe()
715 mii_bus->read = ag71xx_mdio_mii_read; in ag71xx_mdio_probe()
716 mii_bus->write = ag71xx_mdio_mii_write; in ag71xx_mdio_probe()
717 mii_bus->reset = ag71xx_mdio_reset; in ag71xx_mdio_probe()
718 mii_bus->priv = ag; in ag71xx_mdio_probe()
719 mii_bus->parent = dev; in ag71xx_mdio_probe()
720 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx); in ag71xx_mdio_probe()
749 timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start); in ag71xx_check_dma_stuck()
753 if (!netif_carrier_ok(ag->ndev)) in ag71xx_check_dma_stuck()
771 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_tx_packets()
773 struct net_device *ndev = ag->ndev; in ag71xx_tx_packets()
777 ring_mask = BIT(ring->order) - 1; in ag71xx_tx_packets()
778 ring_size = BIT(ring->order); in ag71xx_tx_packets()
782 while (ring->dirty + n != ring->curr) { in ag71xx_tx_packets()
787 i = (ring->dirty + n) & ring_mask; in ag71xx_tx_packets()
789 skb = ring->buf[i].tx.skb; in ag71xx_tx_packets()
792 if (ag->dcfg->tx_hang_workaround && in ag71xx_tx_packets()
794 schedule_delayed_work(&ag->restart_work, in ag71xx_tx_packets()
802 desc->ctrl |= DESC_EMPTY; in ag71xx_tx_packets()
809 ring->buf[i].tx.skb = NULL; in ag71xx_tx_packets()
811 bytes_compl += ring->buf[i].tx.len; in ag71xx_tx_packets()
814 ring->dirty += n; in ag71xx_tx_packets()
818 n--; in ag71xx_tx_packets()
827 ag->ndev->stats.tx_bytes += bytes_compl; in ag71xx_tx_packets()
828 ag->ndev->stats.tx_packets += sent; in ag71xx_tx_packets()
830 netdev_completed_queue(ag->ndev, sent, bytes_compl); in ag71xx_tx_packets()
831 if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) in ag71xx_tx_packets()
832 netif_wake_queue(ag->ndev); in ag71xx_tx_packets()
835 cancel_delayed_work(&ag->restart_work); in ag71xx_tx_packets()
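
The completion path above walks the ring from dirty toward curr, frees each
transmitted skb, and reports the totals to byte queue limits before waking
the queue. A condensed sketch of that walk (the DMA-stuck workaround and
flush handling are omitted):

	static int example_tx_complete(struct ag71xx *ag)
	{
		struct ag71xx_ring *ring = &ag->tx_ring;
		u32 ring_mask = BIT(ring->order) - 1;
		unsigned int sent = 0, bytes = 0;

		while (ring->dirty != ring->curr) {
			u32 i = ring->dirty & ring_mask;
			struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

			if (!ag71xx_desc_empty(desc))
				break;	/* still owned by the DMA engine */

			if (ring->buf[i].tx.skb) {
				bytes += ring->buf[i].tx.len;
				sent++;
				dev_kfree_skb_any(ring->buf[i].tx.skb);
				ring->buf[i].tx.skb = NULL;
			}
			ring->dirty++;
		}

		netdev_completed_queue(ag->ndev, sent, bytes);
		return sent;
	}
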
842 struct net_device *ndev = ag->ndev; in ag71xx_dma_wait_stop()
861 struct net_device *ndev = ag->ndev; in ag71xx_dma_reset()
875 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
876 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); in ag71xx_dma_reset()
918 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); in ag71xx_hw_setup()
919 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); in ag71xx_hw_setup()
944 struct net_device *dev = ag->ndev; in ag71xx_fast_reset()
955 reset_control_assert(ag->mac_reset); in ag71xx_fast_reset()
957 reset_control_deassert(ag->mac_reset); in ag71xx_fast_reset()
962 ag->tx_ring.curr = 0; in ag71xx_fast_reset()
963 ag->tx_ring.dirty = 0; in ag71xx_fast_reset()
964 netdev_reset_queue(ag->ndev); in ag71xx_fast_reset()
968 ag71xx_max_frame_len(ag->ndev->mtu)); in ag71xx_fast_reset()
971 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_fast_reset()
974 ag71xx_hw_set_macaddr(ag, dev->dev_addr); in ag71xx_fast_reset()
985 netif_wake_queue(ag->ndev); in ag71xx_hw_start()
991 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_config()
999 if (ag->tx_ring.desc_split) { in ag71xx_mac_config()
1000 ag->fifodata[2] &= 0xffff; in ag71xx_mac_config()
1001 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; in ag71xx_mac_config()
1004 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); in ag71xx_mac_config()
1010 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_down()
1021 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); in ag71xx_mac_link_up()
1078 ag->phylink_config.dev = &ag->ndev->dev; in ag71xx_phylink_setup()
1079 ag->phylink_config.type = PHYLINK_NETDEV; in ag71xx_phylink_setup()
1080 ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | in ag71xx_phylink_setup()
1083 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || in ag71xx_phylink_setup()
1086 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1088 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1090 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || in ag71xx_phylink_setup()
1091 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || in ag71xx_phylink_setup()
1092 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1094 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1096 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) in ag71xx_phylink_setup()
1098 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1100 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0) in ag71xx_phylink_setup()
1102 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1104 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || in ag71xx_phylink_setup()
1105 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) in ag71xx_phylink_setup()
1107 ag->phylink_config.supported_interfaces); in ag71xx_phylink_setup()
1109 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode, in ag71xx_phylink_setup()
1110 ag->phy_if_mode, &ag71xx_phylink_mac_ops); in ag71xx_phylink_setup()
1114 ag->phylink = phylink; in ag71xx_phylink_setup()
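
phylink_create() is handed an ops table naming the MAC callbacks seen earlier
in the listing. Its likely shape, inferred from those function names (exact
membership varies across kernel versions):

	static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
		.mac_config = ag71xx_mac_config,
		.mac_link_down = ag71xx_mac_link_down,
		.mac_link_up = ag71xx_mac_link_up,
	};
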
1120 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_clean()
1121 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_tx_clean()
1123 struct net_device *ndev = ag->ndev; in ag71xx_ring_tx_clean()
1125 while (ring->curr != ring->dirty) { in ag71xx_ring_tx_clean()
1127 u32 i = ring->dirty & ring_mask; in ag71xx_ring_tx_clean()
1131 desc->ctrl = 0; in ag71xx_ring_tx_clean()
1132 ndev->stats.tx_errors++; in ag71xx_ring_tx_clean()
1135 if (ring->buf[i].tx.skb) { in ag71xx_ring_tx_clean()
1136 bytes_compl += ring->buf[i].tx.len; in ag71xx_ring_tx_clean()
1138 dev_kfree_skb_any(ring->buf[i].tx.skb); in ag71xx_ring_tx_clean()
1140 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_clean()
1141 ring->dirty++; in ag71xx_ring_tx_clean()
1152 struct ag71xx_ring *ring = &ag->tx_ring; in ag71xx_ring_tx_init()
1153 int ring_size = BIT(ring->order); in ag71xx_ring_tx_init()
1154 int ring_mask = ring_size - 1; in ag71xx_ring_tx_init()
1160 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_tx_init()
1163 desc->ctrl = DESC_EMPTY; in ag71xx_ring_tx_init()
1164 ring->buf[i].tx.skb = NULL; in ag71xx_ring_tx_init()
1170 ring->curr = 0; in ag71xx_ring_tx_init()
1171 ring->dirty = 0; in ag71xx_ring_tx_init()
1172 netdev_reset_queue(ag->ndev); in ag71xx_ring_tx_init()
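
TX ring setup chains each descriptor's next pointer to the DMA address of the
following slot, with the index mask wrapping the last entry back to slot
zero. The linking step, restated as a self-contained sketch:

	static void example_link_ring(struct ag71xx_ring *ring)
	{
		int ring_size = BIT(ring->order);
		int ring_mask = ring_size - 1;
		int i;

		for (i = 0; i < ring_size; i++) {
			struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

			/* For the last slot, (i + 1) & mask wraps to 0. */
			desc->next = (u32)(ring->descs_dma +
					   AG71XX_DESC_SIZE *
					   ((i + 1) & ring_mask));
			desc->ctrl = DESC_EMPTY;
		}
	}
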
1177 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_clean()
1178 int ring_size = BIT(ring->order); in ag71xx_ring_rx_clean()
1181 if (!ring->buf) in ag71xx_ring_rx_clean()
1185 if (ring->buf[i].rx.rx_buf) { in ag71xx_ring_rx_clean()
1186 dma_unmap_single(&ag->pdev->dev, in ag71xx_ring_rx_clean()
1187 ring->buf[i].rx.dma_addr, in ag71xx_ring_rx_clean()
1188 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_ring_rx_clean()
1189 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_ring_rx_clean()
1195 return ag->rx_buf_size + in ag71xx_buffer_size()
1203 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_fill_rx_buf()
1207 desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); in ag71xx_fill_rx_buf()
1213 buf->rx.rx_buf = data; in ag71xx_fill_rx_buf()
1214 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, in ag71xx_fill_rx_buf()
1216 desc->data = (u32)buf->rx.dma_addr + offset; in ag71xx_fill_rx_buf()
1222 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_init()
1223 struct net_device *ndev = ag->ndev; in ag71xx_ring_rx_init()
1224 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_init()
1225 int ring_size = BIT(ring->order); in ag71xx_ring_rx_init()
1233 desc->next = (u32)(ring->descs_dma + in ag71xx_ring_rx_init()
1237 desc, desc->next); in ag71xx_ring_rx_init()
1243 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, in ag71xx_ring_rx_init()
1245 ret = -ENOMEM; in ag71xx_ring_rx_init()
1249 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_init()
1255 ring->curr = 0; in ag71xx_ring_rx_init()
1256 ring->dirty = 0; in ag71xx_ring_rx_init()
1263 struct ag71xx_ring *ring = &ag->rx_ring; in ag71xx_ring_rx_refill()
1264 int ring_mask = BIT(ring->order) - 1; in ag71xx_ring_rx_refill()
1265 int offset = ag->rx_buf_offset; in ag71xx_ring_rx_refill()
1269 for (; ring->curr - ring->dirty > 0; ring->dirty++) { in ag71xx_ring_rx_refill()
1273 i = ring->dirty & ring_mask; in ag71xx_ring_rx_refill()
1276 if (!ring->buf[i].rx.rx_buf && in ag71xx_ring_rx_refill()
1277 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset, in ag71xx_ring_rx_refill()
1281 desc->ctrl = DESC_EMPTY; in ag71xx_ring_rx_refill()
1288 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n", in ag71xx_ring_rx_refill()
1296 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_init()
1297 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_init()
1300 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_init()
1301 tx_size = BIT(tx->order); in ag71xx_rings_init()
1303 tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL); in ag71xx_rings_init()
1304 if (!tx->buf) in ag71xx_rings_init()
1305 return -ENOMEM; in ag71xx_rings_init()
1307 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, in ag71xx_rings_init()
1309 &tx->descs_dma, GFP_KERNEL); in ag71xx_rings_init()
1310 if (!tx->descs_cpu) { in ag71xx_rings_init()
1311 kfree(tx->buf); in ag71xx_rings_init()
1312 tx->buf = NULL; in ag71xx_rings_init()
1313 return -ENOMEM; in ag71xx_rings_init()
1316 rx->buf = &tx->buf[tx_size]; in ag71xx_rings_init()
1317 rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
1318 rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; in ag71xx_rings_init()
1326 struct ag71xx_ring *tx = &ag->tx_ring; in ag71xx_rings_free()
1327 struct ag71xx_ring *rx = &ag->rx_ring; in ag71xx_rings_free()
1330 ring_size = BIT(tx->order) + BIT(rx->order); in ag71xx_rings_free()
1332 if (tx->descs_cpu) in ag71xx_rings_free()
1333 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, in ag71xx_rings_free()
1334 tx->descs_cpu, tx->descs_dma); in ag71xx_rings_free()
1336 kfree(tx->buf); in ag71xx_rings_free()
1338 tx->descs_cpu = NULL; in ag71xx_rings_free()
1339 rx->descs_cpu = NULL; in ag71xx_rings_free()
1340 tx->buf = NULL; in ag71xx_rings_free()
1341 rx->buf = NULL; in ag71xx_rings_free()
1350 netdev_reset_queue(ag->ndev); in ag71xx_rings_cleanup()
1360 reset_control_assert(ag->mac_reset); in ag71xx_hw_init()
1362 reset_control_deassert(ag->mac_reset); in ag71xx_hw_init()
1378 napi_enable(&ag->napi); in ag71xx_hw_enable()
1379 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); in ag71xx_hw_enable()
1380 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); in ag71xx_hw_enable()
1381 netif_start_queue(ag->ndev); in ag71xx_hw_enable()
1388 netif_stop_queue(ag->ndev); in ag71xx_hw_disable()
1393 napi_disable(&ag->napi); in ag71xx_hw_disable()
1394 del_timer_sync(&ag->oom_timer); in ag71xx_hw_disable()
1405 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0); in ag71xx_open()
1412 max_frame_len = ag71xx_max_frame_len(ndev->mtu); in ag71xx_open()
1413 ag->rx_buf_size = in ag71xx_open()
1418 ag71xx_hw_set_macaddr(ag, ndev->dev_addr); in ag71xx_open()
1424 phylink_start(ag->phylink); in ag71xx_open()
1430 phylink_disconnect_phy(ag->phylink); in ag71xx_open()
1438 phylink_stop(ag->phylink); in ag71xx_stop()
1439 phylink_disconnect_phy(ag->phylink); in ag71xx_stop()
1450 ring_mask = BIT(ring->order) - 1; in ag71xx_fill_dma_desc()
1452 split = ring->desc_split; in ag71xx_fill_dma_desc()
1460 i = (ring->curr + ndesc) & ring_mask; in ag71xx_fill_dma_desc()
1464 return -1; in ag71xx_fill_dma_desc()
1473 cur_len -= 4; in ag71xx_fill_dma_desc()
1476 desc->data = addr; in ag71xx_fill_dma_desc()
1478 len -= cur_len; in ag71xx_fill_dma_desc()
1487 desc->ctrl = cur_len; in ag71xx_fill_dma_desc()
1503 ring = &ag->tx_ring; in ag71xx_hard_start_xmit()
1504 ring_mask = BIT(ring->order) - 1; in ag71xx_hard_start_xmit()
1505 ring_size = BIT(ring->order); in ag71xx_hard_start_xmit()
1507 if (skb->len <= 4) { in ag71xx_hard_start_xmit()
1512 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, in ag71xx_hard_start_xmit()
1515 i = ring->curr & ring_mask; in ag71xx_hard_start_xmit()
1520 skb->len & ag->dcfg->desc_pktlen_mask); in ag71xx_hard_start_xmit()
1524 i = (ring->curr + n - 1) & ring_mask; in ag71xx_hard_start_xmit()
1525 ring->buf[i].tx.len = skb->len; in ag71xx_hard_start_xmit()
1526 ring->buf[i].tx.skb = skb; in ag71xx_hard_start_xmit()
1528 netdev_sent_queue(ndev, skb->len); in ag71xx_hard_start_xmit()
1532 desc->ctrl &= ~DESC_EMPTY; in ag71xx_hard_start_xmit()
1533 ring->curr += n; in ag71xx_hard_start_xmit()
1539 if (ring->desc_split) in ag71xx_hard_start_xmit()
1542 if (ring->curr - ring->dirty >= ring_size - ring_min) { in ag71xx_hard_start_xmit()
1555 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE); in ag71xx_hard_start_xmit()
1558 ndev->stats.tx_dropped++; in ag71xx_hard_start_xmit()
1568 napi_schedule(&ag->napi); in ag71xx_oom_timer_handler()
1577 schedule_delayed_work(&ag->restart_work, 1); in ag71xx_tx_timeout()
1589 phylink_stop(ag->phylink); in ag71xx_restart_work_func()
1590 phylink_start(ag->phylink); in ag71xx_restart_work_func()
1597 struct net_device *ndev = ag->ndev; in ag71xx_rx_packets()
1604 ring = &ag->rx_ring; in ag71xx_rx_packets()
1605 pktlen_mask = ag->dcfg->desc_pktlen_mask; in ag71xx_rx_packets()
1606 offset = ag->rx_buf_offset; in ag71xx_rx_packets()
1607 ring_mask = BIT(ring->order) - 1; in ag71xx_rx_packets()
1608 ring_size = BIT(ring->order); in ag71xx_rx_packets()
1611 limit, ring->curr, ring->dirty); in ag71xx_rx_packets()
1616 unsigned int i = ring->curr & ring_mask; in ag71xx_rx_packets()
1623 if ((ring->dirty + ring_size) == ring->curr) { in ag71xx_rx_packets()
1630 pktlen = desc->ctrl & pktlen_mask; in ag71xx_rx_packets()
1631 pktlen -= ETH_FCS_LEN; in ag71xx_rx_packets()
1633 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr, in ag71xx_rx_packets()
1634 ag->rx_buf_size, DMA_FROM_DEVICE); in ag71xx_rx_packets()
1636 ndev->stats.rx_packets++; in ag71xx_rx_packets()
1637 ndev->stats.rx_bytes += pktlen; in ag71xx_rx_packets()
1639 skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag)); in ag71xx_rx_packets()
1641 ndev->stats.rx_errors++; in ag71xx_rx_packets()
1642 skb_free_frag(ring->buf[i].rx.rx_buf); in ag71xx_rx_packets()
1649 skb->dev = ndev; in ag71xx_rx_packets()
1650 skb->ip_summed = CHECKSUM_NONE; in ag71xx_rx_packets()
1651 list_add_tail(&skb->list, &rx_list); in ag71xx_rx_packets()
1654 ring->buf[i].rx.rx_buf = NULL; in ag71xx_rx_packets()
1657 ring->curr++; in ag71xx_rx_packets()
1663 skb->protocol = eth_type_trans(skb, ndev); in ag71xx_rx_packets()
1667 ring->curr, ring->dirty, done); in ag71xx_rx_packets()
1675 struct ag71xx_ring *rx_ring = &ag->rx_ring; in ag71xx_poll()
1676 int rx_ring_size = BIT(rx_ring->order); in ag71xx_poll()
1677 struct net_device *ndev = ag->ndev; in ag71xx_poll()
1686 if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf) in ag71xx_poll()
1692 ndev->stats.rx_fifo_errors++; in ag71xx_poll()
1724 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); in ag71xx_poll()
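
The poll function follows the usual NAPI contract: reap TX, receive up to the
budget, and only re-enable interrupts once a pass finishes under budget; on
allocation failure it arms the OOM timer above instead of re-arming the IRQ.
A skeleton, assuming the driver's ag71xx_int_enable() helper and its
AG71XX_INT_POLL mask:

	static int example_poll(struct napi_struct *napi, int limit)
	{
		struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
		int done;

		ag71xx_tx_packets(ag, false, limit);	/* reap finished TX */
		done = ag71xx_rx_packets(ag, limit);

		if (done < limit && napi_complete_done(napi, done))
			ag71xx_int_enable(ag, AG71XX_INT_POLL);	/* re-arm */

		return done;
	}
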
1755 napi_schedule(&ag->napi); in ag71xx_interrupt()
1765 WRITE_ONCE(ndev->mtu, new_mtu); in ag71xx_change_mtu()
1767 ag71xx_max_frame_len(ndev->mtu)); in ag71xx_change_mtu()
1789 struct device_node *np = pdev->dev.of_node; in ag71xx_probe()
1798 return -ENODEV; in ag71xx_probe()
1800 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag)); in ag71xx_probe()
1802 return -ENOMEM; in ag71xx_probe()
1806 return -EINVAL; in ag71xx_probe()
1808 dcfg = of_device_get_match_data(&pdev->dev); in ag71xx_probe()
1810 return -EINVAL; in ag71xx_probe()
1813 ag->mac_idx = -1; in ag71xx_probe()
1815 if (ar71xx_addr_ar7100[i] == res->start) in ag71xx_probe()
1816 ag->mac_idx = i; in ag71xx_probe()
1819 if (ag->mac_idx < 0) { in ag71xx_probe()
1821 return -EINVAL; in ag71xx_probe()
1824 clk_eth = devm_clk_get_enabled(&pdev->dev, "eth"); in ag71xx_probe()
1830 SET_NETDEV_DEV(ndev, &pdev->dev); in ag71xx_probe()
1832 ag->pdev = pdev; in ag71xx_probe()
1833 ag->ndev = ndev; in ag71xx_probe()
1834 ag->dcfg = dcfg; in ag71xx_probe()
1835 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE); in ag71xx_probe()
1836 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata)); in ag71xx_probe()
1838 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); in ag71xx_probe()
1839 if (IS_ERR(ag->mac_reset)) { in ag71xx_probe()
1841 return PTR_ERR(ag->mac_reset); in ag71xx_probe()
1844 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); in ag71xx_probe()
1845 if (!ag->mac_base) in ag71xx_probe()
1846 return -ENOMEM; in ag71xx_probe()
1854 ndev->irq = platform_get_irq(pdev, 0); in ag71xx_probe()
1855 err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, in ag71xx_probe()
1856 0x0, dev_name(&pdev->dev), ndev); in ag71xx_probe()
1859 ndev->irq); in ag71xx_probe()
1863 ndev->netdev_ops = &ag71xx_netdev_ops; in ag71xx_probe()
1864 ndev->ethtool_ops = &ag71xx_ethtool_ops; in ag71xx_probe()
1866 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); in ag71xx_probe()
1867 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0); in ag71xx_probe()
1870 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); in ag71xx_probe()
1872 ndev->min_mtu = 68; in ag71xx_probe()
1873 ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0); in ag71xx_probe()
1875 ag->rx_buf_offset = NET_SKB_PAD; in ag71xx_probe()
1877 ag->rx_buf_offset += NET_IP_ALIGN; in ag71xx_probe()
1880 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; in ag71xx_probe()
1883 ag->tx_ring.order = ag71xx_ring_size_order(tx_size); in ag71xx_probe()
1885 ag->stop_desc = dmam_alloc_coherent(&pdev->dev, in ag71xx_probe()
1887 &ag->stop_desc_dma, GFP_KERNEL); in ag71xx_probe()
1888 if (!ag->stop_desc) in ag71xx_probe()
1889 return -ENOMEM; in ag71xx_probe()
1891 ag->stop_desc->data = 0; in ag71xx_probe()
1892 ag->stop_desc->ctrl = 0; in ag71xx_probe()
1893 ag->stop_desc->next = (u32)ag->stop_desc_dma; in ag71xx_probe()
1896 if (err == -EPROBE_DEFER) in ag71xx_probe()
1903 err = of_get_phy_mode(np, &ag->phy_if_mode); in ag71xx_probe()
1905 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); in ag71xx_probe()
1909 netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll, in ag71xx_probe()
1928 err = devm_register_netdev(&pdev->dev, ndev); in ag71xx_probe()
1936 (unsigned long)ag->mac_base, ndev->irq, in ag71xx_probe()
1937 phy_modes(ag->phy_if_mode)); in ag71xx_probe()
1958 .desc_pktlen_mask = SZ_4K - 1,
1966 .desc_pktlen_mask = SZ_4K - 1,
1974 .desc_pktlen_mask = SZ_4K - 1,
1982 .desc_pktlen_mask = SZ_4K - 1,
1989 .max_frame_len = SZ_16K - 1,
1990 .desc_pktlen_mask = SZ_16K - 1,
1997 .max_frame_len = SZ_16K - 1,
1998 .desc_pktlen_mask = SZ_16K - 1,
2006 .desc_pktlen_mask = SZ_16K - 1,
2011 { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2012 { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2013 { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2014 { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2015 { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2016 { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2017 { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2018 { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2019 { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2020 { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
2034 MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");