Lines matching full:eth

292 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
294 __raw_writel(val, eth->base + reg); in mtk_w32()
297 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
299 return __raw_readl(eth->base + reg); in mtk_r32()
302 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) in mtk_m32() argument
306 val = mtk_r32(eth, reg); in mtk_m32()
309 mtk_w32(eth, val, reg); in mtk_m32()
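
The three accessors above (lines 292-309) are thin MMIO wrappers: mtk_w32()/mtk_r32() go straight through __raw_writel()/__raw_readl(), and mtk_m32() layers a read-modify-write on top of them. The lines between 306 and 309 are elided by the match, so the following is a hedged sketch of the pattern, assuming clear-mask-then-set semantics:

#include <linux/io.h>
#include <linux/types.h>

/* Read-modify-write over a memory-mapped register window.  The
 * "base + reg" addressing mirrors the matched lines; the ordering
 * (clear the mask, then OR in the new bits) is an assumption. */
static u32 example_m32(void __iomem *base, u32 mask, u32 set,
		       unsigned int reg)
{
	u32 val = __raw_readl(base + reg);	/* mtk_r32() equivalent */

	val &= ~mask;
	val |= set;
	__raw_writel(val, base + reg);		/* mtk_w32() equivalent */

	return val;
}
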
313 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
318 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
325 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
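
mtk_mdio_busy_wait() (lines 313-325) polls MTK_PHY_IAC until the PHY_IAC_ACCESS busy bit drops and reports "MDIO timeout" through dev_err() when it never does. The loop body and timeout are elided by the match; a minimal sketch under those assumptions, using a jiffies deadline:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Poll a busy flag until it clears or a deadline passes.  The 100 ms
 * budget and 5-10 us sleep window are assumptions, not the driver's
 * actual values. */
static int example_busy_wait(void __iomem *base, unsigned int reg, u32 busy)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	do {
		if (!(__raw_readl(base + reg) & busy))
			return 0;
		usleep_range(5, 10);
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}
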
329 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
334 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
338 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
346 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
353 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
358 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
362 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
370 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
374 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
382 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
389 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
393 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
397 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
404 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
408 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
411 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
416 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
420 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
428 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
432 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
439 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
443 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
449 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
451 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
457 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
459 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
464 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
466 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
472 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
474 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
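
The four mtk_mdio_{read,write}_c{22,45} wrappers (lines 449-474) only recover the driver context from bus->priv — stashed there by mtk_mdio_init() via eth->mii_bus->priv = eth, line 855 below — and forward to the _mtk_mdio_* helpers. This is the standard mii_bus callback shape; a placeholder sketch:

#include <linux/phy.h>

struct example_priv;				/* opaque driver context */
int example_mdio_read_hw(struct example_priv *priv,
			 int phy_addr, int phy_reg);

/* mii_bus .read callback: unpack the context planted in bus->priv at
 * registration time, then delegate to the register-level accessor. */
static int example_mdio_read_c22(struct mii_bus *bus, int phy_addr,
				 int phy_reg)
{
	struct example_priv *priv = bus->priv;

	return example_mdio_read_hw(priv, phy_addr, phy_reg);
}
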
477 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
485 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
491 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
497 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
498 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000); in mtk_gmac0_rgmii_adjust()
500 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
504 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); in mtk_gmac0_rgmii_adjust()
507 static void mtk_setup_bridge_switch(struct mtk_eth *eth) in mtk_setup_bridge_switch() argument
510 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), in mtk_setup_bridge_switch()
514 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, in mtk_setup_bridge_switch()
525 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
530 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
533 return eth->sgmii_pcs[sid]; in mtk_mac_select_pcs()
544 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
549 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
559 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
560 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
568 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
573 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
574 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
622 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
625 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
636 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
638 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
645 dev_err(eth->dev, in mtk_mac_config()
651 if (mtk_is_netsys_v3_or_greater(eth) && in mtk_mac_config()
656 mtk_setup_bridge_switch(eth); in mtk_mac_config()
662 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
667 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
676 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
682 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
709 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
712 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
723 if (mtk_is_netsys_v1(eth)) in mtk_set_queue_speed()
775 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
826 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
833 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
835 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
844 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
845 if (!eth->mii_bus) { in mtk_mdio_init()
850 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
851 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
852 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
853 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
854 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
855 eth->mii_bus->priv = eth; in mtk_mdio_init()
856 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
858 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
862 dev_err(eth->dev, "MDIO clock frequency out of range"); in mtk_mdio_init()
871 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
872 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); in mtk_mdio_init()
876 if (!mtk_is_netsys_v3_or_greater(eth)) in mtk_mdio_init()
878 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); in mtk_mdio_init()
880 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); in mtk_mdio_init()
882 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
889 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
891 if (!eth->mii_bus) in mtk_mdio_cleanup()
894 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
897 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
902 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
903 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
904 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
905 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_disable()
908 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
913 spin_lock_irqsave(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
914 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
915 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
916 spin_unlock_irqrestore(&eth->tx_irq_lock, flags); in mtk_tx_irq_enable()
919 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
924 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
925 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
926 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
927 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_disable()
930 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
935 spin_lock_irqsave(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
936 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
937 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
938 spin_unlock_irqrestore(&eth->rx_irq_lock, flags); in mtk_rx_irq_enable()
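
The four tx/rx irq enable/disable helpers above (lines 897-938) share one pattern: a spin_lock_irqsave()-protected read-modify-write of the TX or RX interrupt-mask register taken from the per-SoC reg_map. Condensed into one placeholder routine:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Set (or, with "& ~mask", clear) bits in an interrupt-mask register
 * under a spinlock, so concurrent enable/disable calls cannot lose
 * each other's read-modify-write. */
static void example_irq_enable(void __iomem *mask_reg, spinlock_t *lock,
			       u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	val = __raw_readl(mask_reg);
	__raw_writel(val | mask, mask_reg);
	spin_unlock_irqrestore(lock, flags);
}
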
945 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
955 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
976 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
980 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
988 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
1011 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_stats_update_mac()
1041 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
1046 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
1048 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
1049 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
1050 spin_unlock(&eth->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1109 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1119 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_rx_get_desc()
1139 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1141 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1147 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) in mtk_init_fq_dma()
1148 eth->scratch_ring = eth->sram_base; in mtk_init_fq_dma()
1150 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1152 &eth->phy_scratch_ring, in mtk_init_fq_dma()
1155 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1158 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); in mtk_init_fq_dma()
1162 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1164 if (unlikely(!eth->scratch_head[j])) in mtk_init_fq_dma()
1167 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1168 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1171 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1177 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; in mtk_init_fq_dma()
1180 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1188 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_init_fq_dma()
1197 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1198 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1199 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1200 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
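
mtk_init_fq_dma() (lines 1139-1200) assembles the QDMA free queue: a descriptor ring placed in SRAM when the SoC has it, otherwise dma_alloc_coherent() memory; page-sized buffers mapped with dma_map_single(); and each descriptor's txd2 chained to the DMA address of the next descriptor (line 1180) before the head/tail/count/blen registers hand the ring to hardware (lines 1197-1200). A sketch of just the chaining step, with an assumed descriptor layout:

#include <linux/types.h>

struct example_txd {
	u32 txd1, txd2, txd3, txd4;	/* txd2 = "next" pointer */
};

/* Link entry i to entry i + 1 by physical address so the engine can
 * walk the ring; the final descriptor is left for the tail register.
 * Assumes the ring fits 32-bit DMA addressing. */
static void example_chain_fq(void *ring, dma_addr_t ring_phys,
			     unsigned int cnt, unsigned int desc_size)
{
	unsigned int i;

	for (i = 0; i + 1 < cnt; i++) {
		struct example_txd *txd = ring + i * desc_size;

		txd->txd2 = (u32)(ring_phys + (i + 1) * desc_size);
	}
}
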
1229 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1232 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1234 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1239 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1246 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1253 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1283 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1287 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1310 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1341 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1350 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_tx_set_dma_desc_v2()
1378 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) in mtk_tx_set_dma_desc_v2()
1396 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1398 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_set_dma_desc()
1419 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1420 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1437 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1439 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1446 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1479 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1482 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1495 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1527 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1533 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1543 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1556 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1565 eth->soc->tx.dma_max_len); in mtk_cal_txd_req()
1574 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1579 if (!eth->netdev[i]) in mtk_queue_stopped()
1581 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1588 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1593 if (!eth->netdev[i]) in mtk_wake_queue()
1595 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1602 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1603 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_start_xmit()
1612 spin_lock(&eth->page_lock); in mtk_start_xmit()
1614 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_start_xmit()
1617 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1620 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1622 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1629 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1647 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1652 spin_unlock(&eth->page_lock); in mtk_start_xmit()
1658 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1664 if (!eth->hwlro) in mtk_get_rx_ring()
1665 return &eth->rx_ring[0]; in mtk_get_rx_ring()
1670 ring = &eth->rx_ring[i]; in mtk_get_rx_ring()
1672 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_get_rx_ring()
1682 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1687 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1688 ring = &eth->rx_ring[0]; in mtk_update_rx_cpu_idx()
1689 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1692 ring = &eth->rx_ring[i]; in mtk_update_rx_cpu_idx()
1695 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1701 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1703 return mtk_is_netsys_v2_or_greater(eth); in mtk_page_pool_enabled()
1706 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1715 .dev = eth->dma_dev, in mtk_create_page_pool()
1722 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1728 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id, in mtk_create_page_pool()
1729 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
1769 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1774 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_frame_map()
1779 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1781 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1790 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1800 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1806 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1810 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1811 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_xdp_submit_frame()
1824 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_xdp_submit_frame()
1831 spin_lock(&eth->page_lock); in mtk_xdp_submit_frame()
1835 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1845 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1893 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1898 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1902 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1909 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1921 spin_unlock(&eth->page_lock); in mtk_xdp_submit_frame()
1931 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1938 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1951 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1962 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1982 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
2015 struct mtk_eth *eth) in mtk_poll_rx() argument
2035 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
2040 rxd = ring->dma + idx * eth->soc->rx.desc_size; in mtk_poll_rx()
2043 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
2047 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_poll_rx()
2061 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
2067 !eth->netdev[mac])) in mtk_poll_rx()
2070 netdev = eth->netdev[mac]; in mtk_poll_rx()
2071 ppe_idx = eth->mac[mac]->ppe_idx; in mtk_poll_rx()
2073 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) in mtk_poll_rx()
2092 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
2101 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2130 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2131 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2133 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2140 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_poll_rx()
2143 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), in mtk_poll_rx()
2160 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_poll_rx()
2176 if (*rxdcsum & eth->soc->rx.dma_l4_valid) in mtk_poll_rx()
2185 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && in mtk_poll_rx()
2189 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2190 eth->dsa_meta[port]) in mtk_poll_rx()
2191 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); in mtk_poll_rx()
2195 mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); in mtk_poll_rx()
2204 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2209 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && in mtk_poll_rx()
2223 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2226 eth->rx_packets += done; in mtk_poll_rx()
2227 eth->rx_bytes += bytes; in mtk_poll_rx()
2228 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2230 net_dim(&eth->rx_dim, dim_sample); in mtk_poll_rx()
2246 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2254 eth->tx_packets++; in mtk_poll_tx_done()
2255 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2257 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2276 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2279 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2280 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_qdma()
2287 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2300 eth->soc->tx.desc_size); in mtk_poll_tx_qdma()
2306 mtk_poll_tx_done(eth, state, tx_buf->mac_id, in mtk_poll_tx_qdma()
2311 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2321 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2326 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2329 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx_pdma()
2336 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2346 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2349 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2351 desc = ring->dma + cpu * eth->soc->tx.desc_size; in mtk_poll_tx_pdma()
2364 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2366 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_poll_tx()
2370 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2371 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2373 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2378 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2380 net_dim(&eth->tx_dim, dim_sample); in mtk_poll_tx()
2382 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2384 mtk_wake_queue(eth); in mtk_poll_tx()
2389 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2391 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2394 mtk_stats_update(eth); in mtk_handle_status_irq()
2395 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2402 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2403 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2406 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2407 mtk_handle_status_irq(eth); in mtk_napi_tx()
2408 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2409 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2411 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2412 dev_info(eth->dev, in mtk_napi_tx()
2414 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2415 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2421 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2425 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
2432 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2433 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2436 mtk_handle_status_irq(eth); in mtk_napi_rx()
2441 mtk_w32(eth, eth->soc->rx.irq_done_mask, in mtk_napi_rx()
2443 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2446 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2447 dev_info(eth->dev, in mtk_napi_rx()
2449 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2450 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2456 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2457 eth->soc->rx.irq_done_mask); in mtk_napi_rx()
2460 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_napi_rx()
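
mtk_napi_rx() (lines 2432-2460) is the canonical NAPI poll: acknowledge the done bits, consume up to the remaining budget, loop while irq_status still reports pending work, and re-enable the interrupt only after napi_complete_done() succeeds. A skeleton of that control flow, with every helper a placeholder:

#include <linux/netdevice.h>

void example_ack_rx_irq(void);
int example_poll_rx(struct napi_struct *napi, int budget);
bool example_rx_irq_pending(void);
void example_unmask_rx_irq(void);

/* Returning the full budget keeps the device in polling mode; only a
 * completed poll (done < budget, nothing pending) unmasks the IRQ. */
static int example_napi_rx(struct napi_struct *napi, int budget)
{
	int done = 0;

	do {
		example_ack_rx_irq();
		done += example_poll_rx(napi, budget - done);
		if (done >= budget)
			return budget;
	} while (example_rx_irq_pending());

	if (napi_complete_done(napi, done))
		example_unmask_rx_irq();

	return done;
}
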
2465 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2467 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2468 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_alloc()
2485 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz; in mtk_tx_alloc()
2486 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; in mtk_tx_alloc()
2488 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2503 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_tx_alloc()
2516 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2540 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2541 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2542 mtk_w32(eth, in mtk_tx_alloc()
2545 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2549 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2556 if (mtk_is_netsys_v1(eth)) in mtk_tx_alloc()
2558 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2562 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2563 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_tx_alloc()
2564 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2566 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2567 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2568 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2569 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2578 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2580 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2581 struct mtk_tx_ring *ring = &eth->tx_ring; in mtk_tx_clean()
2586 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2591 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2598 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2605 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2607 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2608 const struct mtk_soc_data *soc = eth->soc; in mtk_rx_alloc()
2613 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_rx_alloc()
2621 ring = &eth->rx_ring_qdma; in mtk_rx_alloc()
2623 ring = &eth->rx_ring[ring_no]; in mtk_rx_alloc()
2641 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2644 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2652 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || in mtk_rx_alloc()
2654 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2655 rx_dma_size * eth->soc->rx.desc_size, in mtk_rx_alloc()
2658 struct mtk_tx_ring *tx_ring = &eth->tx_ring; in mtk_rx_alloc()
2661 eth->soc->tx.desc_size * (ring_no + 1); in mtk_rx_alloc()
2663 eth->soc->tx.desc_size * (ring_no + 1); in mtk_rx_alloc()
2674 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_alloc()
2689 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2690 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2692 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2701 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_alloc()
2711 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_rx_alloc()
2734 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2736 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2738 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2741 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2743 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2745 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2748 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2753 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) in mtk_rx_clean() argument
2765 rxd = ring->dma + i * eth->soc->rx.desc_size; in mtk_rx_clean()
2769 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) in mtk_rx_clean()
2772 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), in mtk_rx_clean()
2781 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2782 ring->dma_size * eth->soc->rx.desc_size, in mtk_rx_clean()
2795 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2819 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2820 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2821 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2831 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2834 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2837 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2849 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2850 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
2855 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2861 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2865 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2875 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2878 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2881 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2885 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2888 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2890 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2893 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2896 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2900 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2903 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2905 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2927 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2940 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2951 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2962 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2970 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2977 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
3061 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
3067 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
3068 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
3070 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
3072 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
3076 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
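
mtk_dma_busy_wait() (lines 3061-3076) picks the QDMA or PDMA glo_cfg offset from the reg_map and spins on it through readx_poll_timeout_atomic() (line 3072), logging "DMA init timeout" on failure. The poll condition and timeout are elided by the match; a hedged equivalent with invented busy bits and poll parameters:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_DMA_BUSY	(BIT(1) | BIT(3))	/* assumed bits */

/* Spin (atomically, no sleeping) until the DMA busy bits clear:
 * sample every 5 us, give up after 10 ms.  Both values are
 * assumptions, not the driver's. */
static int example_dma_busy_wait(void __iomem *glo_cfg)
{
	u32 val;

	return readx_poll_timeout_atomic(__raw_readl, glo_cfg, val,
					 !(val & EXAMPLE_DMA_BUSY),
					 5, 10000);
}
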
3081 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
3086 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
3089 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3093 err = mtk_init_fq_dma(eth); in mtk_dma_init()
3098 err = mtk_tx_alloc(eth); in mtk_dma_init()
3102 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3103 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
3108 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
3112 if (eth->hwlro) { in mtk_dma_init()
3114 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
3118 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3123 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3127 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3128 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3129 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3135 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3137 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3141 if (eth->netdev[i]) in mtk_dma_free()
3142 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
3143 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { in mtk_dma_free()
3144 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3146 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3147 eth->scratch_ring = NULL; in mtk_dma_free()
3148 eth->phy_scratch_ring = 0; in mtk_dma_free()
3150 mtk_tx_clean(eth); in mtk_dma_free()
3151 mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); in mtk_dma_free()
3152 mtk_rx_clean(eth, &eth->rx_ring_qdma, false); in mtk_dma_free()
3154 if (eth->hwlro) { in mtk_dma_free()
3155 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3157 mtk_rx_clean(eth, &eth->rx_ring[i], false); in mtk_dma_free()
3161 kfree(eth->scratch_head[i]); in mtk_dma_free()
3162 eth->scratch_head[i] = NULL; in mtk_dma_free()
3166 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3168 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3178 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3180 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_tx_timeout()
3183 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3186 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3187 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3189 schedule_work(&eth->pending_work); in mtk_tx_timeout()
3194 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3196 eth->rx_events++; in mtk_handle_irq_rx()
3197 if (likely(napi_schedule_prep(&eth->rx_napi))) { in mtk_handle_irq_rx()
3198 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_handle_irq_rx()
3199 __napi_schedule(&eth->rx_napi); in mtk_handle_irq_rx()
3207 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3209 eth->tx_events++; in mtk_handle_irq_tx()
3210 if (likely(napi_schedule_prep(&eth->tx_napi))) { in mtk_handle_irq_tx()
3211 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3212 __napi_schedule(&eth->tx_napi); in mtk_handle_irq_tx()
3220 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3221 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3223 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3224 eth->soc->rx.irq_done_mask) { in mtk_handle_irq()
3225 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3226 eth->soc->rx.irq_done_mask) in mtk_handle_irq()
3229 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3230 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3241 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3243 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3244 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3245 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3246 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3247 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); in mtk_poll_controller()
3251 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3254 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3257 err = mtk_dma_init(eth); in mtk_start_dma()
3259 mtk_dma_free(eth); in mtk_start_dma()
3263 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3264 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3269 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_start_dma()
3275 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3277 mtk_w32(eth, in mtk_start_dma()
3282 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3290 static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) in mtk_gdm_config() argument
3294 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3297 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); in mtk_gdm_config()
3307 if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) in mtk_gdm_config()
3310 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); in mtk_gdm_config()
3327 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3361 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3369 struct mtk_eth *eth = mac->hw; in mtk_open() local
3373 ppe_num = eth->soc->ppe_num; in mtk_open()
3383 if (!refcount_read(&eth->dma_refcnt)) { in mtk_open()
3384 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3388 err = mtk_start_dma(eth); in mtk_open()
3394 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3395 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3398 if (!eth->netdev[i]) in mtk_open()
3401 target_mac = netdev_priv(eth->netdev[i]); in mtk_open()
3415 mtk_gdm_config(eth, target_mac->id, gdm_config); in mtk_open()
3418 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_open()
3419 mtk_w32(eth, 0, MTK_RST_GL); in mtk_open()
3421 napi_enable(&eth->tx_napi); in mtk_open()
3422 napi_enable(&eth->rx_napi); in mtk_open()
3423 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3424 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); in mtk_open()
3425 refcount_set(&eth->dma_refcnt, 1); in mtk_open()
3427 refcount_inc(&eth->dma_refcnt); in mtk_open()
3433 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_open()
3436 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3437 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3438 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3449 eth->dsa_meta[i] = md_dst; in mtk_open()
3455 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3458 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3460 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); in mtk_open()
3466 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3472 spin_lock_bh(&eth->page_lock); in mtk_stop_dma()
3473 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3474 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3476 spin_unlock_bh(&eth->page_lock); in mtk_stop_dma()
3480 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3492 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3502 if (!refcount_dec_and_test(&eth->dma_refcnt)) in mtk_stop()
3506 mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); in mtk_stop()
3508 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3509 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); in mtk_stop()
3510 napi_disable(&eth->tx_napi); in mtk_stop()
3511 napi_disable(&eth->rx_napi); in mtk_stop()
3513 cancel_work_sync(&eth->rx_dim.work); in mtk_stop()
3514 cancel_work_sync(&eth->tx_dim.work); in mtk_stop()
3516 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3517 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3518 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3520 mtk_dma_free(eth); in mtk_stop()
3522 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3523 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3532 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3536 if (eth->hwlro) { in mtk_xdp_setup()
3546 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3550 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3570 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3572 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3577 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
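
ethsys_reset() (lines 3570-3577) is a classic regmap reset pulse: assert the reset bits in ETHSYS_RSTCTRL with regmap_update_bits(), wait, then deassert the same bits. The delay between the two matched calls is elided; a sketch with an assumed settle time:

#include <linux/delay.h>
#include <linux/regmap.h>

/* Pulse reset bits through a syscon regmap: write them as 1 (assert),
 * let the block settle, write them back to 0 (deassert).  The 1 ms
 * settle window is an assumption. */
static void example_syscon_reset(struct regmap *map, unsigned int rstctrl,
				 u32 bits)
{
	regmap_update_bits(map, rstctrl, bits, bits);
	usleep_range(1000, 1100);
	regmap_update_bits(map, rstctrl, bits, 0);
}
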
3583 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3588 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3591 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3596 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3605 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
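
mtk_clk_enable() walks eth->clks[] with clk_prepare_enable() and, per the clk_disable_unprepare() on its error path at line 3605, unwinds every clock it already enabled when one fails; mtk_clk_disable() is the full reverse walk. The standard unwind idiom, assuming an MTK_CLK_MAX-sized array:

#include <linux/clk.h>

/* Enable clocks in order; on failure, disable in reverse order only
 * those that were successfully enabled.  "num" stands in for
 * MTK_CLK_MAX. */
static int example_clk_enable(struct clk **clks, int num)
{
	int clk, ret;

	for (clk = 0; clk < num; clk++) {
		ret = clk_prepare_enable(clks[clk]);
		if (ret)
			goto err_disable;
	}
	return 0;

err_disable:
	while (--clk >= 0)
		clk_disable_unprepare(clks[clk]);
	return ret;
}
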
3613 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3614 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3618 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3620 spin_lock_bh(&eth->dim_lock); in mtk_dim_rx()
3622 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3632 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3633 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3634 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3636 spin_unlock_bh(&eth->dim_lock); in mtk_dim_rx()
3644 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3645 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3649 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3651 spin_lock_bh(&eth->dim_lock); in mtk_dim_tx()
3653 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3663 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3664 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3665 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3667 spin_unlock_bh(&eth->dim_lock); in mtk_dim_tx()
3674 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3677 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3696 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3700 if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3701 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3703 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_reset()
3706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3709 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_reset()
3713 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_reset()
3716 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3722 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3724 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_hw_reset()
3725 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3727 else if (mtk_is_netsys_v2_or_greater(eth)) in mtk_hw_reset()
3728 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3732 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3736 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3740 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3744 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3746 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3748 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3749 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3753 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_warm_reset()
3755 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3757 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_hw_warm_reset()
3761 } else if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_hw_warm_reset()
3763 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3769 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3772 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3774 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3778 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3781 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3783 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3787 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3789 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3797 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3801 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3803 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3806 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3809 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3810 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3811 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3813 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3814 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3815 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3822 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3823 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3825 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3826 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3827 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3828 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3829 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3830 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3835 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3836 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3843 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3844 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3845 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3846 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3849 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3850 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3856 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3857 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3858 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3860 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
3868 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3871 if (test_bit(MTK_RESETTING, &eth->state)) in mtk_hw_reset_monitor_work()
3875 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3876 schedule_work(&eth->pending_work); in mtk_hw_reset_monitor_work()
3879 schedule_delayed_work(&eth->reset.monitor_work, in mtk_hw_reset_monitor_work()
3883 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3887 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3890 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_init()
3894 pm_runtime_enable(eth->dev); in mtk_hw_init()
3895 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3897 ret = mtk_clk_enable(eth); in mtk_hw_init()
3902 if (eth->ethsys) in mtk_hw_init()
3903 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3904 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3906 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3907 ret = device_reset(eth->dev); in mtk_hw_init()
3909 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3914 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3915 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3918 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3919 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3927 mtk_hw_warm_reset(eth); in mtk_hw_init()
3929 mtk_hw_reset(eth); in mtk_hw_init()
3931 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3933 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3934 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3937 if (eth->pctl) { in mtk_hw_init()
3939 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3942 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3945 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3953 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3958 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3966 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3967 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3968 if (mtk_is_netsys_v1(eth)) { in mtk_hw_init()
3969 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3970 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3972 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3976 mtk_dim_rx(&eth->rx_dim.work); in mtk_hw_init()
3977 mtk_dim_tx(&eth->tx_dim.work); in mtk_hw_init()
3980 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3981 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3984 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3985 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3986 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3987 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3988 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3990 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_hw_init()
3992 mtk_w32(eth, 0x00000302, PSE_DROP_CFG); in mtk_hw_init()
3995 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); in mtk_hw_init()
3996 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); in mtk_hw_init()
3999 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); in mtk_hw_init()
4006 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); in mtk_hw_init()
4007 } else if (!mtk_is_netsys_v1(eth)) { in mtk_hw_init()
4009 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
4012 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); in mtk_hw_init()
4015 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
4018 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
4019 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
4020 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
4021 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
4022 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
4023 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
4024 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
4025 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
4028 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
4029 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
4030 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
4031 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
4032 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
4033 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
4034 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
4035 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
4038 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
4039 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
4040 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
4041 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
4042 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
4043 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
4050 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
4051 pm_runtime_disable(eth->dev); in mtk_hw_init()
4057 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
4059 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) in mtk_hw_deinit()
4062 mtk_clk_disable(eth); in mtk_hw_deinit()
4064 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
4065 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
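
mtk_hw_init() brackets bring-up with pm_runtime_enable() + pm_runtime_get_sync() (lines 3894-3895) and drops both references on its error path (lines 4050-4051); mtk_hw_deinit() repeats the put/disable pair once MTK_HW_INIT is cleared. The pairing, reduced to a placeholder:

#include <linux/pm_runtime.h>

int example_bring_up(struct device *dev);	/* placeholder */

/* Every enable/get on the way up is matched by put/disable on the
 * error path here and again in the deinit path. */
static int example_hw_init(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = example_bring_up(dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}
	return ret;
}
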
4073 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
4076 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
4077 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
4084 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
4086 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
4114 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
4121 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_prepare_for_reset()
4123 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_prepare_for_reset()
4124 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
4126 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_prepare_for_reset()
4128 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_prepare_for_reset()
4132 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
4133 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
4136 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
4140 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
4141 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
4147 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
4153 set_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4155 mtk_prepare_for_reset(eth); in mtk_pending_work()
4160 mtk_prepare_for_reset(eth); in mtk_pending_work()
4164 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
4167 mtk_stop(eth->netdev[i]); in mtk_pending_work()
4173 if (eth->dev->pins) in mtk_pending_work()
4174 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
4175 eth->dev->pins->default_state); in mtk_pending_work()
4176 mtk_hw_init(eth, true); in mtk_pending_work()
4180 if (!eth->netdev[i] || !test_bit(i, &restart)) in mtk_pending_work()
4183 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4184 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4186 dev_close(eth->netdev[i]); in mtk_pending_work()
4192 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); in mtk_pending_work()
4194 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); in mtk_pending_work()
4195 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4197 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) in mtk_pending_work()
4200 mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); in mtk_pending_work()
4203 clear_bit(MTK_RESETTING, &eth->state); in mtk_pending_work()
4210 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4215 if (!eth->netdev[i]) in mtk_free_dev()
4217 free_netdev(eth->netdev[i]); in mtk_free_dev()
4220 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4221 if (!eth->dsa_meta[i]) in mtk_free_dev()
4223 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4229 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4235 if (!eth->netdev[i]) in mtk_unreg_dev()
4237 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4238 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4240 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4246 static void mtk_sgmii_destroy(struct mtk_eth *eth) in mtk_sgmii_destroy() argument
4251 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]); in mtk_sgmii_destroy()
4254 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4256 mtk_sgmii_destroy(eth); in mtk_cleanup()
4257 mtk_unreg_dev(eth); in mtk_cleanup()
4258 mtk_free_dev(eth); in mtk_cleanup()
4259 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
4260 cancel_delayed_work_sync(&eth->reset.monitor_work); in mtk_cleanup()
4361 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4366 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4367 struct mtk_rx_ring *ring = &eth->rx_ring[i]; in mtk_ethtool_pp_stats()
4535 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4546 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4552 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4556 if (eth->netdev[id]) { in mtk_add_mac()
4557 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4561 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4564 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4565 if (!eth->netdev[id]) { in mtk_add_mac()
4566 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4569 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4570 eth->mac[id] = mac; in mtk_add_mac()
4572 mac->hw = eth; in mtk_add_mac()
4575 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); in mtk_add_mac()
4581 eth_hw_addr_random(eth->netdev[id]); in mtk_add_mac()
4582 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_add_mac()
4583 eth->netdev[id]->dev_addr); in mtk_add_mac()
4589 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4593 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4600 if (mtk_is_netsys_v3_or_greater(eth)) in mtk_add_mac()
4608 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4616 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
4642 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mtk_add_mac()
4678 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4679 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4680 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4681 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4683 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4684 if (eth->hwlro) in mtk_add_mac()
4685 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4687 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4689 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4690 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4692 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4693 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4695 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4696 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4698 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4700 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4705 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4706 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4714 free_netdev(eth->netdev[id]); in mtk_add_mac()
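Within mtk_add_mac(), the of_get_ethdev_address() matches earlier follow a common address-selection fallback: take the DT/nvmem address when available, propagate -EPROBE_DEFER so a late nvmem provider can satisfy it on a retry, otherwise generate a random locally administered address. A sketch (the unwind label is hypothetical):

err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
if (err == -EPROBE_DEFER)
	goto free_netdev;			/* hypothetical label */
if (err) {
	/* invalid or missing DT address: fall back to a random one */
	eth_hw_addr_random(eth->netdev[id]);
	dev_err(eth->dev, "generated random MAC address %pM\n",
		eth->netdev[id]->dev_addr);
}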
4718 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4727 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4737 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
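mtk_eth_set_dma_device() swaps the device used for DMA mapping (the WED offload path uses this when it takes over the data path). Running interfaces cannot keep their mappings across the switch, so they are closed, the pointer is swapped, and they are reopened. A sketch, assuming the function takes the RTNL lock itself:

void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(dev_list);
	int i;

	rtnl_lock();

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		dev = eth->netdev[i];
		if (!dev || !(dev->flags & IFF_UP))
			continue;
		list_add_tail(&dev->close_list, &dev_list);
	}

	/* close every running interface in one batch */
	dev_close_many(&dev_list, false);

	eth->dma_dev = dma_dev;

	/* reopen them against the new DMA device */
	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
		list_del_init(&dev->close_list);
		dev_open(dev, NULL);
	}

	rtnl_unlock();
}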
4747 static int mtk_sgmii_init(struct mtk_eth *eth) in mtk_sgmii_init() argument
4755 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i); in mtk_sgmii_init()
4769 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap, in mtk_sgmii_init()
4770 eth->soc->ana_rgc3, in mtk_sgmii_init()
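mtk_sgmii_init() creates one LynxI PCS per "mediatek,sgmiisys" phandle. A sketch; the pnswap property handling is an assumption based on the LynxI PCS flags:

static int mtk_sgmii_init(struct mtk_eth *eth)
{
	struct device_node *np;
	struct regmap *regmap;
	u32 flags;
	int i;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
		if (!np)
			break;

		regmap = syscon_node_to_regmap(np);
		flags = 0;
		if (of_property_read_bool(np, "mediatek,pnswap"))
			flags |= MTK_SGMII_FLAG_PN_SWAP;	/* assumed flag */
		of_node_put(np);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
							 eth->soc->ana_rgc3,
							 flags);
	}

	return 0;
}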
4781 struct mtk_eth *eth; in mtk_probe() local
4784 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4785 if (!eth) in mtk_probe()
4788 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4790 eth->dev = &pdev->dev; in mtk_probe()
4791 eth->dma_dev = &pdev->dev; in mtk_probe()
4792 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4793 if (IS_ERR(eth->base)) in mtk_probe()
4794 return PTR_ERR(eth->base); in mtk_probe()
4796 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4797 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4799 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4804 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4805 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); in mtk_probe()
4806 if (IS_ERR(eth->sram_base)) in mtk_probe()
4807 return PTR_ERR(eth->sram_base); in mtk_probe()
4809 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4813 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { in mtk_probe()
4824 spin_lock_init(&eth->page_lock); in mtk_probe()
4825 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
4826 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
4827 spin_lock_init(&eth->dim_lock); in mtk_probe()
4829 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4830 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
4831 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4833 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4834 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
4836 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4837 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4839 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4841 return PTR_ERR(eth->ethsys); in mtk_probe()
4845 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4846 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4848 if (IS_ERR(eth->infra)) { in mtk_probe()
4850 return PTR_ERR(eth->infra); in mtk_probe()
4864 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4865 err = mtk_sgmii_init(eth); in mtk_probe()
4871 if (eth->soc->required_pctl) { in mtk_probe()
4872 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4874 if (IS_ERR(eth->pctl)) { in mtk_probe()
4876 err = PTR_ERR(eth->pctl); in mtk_probe()
4881 if (mtk_is_netsys_v2_or_greater(eth)) { in mtk_probe()
4887 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { in mtk_probe()
4888 if (mtk_is_netsys_v3_or_greater(eth)) { in mtk_probe()
4894 eth->phy_scratch_ring = res_sram->start; in mtk_probe()
4896 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; in mtk_probe()
4901 if (eth->soc->offload_version) { in mtk_probe()
4907 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4915 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4917 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4923 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4924 eth->irq[i] = eth->irq[0]; in mtk_probe()
4926 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4927 if (eth->irq[i] < 0) { in mtk_probe()
4933 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4934 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4936 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4937 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4941 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4947 eth->clks[i] = NULL; in mtk_probe()
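The clock loop just above implements a three-way policy: -EPROBE_DEFER always propagates so probing retries once the clock provider appears; a missing clock is fatal only when the SoC data marks it required; anything else leaves the slot NULL, which the clk_* API treats as a no-op clock. A sketch (the unwind label is hypothetical):

for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
	eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]);
	if (IS_ERR(eth->clks[i])) {
		if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
			err = -EPROBE_DEFER;
			goto err_destroy_sgmii;		/* hypothetical label */
		}
		if (eth->soc->required_clks & BIT(i)) {
			dev_err(&pdev->dev, "clock %s not found\n",
				mtk_clks_source_name[i]);
			err = -EINVAL;
			goto err_destroy_sgmii;		/* hypothetical label */
		}
		eth->clks[i] = NULL;	/* optional clock: safe to pass to clk_* */
	}
}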
4951 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4952 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
4954 err = mtk_hw_init(eth, false); in mtk_probe()
4958 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4962 "mediatek,eth-mac")) in mtk_probe()
4968 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4975 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4976 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4978 dev_name(eth->dev), eth); in mtk_probe()
4980 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4982 dev_name(eth->dev), eth); in mtk_probe()
4986 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4988 dev_name(eth->dev), eth); in mtk_probe()
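The IRQ requests above split on MTK_SHARED_INT: single-line SoCs route everything through one handler, others dedicate separate TX and RX vectors. A sketch using the driver's mtk_handle_irq* handler names (the exact error handling is assumed):

if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
	err = devm_request_irq(eth->dev, eth->irq[0],
			       mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
} else {
	err = devm_request_irq(eth->dev, eth->irq[1],
			       mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (!err)
		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
}
if (err)
	goto err_free_dev;			/* hypothetical label */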
4994 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4995 err = mtk_mdio_init(eth); in mtk_probe()
5000 if (eth->soc->offload_version) { in mtk_probe()
5001 u8 ppe_num = eth->soc->ppe_num; in mtk_probe()
5003 ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); in mtk_probe()
5005 u32 ppe_addr = eth->soc->reg_map->ppe_base; in mtk_probe()
5008 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); in mtk_probe()
5010 if (!eth->ppe[i]) { in mtk_probe()
5014 err = mtk_eth_offload_init(eth, i); in mtk_probe()
5022 if (!eth->netdev[i]) in mtk_probe()
5025 err = register_netdev(eth->netdev[i]); in mtk_probe()
5027 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
5030 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
5032 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
5038 eth->dummy_dev = alloc_netdev_dummy(0); in mtk_probe()
5039 if (!eth->dummy_dev) { in mtk_probe()
5041 dev_err(eth->dev, "failed to allocate dummy device\n"); in mtk_probe()
5044 netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx); in mtk_probe()
5045 netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx); in mtk_probe()
5047 platform_set_drvdata(pdev, eth); in mtk_probe()
5048 schedule_delayed_work(&eth->reset.monitor_work, in mtk_probe()
5054 mtk_unreg_dev(eth); in mtk_probe()
5056 mtk_ppe_deinit(eth); in mtk_probe()
5057 mtk_mdio_cleanup(eth); in mtk_probe()
5059 mtk_free_dev(eth); in mtk_probe()
5061 mtk_hw_deinit(eth); in mtk_probe()
5065 mtk_sgmii_destroy(eth); in mtk_probe()
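The matches at the bottom of mtk_probe() are its error-unwind ladder: each label undoes one successfully completed stage, in reverse probe order. A sketch of the shape (label names are hypothetical):

err_unreg_netdev:
	mtk_unreg_dev(eth);
err_deinit_ppe:
	mtk_ppe_deinit(eth);
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);
err_destroy_sgmii:
	mtk_sgmii_destroy(eth);

	return err;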
5072 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
5078 if (!eth->netdev[i]) in mtk_remove()
5080 mtk_stop(eth->netdev[i]); in mtk_remove()
5081 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
5086 mtk_hw_deinit(eth); in mtk_remove()
5088 netif_napi_del(&eth->tx_napi); in mtk_remove()
5089 netif_napi_del(&eth->rx_napi); in mtk_remove()
5090 mtk_cleanup(eth); in mtk_remove()
5091 free_netdev(eth->dummy_dev); in mtk_remove()
5092 mtk_mdio_cleanup(eth); in mtk_remove()
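mtk_remove() stops DMA on every interface and detaches phylink before tearing down the hardware; the NAPI contexts hang off the dummy device, so it is freed only after they are deleted. A sketch, assuming phylink_disconnect_phy() is the per-MAC detach step:

static void mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices so DMA is properly shut down */
	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	free_netdev(eth->dummy_dev);
	mtk_mdio_cleanup(eth);
}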
5346 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5347 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5348 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5349 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5350 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5351 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5352 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5353 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5354 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
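Each entry's .data pointer is the per-SoC mtk_soc_data that mtk_probe() retrieves through of_device_get_match_data(). A sketch of how such a table is terminated and wired into a platform driver (the sentinel, table name, and driver name are assumptions here):

static const struct of_device_id of_mtk_match[] = {
	/* ... entries as listed above ... */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};
module_platform_driver(mtk_driver);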