Lines Matching +full:rx +full:- +full:queues +full:- +full:config

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2004-2006 Atmel Corporation
10 #include <linux/clk-provider.h>
25 #include <linux/dma-mapping.h>
40 #include <linux/firmware/xlnx-zynqmp.h>
58 * (bp)->rx_ring_size)
64 * (bp)->tx_ring_size)
67 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
78 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))
94 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
133 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
158 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
184 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
190 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
191 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
192 return &queue->tx_ring[index]; in macb_tx_desc()
198 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
205 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
206 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
208 return queue->tx_ring_dma + offset; in macb_tx_dma()
213 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
218 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
219 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
220 return &queue->rx_ring[index]; in macb_rx_desc()
225 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
226 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
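
The ring-index helpers listed above wrap a monotonically increasing producer/consumer index by masking with (ring_size - 1), which only works because the TX/RX ring sizes are powers of two. A minimal standalone sketch of that pattern (illustrative only, not taken from the driver):

    /* Power-of-two ring wrap: indices grow without bound and are mapped
     * back into [0, size) by masking.  Requires size == 2^n. */
    static inline unsigned int ring_wrap(unsigned int index, unsigned int size)
    {
            return index & (size - 1);
    }
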
232 return __raw_readl(bp->regs + offset); in hw_readl_native()
237 __raw_writel(value, bp->regs + offset); in hw_writel_native()
242 return readl_relaxed(bp->regs + offset); in hw_readl()
247 writel_relaxed(value, bp->regs + offset); in hw_writel()
284 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
286 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
323 eth_hw_addr_set(bp->dev, addr); in macb_get_hwaddr()
328 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
329 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
342 struct macb *bp = bus->priv; in macb_mdio_read_c22()
345 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_read_c22()
366 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read_c22()
367 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read_c22()
375 struct macb *bp = bus->priv; in macb_mdio_read_c45()
378 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read_c45()
380 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read_c45()
412 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read_c45()
413 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read_c45()
421 struct macb *bp = bus->priv; in macb_mdio_write_c22()
424 status = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_mdio_write_c22()
444 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write_c22()
445 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write_c22()
454 struct macb *bp = bus->priv; in macb_mdio_write_c45()
457 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write_c45()
459 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write_c45()
490 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write_c45()
491 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write_c45()
501 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
502 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
504 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
506 upper_32_bits(queue->rx_ring_dma)); in macb_init_buffers()
508 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
510 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
512 upper_32_bits(queue->tx_ring_dma)); in macb_init_buffers()
518 * macb_set_tx_clk() - Set a clock to a new frequency
526 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
530 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
547 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
554 ferr = abs(rate_rounded - rate); in macb_set_tx_clk()
557 netdev_warn(bp->dev, in macb_set_tx_clk()
561 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
562 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
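
The rate table inside macb_set_tx_clk() is elided from this listing; the function rounds tx_clk to the conventional MII/GMII transmit-clock rates of 2.5 MHz, 25 MHz and 125 MHz for 10/100/1000 Mb/s links before calling clk_round_rate()/clk_set_rate(), and warns if the rounded rate deviates noticeably from the request. A hedged sketch of that speed-to-rate mapping (helper name and shape are illustrative, not the driver's exact code):

    /* Nominal TX clock per link speed: the standard MII/GMII rates.
     * The real macb_set_tx_clk() feeds the chosen rate to clk_round_rate()
     * and clk_set_rate() on bp->tx_clk. */
    static long tx_clk_rate_for_speed(int speed)
    {
            switch (speed) {
            case 10:   return 2500000;    /* 2.5 MHz  */
            case 100:  return 25000000;   /* 25 MHz   */
            case 1000: return 125000000;  /* 125 MHz  */
            default:   return 0;          /* leave the clock alone */
            }
    }
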
570 u32 config; in macb_usx_pcs_link_up() local
572 config = gem_readl(bp, USX_CONTROL); in macb_usx_pcs_link_up()
573 config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config); in macb_usx_pcs_link_up()
574 config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config); in macb_usx_pcs_link_up()
575 config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS)); in macb_usx_pcs_link_up()
576 config |= GEM_BIT(TX_EN); in macb_usx_pcs_link_up()
577 gem_writel(bp, USX_CONTROL, config); in macb_usx_pcs_link_up()
586 state->speed = SPEED_10000; in macb_usx_pcs_get_state()
587 state->duplex = 1; in macb_usx_pcs_get_state()
588 state->an_complete = 1; in macb_usx_pcs_get_state()
591 state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK)); in macb_usx_pcs_get_state()
594 state->pause = MLO_PAUSE_RX; in macb_usx_pcs_get_state()
614 state->link = 0; in macb_pcs_get_state()
643 static void macb_mac_config(struct phylink_config *config, unsigned int mode, in macb_mac_config() argument
646 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_config()
652 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
657 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
658 if (state->interface == PHY_INTERFACE_MODE_RMII) in macb_mac_config()
664 if (state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
666 } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { in macb_mac_config()
669 } else if (bp->caps & MACB_CAPS_MIIONRGMII && in macb_mac_config()
670 bp->phy_interface == PHY_INTERFACE_MODE_MII) { in macb_mac_config()
686 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
698 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
701 static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, in macb_mac_link_down() argument
704 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_down()
710 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
711 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
713 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
715 /* Disable Rx and Tx */ in macb_mac_link_down()
722 static void macb_mac_link_up(struct phylink_config *config, in macb_mac_link_up() argument
728 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_link_up()
735 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
747 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
762 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
765 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
767 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
772 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
776 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
778 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_up()
781 /* Enable Rx and Tx; Enable PTP unicast */ in macb_mac_link_up()
791 static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config, in macb_mac_select_pcs() argument
794 struct net_device *ndev = to_net_dev(config->dev); in macb_mac_select_pcs()
798 return &bp->phylink_usx_pcs; in macb_mac_select_pcs()
800 return &bp->phylink_sgmii_pcs; in macb_mac_select_pcs()
814 dn = of_parse_phandle(dn, "phy-handle", 0); in macb_phy_handle_exists()
821 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
822 struct net_device *dev = bp->dev; in macb_phylink_connect()
827 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
830 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
833 return -ENXIO; in macb_phylink_connect()
837 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
845 phylink_start(bp->phylink); in macb_phylink_connect()
850 static void macb_get_pcs_fixed_state(struct phylink_config *config, in macb_get_pcs_fixed_state() argument
853 struct net_device *ndev = to_net_dev(config->dev); in macb_get_pcs_fixed_state()
856 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
864 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops; in macb_mii_probe()
865 bp->phylink_sgmii_pcs.neg_mode = true; in macb_mii_probe()
866 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mii_probe()
867 bp->phylink_usx_pcs.neg_mode = true; in macb_mii_probe()
869 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
870 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
871 bp->phylink_config.mac_managed_pm = true; in macb_mii_probe()
873 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
874 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
875 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
878 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | in macb_mii_probe()
882 bp->phylink_config.supported_interfaces); in macb_mii_probe()
884 bp->phylink_config.supported_interfaces); in macb_mii_probe()
887 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) { in macb_mii_probe()
888 bp->phylink_config.mac_capabilities |= MAC_1000FD; in macb_mii_probe()
889 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_mii_probe()
890 bp->phylink_config.mac_capabilities |= MAC_1000HD; in macb_mii_probe()
893 bp->phylink_config.supported_interfaces); in macb_mii_probe()
894 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces); in macb_mii_probe()
896 if (bp->caps & MACB_CAPS_PCS) in macb_mii_probe()
898 bp->phylink_config.supported_interfaces); in macb_mii_probe()
900 if (bp->caps & MACB_CAPS_HIGH_SPEED) { in macb_mii_probe()
902 bp->phylink_config.supported_interfaces); in macb_mii_probe()
903 bp->phylink_config.mac_capabilities |= MAC_10000FD; in macb_mii_probe()
907 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
908 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
909 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
911 PTR_ERR(bp->phylink)); in macb_mii_probe()
912 return PTR_ERR(bp->phylink); in macb_mii_probe()
920 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
927 int ret = of_mdiobus_register(bp->mii_bus, child); in macb_mdiobus_register()
945 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
948 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
953 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mii_init()
954 int err = -ENXIO; in macb_mii_init()
956 /* With fixed-link, we don't need to register the MDIO bus, in macb_mii_init()
964 return macb_mii_probe(bp->dev); in macb_mii_init()
969 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
970 if (!bp->mii_bus) { in macb_mii_init()
971 err = -ENOMEM; in macb_mii_init()
975 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
976 bp->mii_bus->read = &macb_mdio_read_c22; in macb_mii_init()
977 bp->mii_bus->write = &macb_mdio_write_c22; in macb_mii_init()
978 bp->mii_bus->read_c45 = &macb_mdio_read_c45; in macb_mii_init()
979 bp->mii_bus->write_c45 = &macb_mdio_write_c45; in macb_mii_init()
980 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
981 bp->pdev->name, bp->pdev->id); in macb_mii_init()
982 bp->mii_bus->priv = bp; in macb_mii_init()
983 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
985 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
991 err = macb_mii_probe(bp->dev); in macb_mii_init()
998 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
1000 mdiobus_free(bp->mii_bus); in macb_mii_init()
1007 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
1008 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
1011 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); in macb_update_stats()
1014 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
1034 return -ETIMEDOUT; in macb_halt_tx()
1039 if (tx_skb->mapping) { in macb_tx_unmap()
1040 if (tx_skb->mapped_as_page) in macb_tx_unmap()
1041 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1042 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1044 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1045 tx_skb->size, DMA_TO_DEVICE); in macb_tx_unmap()
1046 tx_skb->mapping = 0; in macb_tx_unmap()
1049 if (tx_skb->skb) { in macb_tx_unmap()
1050 napi_consume_skb(tx_skb->skb, budget); in macb_tx_unmap()
1051 tx_skb->skb = NULL; in macb_tx_unmap()
1060 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
1062 desc_64->addrh = upper_32_bits(addr); in macb_set_addr()
1063 /* The low bits of RX address contain the RX_USED bit, clearing in macb_set_addr()
1064 * of which allows packet RX. Make sure the high bits are also in macb_set_addr()
1070 desc->addr = lower_32_bits(addr); in macb_set_addr()
1079 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1081 addr = ((u64)(desc_64->addrh) << 32); in macb_get_addr()
1084 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); in macb_get_addr()
1086 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_get_addr()
1097 struct macb *bp = queue->bp; in macb_tx_error_task()
1104 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1105 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
1106 queue->tx_tail, queue->tx_head); in macb_tx_error_task()
1114 napi_disable(&queue->napi_tx); in macb_tx_error_task()
1115 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1118 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1125 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1133 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { in macb_tx_error_task()
1137 ctrl = desc->ctrl; in macb_tx_error_task()
1139 skb = tx_skb->skb; in macb_tx_error_task()
1147 skb = tx_skb->skb; in macb_tx_error_task()
1154 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1156 skb->data); in macb_tx_error_task()
1157 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1158 queue->stats.tx_packets++; in macb_tx_error_task()
1159 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1160 queue->stats.tx_bytes += skb->len; in macb_tx_error_task()
1163 /* "Buffers exhausted mid-frame" errors may only happen in macb_tx_error_task()
1168 netdev_err(bp->dev, in macb_tx_error_task()
1169 "BUG: TX buffers exhausted mid-frame\n"); in macb_tx_error_task()
1171 desc->ctrl = ctrl | MACB_BIT(TX_USED); in macb_tx_error_task()
1180 desc->ctrl = MACB_BIT(TX_USED); in macb_tx_error_task()
1186 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
1188 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
1189 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); in macb_tx_error_task()
1192 queue->tx_head = 0; in macb_tx_error_task()
1193 queue->tx_tail = 0; in macb_tx_error_task()
1203 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1206 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1207 napi_enable(&queue->napi_tx); in macb_tx_error_task()
1217 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in ptp_one_step_sync()
1229 if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP) in ptp_one_step_sync()
1242 struct macb *bp = queue->bp; in macb_tx_complete()
1243 u16 queue_index = queue - bp->queues; in macb_tx_complete()
1248 spin_lock(&queue->tx_ptr_lock); in macb_tx_complete()
1249 head = queue->tx_head; in macb_tx_complete()
1250 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) { in macb_tx_complete()
1261 ctrl = desc->ctrl; in macb_tx_complete()
1272 skb = tx_skb->skb; in macb_tx_complete()
1276 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in macb_tx_complete()
1280 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_complete()
1282 skb->data); in macb_tx_complete()
1283 bp->dev->stats.tx_packets++; in macb_tx_complete()
1284 queue->stats.tx_packets++; in macb_tx_complete()
1285 bp->dev->stats.tx_bytes += skb->len; in macb_tx_complete()
1286 queue->stats.tx_bytes += skb->len; in macb_tx_complete()
1302 queue->tx_tail = tail; in macb_tx_complete()
1303 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_complete()
1304 CIRC_CNT(queue->tx_head, queue->tx_tail, in macb_tx_complete()
1305 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_complete()
1306 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_complete()
1307 spin_unlock(&queue->tx_ptr_lock); in macb_tx_complete()
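
macb_tx_complete() above wakes a stopped transmit subqueue once the number of descriptors still in flight, CIRC_CNT(tx_head, tx_tail, tx_ring_size), falls to MACB_TX_WAKEUP_THRESH (three quarters of the ring, per line 67). A minimal sketch of that occupancy test using the same <linux/circ_buf.h> helper (names simplified for illustration):

    #include <linux/types.h>
    #include <linux/circ_buf.h>

    /* In-flight TX descriptors are counted between tail (oldest not yet
     * reclaimed) and head (next slot to fill); wake the netdev queue when
     * the backlog has drained to 3/4 of the ring or less. */
    static bool tx_queue_should_wake(unsigned int head, unsigned int tail,
                                     unsigned int ring_size)
    {
            return CIRC_CNT(head, tail, ring_size) <= (3 * ring_size / 4);
    }
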
1317 struct macb *bp = queue->bp; in gem_rx_refill()
1320 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, in gem_rx_refill()
1321 bp->rx_ring_size) > 0) { in gem_rx_refill()
1322 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1329 if (!queue->rx_skbuff[entry]) { in gem_rx_refill()
1331 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1333 netdev_err(bp->dev, in gem_rx_refill()
1339 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1340 bp->rx_buffer_size, in gem_rx_refill()
1342 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1347 queue->rx_skbuff[entry] = skb; in gem_rx_refill()
1349 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1351 desc->ctrl = 0; in gem_rx_refill()
1361 desc->ctrl = 0; in gem_rx_refill()
1363 desc->addr &= ~MACB_BIT(RX_USED); in gem_rx_refill()
1365 queue->rx_prepared_head++; in gem_rx_refill()
1371 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1372 queue, queue->rx_prepared_head, queue->rx_tail); in gem_rx_refill()
1384 desc->addr &= ~MACB_BIT(RX_USED); in discard_partial_frame()
1399 struct macb *bp = queue->bp; in gem_rx()
1411 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1417 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; in gem_rx()
1423 /* Ensure ctrl is at least as up-to-date as rxused */ in gem_rx()
1426 ctrl = desc->ctrl; in gem_rx()
1428 queue->rx_tail++; in gem_rx()
1432 netdev_err(bp->dev, in gem_rx()
1434 bp->dev->stats.rx_dropped++; in gem_rx()
1435 queue->stats.rx_dropped++; in gem_rx()
1438 skb = queue->rx_skbuff[entry]; in gem_rx()
1440 netdev_err(bp->dev, in gem_rx()
1441 "inconsistent Rx descriptor chain\n"); in gem_rx()
1442 bp->dev->stats.rx_dropped++; in gem_rx()
1443 queue->stats.rx_dropped++; in gem_rx()
1447 queue->rx_skbuff[entry] = NULL; in gem_rx()
1448 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1450 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1453 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1454 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1456 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1458 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1459 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1461 skb->ip_summed = CHECKSUM_UNNECESSARY; in gem_rx()
1463 bp->dev->stats.rx_packets++; in gem_rx()
1464 queue->stats.rx_packets++; in gem_rx()
1465 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1466 queue->stats.rx_bytes += skb->len; in gem_rx()
1471 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1472 skb->len, skb->csum); in gem_rx()
1476 skb->data, 32, true); in gem_rx()
1495 struct macb *bp = queue->bp; in macb_rx_frame()
1498 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1500 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1506 * payload word-aligned. in macb_rx_frame()
1512 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1514 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1517 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1534 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1539 return -1; in macb_rx_frame()
1541 frag_len = len - offset; in macb_rx_frame()
1546 offset += bp->rx_buffer_size; in macb_rx_frame()
1548 desc->addr &= ~MACB_BIT(RX_USED); in macb_rx_frame()
1558 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1560 bp->dev->stats.rx_packets++; in macb_rx_frame()
1561 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1562 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1563 skb->len, skb->csum); in macb_rx_frame()
1571 struct macb *bp = queue->bp; in macb_init_rx_ring()
1576 addr = queue->rx_buffers_dma; in macb_init_rx_ring()
1577 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1580 desc->ctrl = 0; in macb_init_rx_ring()
1581 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1583 desc->addr |= MACB_BIT(RX_WRAP); in macb_init_rx_ring()
1584 queue->rx_tail = 0; in macb_init_rx_ring()
1590 struct macb *bp = queue->bp; in macb_rx()
1594 int first_frag = -1; in macb_rx()
1596 for (tail = queue->rx_tail; budget > 0; tail++) { in macb_rx()
1603 if (!(desc->addr & MACB_BIT(RX_USED))) in macb_rx()
1606 /* Ensure ctrl is at least as up-to-date as addr */ in macb_rx()
1609 ctrl = desc->ctrl; in macb_rx()
1612 if (first_frag != -1) in macb_rx()
1620 if (unlikely(first_frag == -1)) { in macb_rx()
1626 first_frag = -1; in macb_rx()
1633 budget--; in macb_rx()
1642 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1644 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1650 queue_writel(queue, RBQP, queue->rx_ring_dma); in macb_rx()
1654 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1658 if (first_frag != -1) in macb_rx()
1659 queue->rx_tail = first_frag; in macb_rx()
1661 queue->rx_tail = tail; in macb_rx()
1668 struct macb *bp = queue->bp; in macb_rx_pending()
1672 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in macb_rx_pending()
1678 return (desc->addr & MACB_BIT(RX_USED)) != 0; in macb_rx_pending()
1684 struct macb *bp = queue->bp; in macb_rx_poll()
1687 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_rx_poll()
1689 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n", in macb_rx_poll()
1690 (unsigned int)(queue - bp->queues), work_done, budget); in macb_rx_poll()
1693 queue_writel(queue, IER, bp->rx_intr_mask); in macb_rx_poll()
1699 * interrupts are re-enabled. in macb_rx_poll()
1706 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_rx_poll()
1707 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_rx_poll()
1709 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n"); in macb_rx_poll()
1721 struct macb *bp = queue->bp; in macb_tx_restart()
1724 spin_lock(&queue->tx_ptr_lock); in macb_tx_restart()
1726 if (queue->tx_head == queue->tx_tail) in macb_tx_restart()
1731 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); in macb_tx_restart()
1736 spin_lock_irq(&bp->lock); in macb_tx_restart()
1738 spin_unlock_irq(&bp->lock); in macb_tx_restart()
1741 spin_unlock(&queue->tx_ptr_lock); in macb_tx_restart()
1748 spin_lock(&queue->tx_ptr_lock); in macb_tx_complete_pending()
1749 if (queue->tx_head != queue->tx_tail) { in macb_tx_complete_pending()
1753 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED)) in macb_tx_complete_pending()
1756 spin_unlock(&queue->tx_ptr_lock); in macb_tx_complete_pending()
1763 struct macb *bp = queue->bp; in macb_tx_poll()
1769 if (queue->txubr_pending) { in macb_tx_poll()
1770 queue->txubr_pending = false; in macb_tx_poll()
1771 netdev_vdbg(bp->dev, "poll: tx restart\n"); in macb_tx_poll()
1775 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n", in macb_tx_poll()
1776 (unsigned int)(queue - bp->queues), work_done, budget); in macb_tx_poll()
1785 * interrupts are re-enabled. in macb_tx_poll()
1793 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_poll()
1795 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n"); in macb_tx_poll()
1806 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1811 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1812 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1823 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1825 /* Initialize TX and RX buffers */ in macb_hresp_error_task()
1829 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1831 bp->rx_intr_mask | in macb_hresp_error_task()
1845 struct macb *bp = queue->bp; in macb_wol_interrupt()
1853 spin_lock(&bp->lock); in macb_wol_interrupt()
1858 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1859 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1861 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1863 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1866 spin_unlock(&bp->lock); in macb_wol_interrupt()
1874 struct macb *bp = queue->bp; in gem_wol_interrupt()
1882 spin_lock(&bp->lock); in gem_wol_interrupt()
1887 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1888 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1890 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1892 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1895 spin_unlock(&bp->lock); in gem_wol_interrupt()
1903 struct macb *bp = queue->bp; in macb_interrupt()
1904 struct net_device *dev = bp->dev; in macb_interrupt()
1912 spin_lock(&bp->lock); in macb_interrupt()
1917 queue_writel(queue, IDR, -1); in macb_interrupt()
1918 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1919 queue_writel(queue, ISR, -1); in macb_interrupt()
1923 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1924 (unsigned int)(queue - bp->queues), in macb_interrupt()
1927 if (status & bp->rx_intr_mask) { in macb_interrupt()
1934 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1935 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1938 if (napi_schedule_prep(&queue->napi_rx)) { in macb_interrupt()
1939 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1940 __napi_schedule(&queue->napi_rx); in macb_interrupt()
1947 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1952 queue->txubr_pending = true; in macb_interrupt()
1956 if (napi_schedule_prep(&queue->napi_tx)) { in macb_interrupt()
1957 netdev_vdbg(bp->dev, "scheduling TX softirq\n"); in macb_interrupt()
1958 __napi_schedule(&queue->napi_tx); in macb_interrupt()
1964 schedule_work(&queue->tx_error_task); in macb_interrupt()
1966 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1973 * add that if/when we get our hands on a full-blown MII PHY. in macb_interrupt()
1978 * interrupts but it can be cleared by re-enabling RX. See in macb_interrupt()
1989 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1996 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1998 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
2000 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
2005 queue_work(system_bh_wq, &bp->hresp_err_bh_work); in macb_interrupt()
2008 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
2014 spin_unlock(&bp->lock); in macb_interrupt()
2020 /* Polling receive - used by netconsole and other diagnostic tools
2031 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
2032 macb_interrupt(dev->irq, queue); in macb_poll_controller()
2043 unsigned int len, entry, i, tx_head = queue->tx_head; in macb_tx_map()
2047 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; in macb_tx_map()
2052 if (skb_shinfo(skb)->gso_size != 0) { in macb_tx_map()
2053 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_tx_map()
2054 /* UDP - UFO */ in macb_tx_map()
2057 /* TCP - TSO */ in macb_tx_map()
2061 /* First, map non-paged data */ in macb_tx_map()
2070 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2072 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
2073 skb->data + offset, in macb_tx_map()
2075 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2079 tx_skb->skb = NULL; in macb_tx_map()
2080 tx_skb->mapping = mapping; in macb_tx_map()
2081 tx_skb->size = size; in macb_tx_map()
2082 tx_skb->mapped_as_page = false; in macb_tx_map()
2084 len -= size; in macb_tx_map()
2089 size = min(len, bp->max_tx_length); in macb_tx_map()
2094 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_tx_map()
2099 size = min(len, bp->max_tx_length); in macb_tx_map()
2101 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2103 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
2105 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2109 tx_skb->skb = NULL; in macb_tx_map()
2110 tx_skb->mapping = mapping; in macb_tx_map()
2111 tx_skb->size = size; in macb_tx_map()
2112 tx_skb->mapped_as_page = true; in macb_tx_map()
2114 len -= size; in macb_tx_map()
2123 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
2128 tx_skb->skb = skb; in macb_tx_map()
2141 desc->ctrl = ctrl; in macb_tx_map()
2146 mss_mfs = skb_shinfo(skb)->gso_size + in macb_tx_map()
2150 mss_mfs = skb_shinfo(skb)->gso_size; in macb_tx_map()
2159 i--; in macb_tx_map()
2161 tx_skb = &queue->tx_skb[entry]; in macb_tx_map()
2164 ctrl = (u32)tx_skb->size; in macb_tx_map()
2169 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
2173 if (i == queue->tx_head) { in macb_tx_map()
2176 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2177 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && in macb_tx_map()
2187 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2188 /* desc->addr must be visible to hardware before clearing in macb_tx_map()
2189 * 'TX_USED' bit in desc->ctrl. in macb_tx_map()
2192 desc->ctrl = ctrl; in macb_tx_map()
2193 } while (i != queue->tx_head); in macb_tx_map()
2195 queue->tx_head = tx_head; in macb_tx_map()
2200 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2202 for (i = queue->tx_head; i != tx_head; i++) { in macb_tx_map()
2221 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) in macb_features_check()
2231 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) in macb_features_check()
2234 nr_frags = skb_shinfo(skb)->nr_frags; in macb_features_check()
2236 nr_frags--; in macb_features_check()
2238 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in macb_features_check()
2249 if (skb->ip_summed != CHECKSUM_PARTIAL) in macb_clear_csum()
2254 return -1; in macb_clear_csum()
2257 * This is required - at least for Zynq, which otherwise calculates in macb_clear_csum()
2260 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; in macb_clear_csum()
2268 int padlen = ETH_ZLEN - (*skb)->len; in macb_pad_and_fcs()
2273 if (!(ndev->features & NETIF_F_HW_CSUM) || in macb_pad_and_fcs()
2274 !((*skb)->ip_summed != CHECKSUM_PARTIAL) || in macb_pad_and_fcs()
2275 skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb)) in macb_pad_and_fcs()
2293 return -ENOMEM; in macb_pad_and_fcs()
2300 skb_put_zero(*skb, padlen - ETH_FCS_LEN); in macb_pad_and_fcs()
2304 fcs = crc32_le(~0, (*skb)->data, (*skb)->len); in macb_pad_and_fcs()
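
macb_pad_and_fcs() pads short frames and then computes the frame check sequence in software for skbs that get past the early-return checks above; the byte-append that follows the crc32_le() call is elided from this listing. As background, the IEEE 802.3 FCS is the bitwise complement of that CRC-32, transmitted least-significant byte first. A hedged sketch (buffer layout and helper name are illustrative, not the driver's code):

    #include <linux/types.h>
    #include <linux/crc32.h>

    /* Software Ethernet FCS: reflected CRC-32 seeded with ~0, complemented,
     * and appended little-endian.  The buffer must have ETH_FCS_LEN (4)
     * spare bytes past 'len'. */
    static void append_ethernet_fcs(u8 *frame, unsigned int len)
    {
            u32 fcs = ~crc32_le(~0, frame, len);

            frame[len + 0] = fcs & 0xff;
            frame[len + 1] = (fcs >> 8) & 0xff;
            frame[len + 2] = (fcs >> 16) & 0xff;
            frame[len + 3] = (fcs >> 24) & 0xff;
    }
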
2319 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2336 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in macb_start_xmit()
2337 (bp->hw_dma_cap & HW_DMA_CAP_PTP)) in macb_start_xmit()
2338 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in macb_start_xmit()
2341 is_lso = (skb_shinfo(skb)->gso_size != 0); in macb_start_xmit()
2345 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in macb_start_xmit()
2351 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2356 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2359 netdev_vdbg(bp->dev, in macb_start_xmit()
2361 queue_index, skb->len, skb->head, skb->data, in macb_start_xmit()
2364 skb->data, 16, true); in macb_start_xmit()
2373 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2375 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2376 nr_frags = skb_shinfo(skb)->nr_frags; in macb_start_xmit()
2378 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); in macb_start_xmit()
2379 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2382 spin_lock_bh(&queue->tx_ptr_lock); in macb_start_xmit()
2385 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, in macb_start_xmit()
2386 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2388 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2389 queue->tx_head, queue->tx_tail); in macb_start_xmit()
2404 spin_lock_irq(&bp->lock); in macb_start_xmit()
2406 spin_unlock_irq(&bp->lock); in macb_start_xmit()
2408 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2412 spin_unlock_bh(&queue->tx_ptr_lock); in macb_start_xmit()
2420 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2422 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2424 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2425 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2426 "RX buffer must be multiple of %d bytes, expanding\n", in macb_init_rx_buffer_size()
2428 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2429 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2433 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2434 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
2446 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2447 if (!queue->rx_skbuff) in gem_free_rx_buffers()
2450 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2451 skb = queue->rx_skbuff[i]; in gem_free_rx_buffers()
2459 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2465 kfree(queue->rx_skbuff); in gem_free_rx_buffers()
2466 queue->rx_skbuff = NULL; in gem_free_rx_buffers()
2472 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2474 if (queue->rx_buffers) { in macb_free_rx_buffers()
2475 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2476 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2477 queue->rx_buffers, queue->rx_buffers_dma); in macb_free_rx_buffers()
2478 queue->rx_buffers = NULL; in macb_free_rx_buffers()
2488 if (bp->rx_ring_tieoff) { in macb_free_consistent()
2489 dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp), in macb_free_consistent()
2490 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma); in macb_free_consistent()
2491 bp->rx_ring_tieoff = NULL; in macb_free_consistent()
2494 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2496 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2497 kfree(queue->tx_skb); in macb_free_consistent()
2498 queue->tx_skb = NULL; in macb_free_consistent()
2499 if (queue->tx_ring) { in macb_free_consistent()
2500 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2501 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2502 queue->tx_ring, queue->tx_ring_dma); in macb_free_consistent()
2503 queue->tx_ring = NULL; in macb_free_consistent()
2505 if (queue->rx_ring) { in macb_free_consistent()
2506 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2507 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2508 queue->rx_ring, queue->rx_ring_dma); in macb_free_consistent()
2509 queue->rx_ring = NULL; in macb_free_consistent()
2520 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2521 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2522 queue->rx_skbuff = kzalloc(size, GFP_KERNEL); in gem_alloc_rx_buffers()
2523 if (!queue->rx_skbuff) in gem_alloc_rx_buffers()
2524 return -ENOMEM; in gem_alloc_rx_buffers()
2526 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2527 "Allocated %d RX struct sk_buff entries at %p\n", in gem_alloc_rx_buffers()
2528 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2535 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2538 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2539 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2540 &queue->rx_buffers_dma, GFP_KERNEL); in macb_alloc_rx_buffers()
2541 if (!queue->rx_buffers) in macb_alloc_rx_buffers()
2542 return -ENOMEM; in macb_alloc_rx_buffers()
2544 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2545 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", in macb_alloc_rx_buffers()
2546 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); in macb_alloc_rx_buffers()
2556 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2557 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2558 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2559 &queue->tx_ring_dma, in macb_alloc_consistent()
2561 if (!queue->tx_ring) in macb_alloc_consistent()
2563 netdev_dbg(bp->dev, in macb_alloc_consistent()
2565 q, size, (unsigned long)queue->tx_ring_dma, in macb_alloc_consistent()
2566 queue->tx_ring); in macb_alloc_consistent()
2568 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2569 queue->tx_skb = kmalloc(size, GFP_KERNEL); in macb_alloc_consistent()
2570 if (!queue->tx_skb) in macb_alloc_consistent()
2573 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2574 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2575 &queue->rx_ring_dma, GFP_KERNEL); in macb_alloc_consistent()
2576 if (!queue->rx_ring) in macb_alloc_consistent()
2578 netdev_dbg(bp->dev, in macb_alloc_consistent()
2579 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", in macb_alloc_consistent()
2580 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); in macb_alloc_consistent()
2582 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2586 if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) { in macb_alloc_consistent()
2587 bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev, in macb_alloc_consistent()
2589 &bp->rx_ring_tieoff_dma, in macb_alloc_consistent()
2591 if (!bp->rx_ring_tieoff) in macb_alloc_consistent()
2599 return -ENOMEM; in macb_alloc_consistent()
2604 struct macb_dma_desc *desc = bp->rx_ring_tieoff; in macb_init_tieoff()
2606 if (bp->caps & MACB_CAPS_QUEUE_DISABLE) in macb_init_tieoff()
2609 * (WRAP and USED) to tie off/disable unused RX queues. in macb_init_tieoff()
2612 desc->ctrl = 0; in macb_init_tieoff()
2622 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2623 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2626 desc->ctrl = MACB_BIT(TX_USED); in gem_init_rings()
2628 desc->ctrl |= MACB_BIT(TX_WRAP); in gem_init_rings()
2629 queue->tx_head = 0; in gem_init_rings()
2630 queue->tx_tail = 0; in gem_init_rings()
2632 queue->rx_tail = 0; in gem_init_rings()
2633 queue->rx_prepared_head = 0; in gem_init_rings()
2646 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2648 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2649 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2651 desc->ctrl = MACB_BIT(TX_USED); in macb_init_rings()
2653 bp->queues[0].tx_head = 0; in macb_init_rings()
2654 bp->queues[0].tx_tail = 0; in macb_init_rings()
2655 desc->ctrl |= MACB_BIT(TX_WRAP); in macb_init_rings()
2666 /* Disable RX and TX (XXX: Should we halt the transmission in macb_reset_hw()
2677 macb_writel(bp, TSR, -1); in macb_reset_hw()
2678 macb_writel(bp, RSR, -1); in macb_reset_hw()
2680 /* Disable RX partial store and forward and reset watermark value */ in macb_reset_hw()
2684 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2685 queue_writel(queue, IDR, -1); in macb_reset_hw()
2687 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2688 queue_writel(queue, ISR, -1); in macb_reset_hw()
2694 u32 config; in gem_mdc_clk_div() local
2695 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2698 config = GEM_BF(CLK, GEM_CLK_DIV8); in gem_mdc_clk_div()
2700 config = GEM_BF(CLK, GEM_CLK_DIV16); in gem_mdc_clk_div()
2702 config = GEM_BF(CLK, GEM_CLK_DIV32); in gem_mdc_clk_div()
2704 config = GEM_BF(CLK, GEM_CLK_DIV48); in gem_mdc_clk_div()
2706 config = GEM_BF(CLK, GEM_CLK_DIV64); in gem_mdc_clk_div()
2708 config = GEM_BF(CLK, GEM_CLK_DIV96); in gem_mdc_clk_div()
2710 config = GEM_BF(CLK, GEM_CLK_DIV128); in gem_mdc_clk_div()
2712 config = GEM_BF(CLK, GEM_CLK_DIV224); in gem_mdc_clk_div()
2714 return config; in gem_mdc_clk_div()
2719 u32 config; in macb_mdc_clk_div() local
2725 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
2727 config = MACB_BF(CLK, MACB_CLK_DIV8); in macb_mdc_clk_div()
2729 config = MACB_BF(CLK, MACB_CLK_DIV16); in macb_mdc_clk_div()
2731 config = MACB_BF(CLK, MACB_CLK_DIV32); in macb_mdc_clk_div()
2733 config = MACB_BF(CLK, MACB_CLK_DIV64); in macb_mdc_clk_div()
2735 return config; in macb_mdc_clk_div()
2759 * - use the correct receive buffer size
2760 * - set best burst length for DMA operations
2762 * - set both rx/tx packet buffers to full memory size
2772 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2774 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2775 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2781 if (bp->dma_burst_length) in macb_configure_dma()
2782 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2783 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); in macb_configure_dma()
2786 if (bp->native_io) in macb_configure_dma()
2791 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2798 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2802 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2805 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2813 u32 config; in macb_init_hw() local
2818 config = macb_mdc_clk_div(bp); in macb_init_hw()
2819 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ in macb_init_hw()
2820 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ in macb_init_hw()
2821 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2822 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ in macb_init_hw()
2824 config |= MACB_BIT(BIG); /* Receive oversized frames */ in macb_init_hw()
2825 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2826 config |= MACB_BIT(CAF); /* Copy All Frames */ in macb_init_hw()
2827 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2828 config |= GEM_BIT(RXCOEN); in macb_init_hw()
2829 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2830 config |= MACB_BIT(NBC); /* No BroadCast */ in macb_init_hw()
2831 config |= macb_dbw(bp); in macb_init_hw()
2832 macb_writel(bp, NCFGR, config); in macb_init_hw()
2833 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2834 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2835 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2836 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2837 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2841 /* Enable RX partial store and forward and set watermark */ in macb_init_hw()
2842 if (bp->rx_watermark) in macb_init_hw()
2843 gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU))); in macb_init_hw()
2902 /* Add multicast addresses to the internal multicast-hash table. */
2914 bitnr = hash_get_index(ha->addr); in macb_sethashtable()
2930 if (dev->flags & IFF_PROMISC) { in macb_set_rx_mode()
2934 /* Disable RX checksum offload */ in macb_set_rx_mode()
2941 /* Enable RX checksum offload only if requested */ in macb_set_rx_mode()
2942 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2946 if (dev->flags & IFF_ALLMULTI) { in macb_set_rx_mode()
2948 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2949 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2955 } else if (dev->flags & (~IFF_ALLMULTI)) { in macb_set_rx_mode()
2967 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; in macb_open()
2973 netdev_dbg(bp->dev, "open\n"); in macb_open()
2975 err = pm_runtime_resume_and_get(&bp->pdev->dev); in macb_open()
2979 /* RX buffers initialization */ in macb_open()
2989 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
2990 napi_enable(&queue->napi_rx); in macb_open()
2991 napi_enable(&queue->napi_tx); in macb_open()
2996 err = phy_power_on(bp->sgmii_phy); in macb_open()
3006 if (bp->ptp_info) in macb_open()
3007 bp->ptp_info->ptp_init(dev); in macb_open()
3012 phy_power_off(bp->sgmii_phy); in macb_open()
3016 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_open()
3017 napi_disable(&queue->napi_rx); in macb_open()
3018 napi_disable(&queue->napi_tx); in macb_open()
3022 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
3035 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_close()
3036 napi_disable(&queue->napi_rx); in macb_close()
3037 napi_disable(&queue->napi_tx); in macb_close()
3040 phylink_stop(bp->phylink); in macb_close()
3041 phylink_disconnect_phy(bp->phylink); in macb_close()
3043 phy_power_off(bp->sgmii_phy); in macb_close()
3045 spin_lock_irqsave(&bp->lock, flags); in macb_close()
3048 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
3052 if (bp->ptp_info) in macb_close()
3053 bp->ptp_info->ptp_remove(dev); in macb_close()
3055 pm_runtime_put(&bp->pdev->dev); in macb_close()
3063 return -EBUSY; in macb_change_mtu()
3065 WRITE_ONCE(dev->mtu, new_mtu); in macb_change_mtu()
3088 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
3092 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
3094 bp->ethtool_stats[i] += val; in gem_update_stats()
3099 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
3100 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
3106 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
3107 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) in gem_update_stats()
3108 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
3113 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
3114 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
3116 if (!netif_running(bp->dev)) in gem_get_stats()
3121 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + in gem_get_stats()
3122 hwstat->rx_alignment_errors + in gem_get_stats()
3123 hwstat->rx_resource_errors + in gem_get_stats()
3124 hwstat->rx_overruns + in gem_get_stats()
3125 hwstat->rx_oversize_frames + in gem_get_stats()
3126 hwstat->rx_jabbers + in gem_get_stats()
3127 hwstat->rx_undersized_frames + in gem_get_stats()
3128 hwstat->rx_length_field_frame_errors); in gem_get_stats()
3129 nstat->tx_errors = (hwstat->tx_late_collisions + in gem_get_stats()
3130 hwstat->tx_excessive_collisions + in gem_get_stats()
3131 hwstat->tx_underrun + in gem_get_stats()
3132 hwstat->tx_carrier_sense_errors); in gem_get_stats()
3133 nstat->multicast = hwstat->rx_multicast_frames; in gem_get_stats()
3134 nstat->collisions = (hwstat->tx_single_collision_frames + in gem_get_stats()
3135 hwstat->tx_multiple_collision_frames + in gem_get_stats()
3136 hwstat->tx_excessive_collisions); in gem_get_stats()
3137 nstat->rx_length_errors = (hwstat->rx_oversize_frames + in gem_get_stats()
3138 hwstat->rx_jabbers + in gem_get_stats()
3139 hwstat->rx_undersized_frames + in gem_get_stats()
3140 hwstat->rx_length_field_frame_errors); in gem_get_stats()
3141 nstat->rx_over_errors = hwstat->rx_resource_errors; in gem_get_stats()
3142 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; in gem_get_stats()
3143 nstat->rx_frame_errors = hwstat->rx_alignment_errors; in gem_get_stats()
3144 nstat->rx_fifo_errors = hwstat->rx_overruns; in gem_get_stats()
3145 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; in gem_get_stats()
3146 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; in gem_get_stats()
3147 nstat->tx_fifo_errors = hwstat->tx_underrun; in gem_get_stats()
3159 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
3169 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
3171 return -EOPNOTSUPP; in gem_get_sset_count()
3189 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
3203 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
3204 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
3213 nstat->rx_errors = (hwstat->rx_fcs_errors + in macb_get_stats()
3214 hwstat->rx_align_errors + in macb_get_stats()
3215 hwstat->rx_resource_errors + in macb_get_stats()
3216 hwstat->rx_overruns + in macb_get_stats()
3217 hwstat->rx_oversize_pkts + in macb_get_stats()
3218 hwstat->rx_jabbers + in macb_get_stats()
3219 hwstat->rx_undersize_pkts + in macb_get_stats()
3220 hwstat->rx_length_mismatch); in macb_get_stats()
3221 nstat->tx_errors = (hwstat->tx_late_cols + in macb_get_stats()
3222 hwstat->tx_excessive_cols + in macb_get_stats()
3223 hwstat->tx_underruns + in macb_get_stats()
3224 hwstat->tx_carrier_errors + in macb_get_stats()
3225 hwstat->sqe_test_errors); in macb_get_stats()
3226 nstat->collisions = (hwstat->tx_single_cols + in macb_get_stats()
3227 hwstat->tx_multiple_cols + in macb_get_stats()
3228 hwstat->tx_excessive_cols); in macb_get_stats()
3229 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + in macb_get_stats()
3230 hwstat->rx_jabbers + in macb_get_stats()
3231 hwstat->rx_undersize_pkts + in macb_get_stats()
3232 hwstat->rx_length_mismatch); in macb_get_stats()
3233 nstat->rx_over_errors = hwstat->rx_resource_errors + in macb_get_stats()
3234 hwstat->rx_overruns; in macb_get_stats()
3235 nstat->rx_crc_errors = hwstat->rx_fcs_errors; in macb_get_stats()
3236 nstat->rx_frame_errors = hwstat->rx_align_errors; in macb_get_stats()
3237 nstat->rx_fifo_errors = hwstat->rx_overruns; in macb_get_stats()
3239 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; in macb_get_stats()
3240 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; in macb_get_stats()
3241 nstat->tx_fifo_errors = hwstat->tx_underruns; in macb_get_stats()
3259 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3262 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3263 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3276 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3277 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3279 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3289 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3290 wol->supported |= (WAKE_MAGIC | WAKE_ARP); in macb_get_wol()
3293 wol->wolopts |= bp->wolopts; in macb_get_wol()
3302 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3304 if (ret && ret != -EOPNOTSUPP) in macb_set_wol()
3307 bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0; in macb_set_wol()
3308 bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0; in macb_set_wol()
3309 bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0; in macb_set_wol()
3311 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); in macb_set_wol()
3321 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3329 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3339 ring->rx_max_pending = MAX_RX_RING_SIZE; in macb_get_ringparam()
3340 ring->tx_max_pending = MAX_TX_RING_SIZE; in macb_get_ringparam()
3342 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3343 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3355 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in macb_set_ringparam()
3356 return -EINVAL; in macb_set_ringparam()
3358 new_rx_size = clamp_t(u32, ring->rx_pending, in macb_set_ringparam()
3362 new_tx_size = clamp_t(u32, ring->tx_pending, in macb_set_ringparam()
3366 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3367 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3372 if (netif_running(bp->dev)) { in macb_set_ringparam()
3374 macb_close(bp->dev); in macb_set_ringparam()
3377 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3378 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3381 macb_open(bp->dev); in macb_set_ringparam()
3392 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3396 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3397 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3400 return -ENOTSUPP; in gem_get_tsu_rate()
3414 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3419 info->so_timestamping = in gem_get_ts_info()
3424 info->tx_types = in gem_get_ts_info()
3428 info->rx_filters = in gem_get_ts_info()
3432 if (bp->ptp_clock) in gem_get_ts_info()
3433 info->phc_index = ptp_clock_index(bp->ptp_clock); in gem_get_ts_info()
3454 if (bp->ptp_info) in macb_get_ts_info()
3455 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3462 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3467 if (!(netdev->features & NETIF_F_NTUPLE)) in gem_enable_flow_filters()
3472 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3473 struct ethtool_rx_flow_spec *fs = &item->fs; in gem_enable_flow_filters()
3476 if (fs->location >= num_t2_scr) in gem_enable_flow_filters()
3479 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3485 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_enable_flow_filters()
3487 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) in gem_enable_flow_filters()
3492 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) in gem_enable_flow_filters()
3497 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) in gem_enable_flow_filters()
3502 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3509 uint16_t index = fs->location; in gem_prog_cmp_regs()
3518 tp4sp_v = &(fs->h_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3519 tp4sp_m = &(fs->m_u.tcp_ip4_spec); in gem_prog_cmp_regs()
3522 if (tp4sp_m->ip4src == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3523 /* 1st compare reg - IP source address */ in gem_prog_cmp_regs()
3526 w0 = tp4sp_v->ip4src; in gem_prog_cmp_regs()
3527 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3536 if (tp4sp_m->ip4dst == 0xFFFFFFFF) { in gem_prog_cmp_regs()
3537 /* 2nd compare reg - IP destination address */ in gem_prog_cmp_regs()
3540 w0 = tp4sp_v->ip4dst; in gem_prog_cmp_regs()
3541 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3550 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { in gem_prog_cmp_regs()
3551 /* 3rd compare reg - source port, destination port */ in gem_prog_cmp_regs()
3555 if (tp4sp_m->psrc == tp4sp_m->pdst) { in gem_prog_cmp_regs()
3556 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3557 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3558 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ in gem_prog_cmp_regs()
3562 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ in gem_prog_cmp_regs()
3564 if (tp4sp_m->psrc == 0xFFFF) { /* src port */ in gem_prog_cmp_regs()
3565 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); in gem_prog_cmp_regs()
3568 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); in gem_prog_cmp_regs()
3578 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); in gem_prog_cmp_regs()
3593 struct ethtool_rx_flow_spec *fs = &cmd->fs; in gem_add_flow_filter()
3596 int ret = -EINVAL; in gem_add_flow_filter()
3601 return -ENOMEM; in gem_add_flow_filter()
3602 memcpy(&newfs->fs, fs, sizeof(newfs->fs)); in gem_add_flow_filter()
3606 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_add_flow_filter()
3607 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_add_flow_filter()
3608 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_add_flow_filter()
3609 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), in gem_add_flow_filter()
3610 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); in gem_add_flow_filter()
3612 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3615 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3616 if (item->fs.location > newfs->fs.location) { in gem_add_flow_filter()
3617 list_add_tail(&newfs->list, &item->list); in gem_add_flow_filter()
3620 } else if (item->fs.location == fs->location) { in gem_add_flow_filter()
3622 fs->location); in gem_add_flow_filter()
3623 ret = -EBUSY; in gem_add_flow_filter()
3628 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3631 bp->rx_fs_list.count++; in gem_add_flow_filter()
3635 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3639 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3652 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3654 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3655 if (item->fs.location == cmd->fs.location) { in gem_del_flow_filter()
3657 fs = &(item->fs); in gem_del_flow_filter()
3660 fs->flow_type, (int)fs->ring_cookie, fs->location, in gem_del_flow_filter()
3661 htonl(fs->h_u.tcp_ip4_spec.ip4src), in gem_del_flow_filter()
3662 htonl(fs->h_u.tcp_ip4_spec.ip4dst), in gem_del_flow_filter()
3663 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc), in gem_del_flow_filter()
3664 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst)); in gem_del_flow_filter()
3666 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3668 list_del(&item->list); in gem_del_flow_filter()
3669 bp->rx_fs_list.count--; in gem_del_flow_filter()
3670 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3676 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3677 return -EINVAL; in gem_del_flow_filter()
3686 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3687 if (item->fs.location == cmd->fs.location) { in gem_get_flow_entry()
3688 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); in gem_get_flow_entry()
3692 return -EINVAL; in gem_get_flow_entry()
3702 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3703 if (cnt == cmd->rule_cnt) in gem_get_all_flow_entries()
3704 return -EMSGSIZE; in gem_get_all_flow_entries()
3705 rule_locs[cnt] = item->fs.location; in gem_get_all_flow_entries()
3708 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3709 cmd->rule_cnt = cnt; in gem_get_all_flow_entries()
3720 switch (cmd->cmd) { in gem_get_rxnfc()
3722 cmd->data = bp->num_queues; in gem_get_rxnfc()
3725 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3735 "Command parameter %d is not supported\n", cmd->cmd); in gem_get_rxnfc()
3736 ret = -EOPNOTSUPP; in gem_get_rxnfc()
3747 switch (cmd->cmd) { in gem_set_rxnfc()
3749 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3750 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3751 ret = -EINVAL; in gem_set_rxnfc()
3761 "Command parameter %d is not supported\n", cmd->cmd); in gem_set_rxnfc()
3762 ret = -EOPNOTSUPP; in gem_set_rxnfc()
3804 return -EINVAL; in macb_ioctl()
3806 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3815 return -EINVAL; in macb_hwtstamp_get()
3817 if (!bp->ptp_info) in macb_hwtstamp_get()
3818 return -EOPNOTSUPP; in macb_hwtstamp_get()
3820 return bp->ptp_info->get_hwtst(dev, cfg); in macb_hwtstamp_get()
3830 return -EINVAL; in macb_hwtstamp_set()
3832 if (!bp->ptp_info) in macb_hwtstamp_set()
3833 return -EOPNOTSUPP; in macb_hwtstamp_set()
3835 return bp->ptp_info->set_hwtst(dev, cfg, extack); in macb_hwtstamp_set()
3858 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3865 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) in macb_set_rxcsum_feature()
3886 netdev_features_t changed = features ^ netdev->features; in macb_set_features()
3892 /* RX checksum offload */ in macb_set_features()
3896 /* RX Flow Filters */ in macb_set_features()
3905 struct net_device *netdev = bp->dev; in macb_restore_features()
3906 netdev_features_t features = netdev->features; in macb_restore_features()
3912 /* RX checksum offload */ in macb_restore_features()
3915 /* RX Flow Filters */ in macb_restore_features()
3916 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3917 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3950 bp->caps = dt_conf->caps; in macb_configure_caps()
3952 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3953 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3957 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3959 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
3962 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
3965 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3968 dev_err(&bp->pdev->dev, in macb_configure_caps()
3972 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3973 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3979 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
4025 pdata = dev_get_platdata(&pdev->dev); in macb_clk_init()
4027 *pclk = pdata->pclk; in macb_clk_init()
4028 *hclk = pdata->hclk; in macb_clk_init()
4030 *pclk = devm_clk_get(&pdev->dev, "pclk"); in macb_clk_init()
4031 *hclk = devm_clk_get(&pdev->dev, "hclk"); in macb_clk_init()
4035 return dev_err_probe(&pdev->dev, in macb_clk_init()
4036 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, in macb_clk_init()
4040 return dev_err_probe(&pdev->dev, in macb_clk_init()
4041 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, in macb_clk_init()
4044 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); in macb_clk_init()
4048 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); in macb_clk_init()
4052 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); in macb_clk_init()
4058 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in macb_clk_init()
4064 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); in macb_clk_init()
4070 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in macb_clk_init()
4076 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); in macb_clk_init()
4082 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); in macb_clk_init()
4112 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
4113 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
4120 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
4123 queue = &bp->queues[q]; in macb_init()
4124 queue->bp = bp; in macb_init()
4125 spin_lock_init(&queue->tx_ptr_lock); in macb_init()
4126 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll); in macb_init()
4127 netif_napi_add(dev, &queue->napi_tx, macb_tx_poll); in macb_init()
4129 queue->ISR = GEM_ISR(hw_q - 1); in macb_init()
4130 queue->IER = GEM_IER(hw_q - 1); in macb_init()
4131 queue->IDR = GEM_IDR(hw_q - 1); in macb_init()
4132 queue->IMR = GEM_IMR(hw_q - 1); in macb_init()
4133 queue->TBQP = GEM_TBQP(hw_q - 1); in macb_init()
4134 queue->RBQP = GEM_RBQP(hw_q - 1); in macb_init()
4135 queue->RBQS = GEM_RBQS(hw_q - 1); in macb_init()
4137 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
4138 queue->TBQPH = GEM_TBQPH(hw_q - 1); in macb_init()
4139 queue->RBQPH = GEM_RBQPH(hw_q - 1); in macb_init()
4144 queue->ISR = MACB_ISR; in macb_init()
4145 queue->IER = MACB_IER; in macb_init()
4146 queue->IDR = MACB_IDR; in macb_init()
4147 queue->IMR = MACB_IMR; in macb_init()
4148 queue->TBQP = MACB_TBQP; in macb_init()
4149 queue->RBQP = MACB_RBQP; in macb_init()
4151 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
4152 queue->TBQPH = MACB_TBQPH; in macb_init()
4153 queue->RBQPH = MACB_RBQPH; in macb_init()
4163 queue->irq = platform_get_irq(pdev, q); in macb_init()
4164 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, in macb_init()
4165 IRQF_SHARED, dev->name, queue); in macb_init()
4167 dev_err(&pdev->dev, in macb_init()
4169 queue->irq, err); in macb_init()
4173 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); in macb_init()
4177 dev->netdev_ops = &macb_netdev_ops; in macb_init()
4181 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
4182 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
4183 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
4184 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
4185 dev->ethtool_ops = &gem_ethtool_ops; in macb_init()
4187 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
4188 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
4189 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
4190 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
4191 dev->ethtool_ops = &macb_ethtool_ops; in macb_init()
4196 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in macb_init()
4199 dev->hw_features = NETIF_F_SG; in macb_init()
4203 dev->hw_features |= MACB_NETIF_LSO; in macb_init()
4206 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
4207 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; in macb_init()
4208 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
4209 dev->hw_features &= ~NETIF_F_SG; in macb_init()
4210 dev->features = dev->hw_features; in macb_init()
4212 /* Check RX Flow Filters support. in macb_init()
4213 * Max Rx flows set by availability of screeners & compare regs: in macb_init()
4214 * each 4-tuple filter definition requires 1 T2 screener reg + 3 compare regs in macb_init()
4217 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
4219 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
4220 if (bp->max_tuples > 0) { in macb_init()
4228 dev->hw_features |= NETIF_F_NTUPLE; in macb_init()
4229 /* init Rx flow definitions */ in macb_init()
4230 bp->rx_fs_list.count = 0; in macb_init()
4231 spin_lock_init(&bp->rx_fs_lock); in macb_init()
4233 bp->max_tuples = 0; in macb_init()
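/* A worked example of the sizing above (the register values are
 * hypothetical, not read from any particular GEM revision): hardware
 * advertising 12 type-2 compare registers and 4 screeners can hold
 * min(12 / 3, 4) = 4 four-tuple filters, because every filter needs one
 * screener plus three compare registers.
 */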
4236 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
4238 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
4239 val = bp->usrio->rgmii; in macb_init()
4240 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
4241 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4242 val = bp->usrio->rmii; in macb_init()
4243 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4244 val = bp->usrio->mii; in macb_init()
4246 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
4247 val |= bp->usrio->refclk; in macb_init()
4255 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4279 struct macb_queue *q = &lp->queues[0]; in at91ether_alloc_coherent()
4281 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4284 &q->rx_ring_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4285 if (!q->rx_ring) in at91ether_alloc_coherent()
4286 return -ENOMEM; in at91ether_alloc_coherent()
4288 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4291 &q->rx_buffers_dma, GFP_KERNEL); in at91ether_alloc_coherent()
4292 if (!q->rx_buffers) { in at91ether_alloc_coherent()
4293 dma_free_coherent(&lp->pdev->dev, in at91ether_alloc_coherent()
4296 q->rx_ring, q->rx_ring_dma); in at91ether_alloc_coherent()
4297 q->rx_ring = NULL; in at91ether_alloc_coherent()
4298 return -ENOMEM; in at91ether_alloc_coherent()
4306 struct macb_queue *q = &lp->queues[0]; in at91ether_free_coherent()
4308 if (q->rx_ring) { in at91ether_free_coherent()
4309 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4312 q->rx_ring, q->rx_ring_dma); in at91ether_free_coherent()
4313 q->rx_ring = NULL; in at91ether_free_coherent()
4316 if (q->rx_buffers) { in at91ether_free_coherent()
4317 dma_free_coherent(&lp->pdev->dev, in at91ether_free_coherent()
4320 q->rx_buffers, q->rx_buffers_dma); in at91ether_free_coherent()
4321 q->rx_buffers = NULL; in at91ether_free_coherent()
4328 struct macb_queue *q = &lp->queues[0]; in at91ether_start()
4338 addr = q->rx_buffers_dma; in at91ether_start()
4342 desc->ctrl = 0; in at91ether_start()
4347 desc->addr |= MACB_BIT(RX_WRAP); in at91ether_start()
4350 q->rx_tail = 0; in at91ether_start()
4352 /* Program address of descriptor list in Rx Buffer Queue register */ in at91ether_start()
4353 macb_writel(lp, RBQP, q->rx_ring_dma); in at91ether_start()
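/* At this point the single EMAC receive ring is ready: each descriptor
 * addresses one AT91ETHER_MAX_RBUFF_SZ slot of the coherent buffer block
 * (the receive path below indexes it the same way), the last descriptor
 * carries RX_WRAP so the controller loops back to descriptor 0, and RBQP
 * points the MAC at the start of the ring.
 */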
4399 ret = pm_runtime_resume_and_get(&lp->pdev->dev); in at91ether_open()
4424 pm_runtime_put_sync(&lp->pdev->dev); in at91ether_open()
4435 phylink_stop(lp->phylink); in at91ether_close()
4436 phylink_disconnect_phy(lp->phylink); in at91ether_close()
4440 return pm_runtime_put(&lp->pdev->dev); in at91ether_close()
4455 lp->rm9200_txq[desc].skb = skb; in at91ether_start_xmit()
4456 lp->rm9200_txq[desc].size = skb->len; in at91ether_start_xmit()
4457 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, in at91ether_start_xmit()
4458 skb->len, DMA_TO_DEVICE); in at91ether_start_xmit()
4459 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { in at91ether_start_xmit()
4461 dev->stats.tx_dropped++; in at91ether_start_xmit()
4467 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); in at91ether_start_xmit()
4469 macb_writel(lp, TCR, skb->len); in at91ether_start_xmit()
4485 struct macb_queue *q = &lp->queues[0]; in at91ether_rx()
4491 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
4492 while (desc->addr & MACB_BIT(RX_USED)) { in at91ether_rx()
4493 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; in at91ether_rx()
4494 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); in at91ether_rx()
4500 skb->protocol = eth_type_trans(skb, dev); in at91ether_rx()
4501 dev->stats.rx_packets++; in at91ether_rx()
4502 dev->stats.rx_bytes += pktlen; in at91ether_rx()
4505 dev->stats.rx_dropped++; in at91ether_rx()
4508 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) in at91ether_rx()
4509 dev->stats.multicast++; in at91ether_rx()
4512 desc->addr &= ~MACB_BIT(RX_USED); in at91ether_rx()
4515 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) in at91ether_rx()
4516 q->rx_tail = 0; in at91ether_rx()
4518 q->rx_tail++; in at91ether_rx()
4520 desc = macb_rx_desc(q, q->rx_tail); in at91ether_rx()
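/* RX_USED is the ownership handshake: the MAC sets it after writing a
 * frame into a slot, and the driver clears it once the data has been
 * copied out so the descriptor can be reused; rx_tail simply walks the
 * ring and wraps at AT91ETHER_MAX_RX_DESCR.
 */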
4545 dev->stats.tx_errors++; in at91ether_interrupt()
4548 if (lp->rm9200_txq[desc].skb) { in at91ether_interrupt()
4549 dev_consume_skb_irq(lp->rm9200_txq[desc].skb); in at91ether_interrupt()
4550 lp->rm9200_txq[desc].skb = NULL; in at91ether_interrupt()
4551 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, in at91ether_interrupt()
4552 lp->rm9200_txq[desc].size, DMA_TO_DEVICE); in at91ether_interrupt()
4553 dev->stats.tx_packets++; in at91ether_interrupt()
4554 dev->stats.tx_bytes += lp->rm9200_txq[desc].size; in at91ether_interrupt()
4559 /* Work-around for EMAC Errata section 41.3.1 */ in at91ether_interrupt()
4579 at91ether_interrupt(dev->irq, dev); in at91ether_poll_controller()
4611 *pclk = devm_clk_get(&pdev->dev, "ether_clk"); in at91ether_clk_init()
4617 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); in at91ether_clk_init()
4630 bp->queues[0].bp = bp; in at91ether_init()
4632 dev->netdev_ops = &at91ether_netdev_ops; in at91ether_init()
4633 dev->ethtool_ops = &macb_ethtool_ops; in at91ether_init()
4635 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, in at91ether_init()
4636 0, dev->name, dev); in at91ether_init()
4650 return mgmt->rate; in fu540_macb_tx_recalc_rate()
4683 iowrite32(1, mgmt->reg); in fu540_macb_tx_set_rate()
4685 iowrite32(0, mgmt->reg); in fu540_macb_tx_set_rate()
4686 mgmt->rate = rate; in fu540_macb_tx_set_rate()
4708 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); in fu540_c000_clk_init()
4710 err = -ENOMEM; in fu540_c000_clk_init()
4714 init.name = "sifive-gemgxl-mgmt"; in fu540_c000_clk_init()
4719 mgmt->rate = 0; in fu540_c000_clk_init()
4720 mgmt->hw.init = &init; in fu540_c000_clk_init()
4722 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); in fu540_c000_clk_init()
4730 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); in fu540_c000_clk_init()
4734 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); in fu540_c000_clk_init()
4747 mgmt->reg = devm_platform_ioremap_resource(pdev, 1); in fu540_c000_init()
4748 if (IS_ERR(mgmt->reg)) in fu540_c000_init()
4749 return PTR_ERR(mgmt->reg); in fu540_c000_init()
4760 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in init_reset_optional()
4762 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); in init_reset_optional()
4764 if (IS_ERR(bp->sgmii_phy)) in init_reset_optional()
4765 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy), in init_reset_optional()
4768 ret = phy_init(bp->sgmii_phy); in init_reset_optional()
4770 return dev_err_probe(&pdev->dev, ret, in init_reset_optional()
4777 ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains", in init_reset_optional()
4780 dev_err(&pdev->dev, "Failed to read power management information\n"); in init_reset_optional()
4795 ret = device_reset_optional(&pdev->dev); in init_reset_optional()
4797 phy_exit(bp->sgmii_phy); in init_reset_optional()
4798 return dev_err_probe(&pdev->dev, ret, "failed to reset controller"); in init_reset_optional()
4805 phy_exit(bp->sgmii_phy); in init_reset_optional()
4962 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4964 { .compatible = "cdns,np4-macb", .data = &np4_config },
4965 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4967 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4968 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4969 { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
4970 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4971 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4972 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4973 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4975 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
4976 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
4977 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4978 { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
4979 { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
4980 { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
4981 { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
4982 { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
4983 { .compatible = "xlnx,versal-gem", .data = &versal_config},
5005 struct clk **) = macb_config->clk_init; in macb_probe()
5006 int (*init)(struct platform_device *) = macb_config->init; in macb_probe()
5007 struct device_node *np = pdev->dev.of_node; in macb_probe()
5028 if (match && match->data) { in macb_probe()
5029 macb_config = match->data; in macb_probe()
5030 clk_init = macb_config->clk_init; in macb_probe()
5031 init = macb_config->init; in macb_probe()
5039 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); in macb_probe()
5040 pm_runtime_use_autosuspend(&pdev->dev); in macb_probe()
5041 pm_runtime_get_noresume(&pdev->dev); in macb_probe()
5042 pm_runtime_set_active(&pdev->dev); in macb_probe()
5043 pm_runtime_enable(&pdev->dev); in macb_probe()
5049 err = -ENOMEM; in macb_probe()
5053 dev->base_addr = regs->start; in macb_probe()
5055 SET_NETDEV_DEV(dev, &pdev->dev); in macb_probe()
5058 bp->pdev = pdev; in macb_probe()
5059 bp->dev = dev; in macb_probe()
5060 bp->regs = mem; in macb_probe()
5061 bp->native_io = native_io; in macb_probe()
5063 bp->macb_reg_readl = hw_readl_native; in macb_probe()
5064 bp->macb_reg_writel = hw_writel_native; in macb_probe()
5066 bp->macb_reg_readl = hw_readl; in macb_probe()
5067 bp->macb_reg_writel = hw_writel; in macb_probe()
5069 bp->num_queues = num_queues; in macb_probe()
5070 bp->queue_mask = queue_mask; in macb_probe()
5072 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
5073 bp->pclk = pclk; in macb_probe()
5074 bp->hclk = hclk; in macb_probe()
5075 bp->tx_clk = tx_clk; in macb_probe()
5076 bp->rx_clk = rx_clk; in macb_probe()
5077 bp->tsu_clk = tsu_clk; in macb_probe()
5079 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
5081 if (!hw_is_gem(bp->regs, bp->native_io)) in macb_probe()
5082 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_probe()
5083 else if (macb_config->max_tx_length) in macb_probe()
5084 bp->max_tx_length = macb_config->max_tx_length; in macb_probe()
5086 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_probe()
5088 bp->wol = 0; in macb_probe()
5089 device_set_wakeup_capable(&pdev->dev, 1); in macb_probe()
5091 bp->usrio = macb_config->usrio; in macb_probe()
5097 err = of_property_read_u32(bp->pdev->dev.of_node, in macb_probe()
5098 "cdns,rx-watermark", in macb_probe()
5099 &bp->rx_watermark); in macb_probe()
5105 wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1; in macb_probe()
5106 if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) { in macb_probe()
5107 dev_info(&bp->pdev->dev, "Invalid watermark value\n"); in macb_probe()
5108 bp->rx_watermark = 0; in macb_probe()
5112 spin_lock_init(&bp->lock); in macb_probe()
5119 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); in macb_probe()
5120 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
5125 dev->irq = platform_get_irq(pdev, 0); in macb_probe()
5126 if (dev->irq < 0) { in macb_probe()
5127 err = dev->irq; in macb_probe()
5131 /* MTU range: 68 - 1518 or 10240 */ in macb_probe()
5132 dev->min_mtu = GEM_MTU_MIN_SIZE; in macb_probe()
5133 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_probe()
5134 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
5136 dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
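/* Spelling out the arithmetic above: ETH_HLEN is 14 and ETH_FCS_LEN is 4,
 * so the non-jumbo ceiling is 1536 - 14 - 4 = 1518 bytes of MTU; on a part
 * with, say, a 10240-byte jumbo_max_len the same subtraction gives
 * 10240 - 18 = 10222 (the 10240 figure is only illustrative here).
 */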
5138 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
5141 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
5146 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
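/* For reference, 2 << (val - 1) equals 2^val, so an encoded value of 1, 2
 * or 3 selects a read-prefetch depth of 2, 4 or 8 buffer descriptors
 * respectively.
 */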
5150 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
5151 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
5152 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
5154 err = of_get_ethdev_address(np, bp->dev); in macb_probe()
5155 if (err == -EPROBE_DEFER) in macb_probe()
5163 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
5165 bp->phy_interface = interface; in macb_probe()
5180 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); in macb_probe()
5184 INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task); in macb_probe()
5188 dev->base_addr, dev->irq, dev->dev_addr); in macb_probe()
5190 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
5191 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
5196 mdiobus_unregister(bp->mii_bus); in macb_probe()
5197 mdiobus_free(bp->mii_bus); in macb_probe()
5200 phy_exit(bp->sgmii_phy); in macb_probe()
5207 pm_runtime_disable(&pdev->dev); in macb_probe()
5208 pm_runtime_set_suspended(&pdev->dev); in macb_probe()
5209 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_probe()
5223 phy_exit(bp->sgmii_phy); in macb_remove()
5224 mdiobus_unregister(bp->mii_bus); in macb_remove()
5225 mdiobus_free(bp->mii_bus); in macb_remove()
5228 cancel_work_sync(&bp->hresp_err_bh_work); in macb_remove()
5229 pm_runtime_disable(&pdev->dev); in macb_remove()
5230 pm_runtime_dont_use_autosuspend(&pdev->dev); in macb_remove()
5231 if (!pm_runtime_suspended(&pdev->dev)) { in macb_remove()
5232 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, in macb_remove()
5233 bp->rx_clk, bp->tsu_clk); in macb_remove()
5234 pm_runtime_set_suspended(&pdev->dev); in macb_remove()
5236 phylink_destroy(bp->phylink); in macb_remove()
5253 if (!device_may_wakeup(&bp->dev->dev)) in macb_suspend()
5254 phy_exit(bp->sgmii_phy); in macb_suspend()
5259 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
5261 idev = __in_dev_get_rcu(bp->dev); in macb_suspend()
5263 ifa = rcu_dereference(idev->ifa_list); in macb_suspend()
5264 if ((bp->wolopts & WAKE_ARP) && !ifa) { in macb_suspend()
5266 return -EOPNOTSUPP; in macb_suspend()
5268 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5270 /* Disable Tx and Rx engines before disabling the queues, in macb_suspend()
5275 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5277 /* Disable RX queues */ in macb_suspend()
5278 if (bp->caps & MACB_CAPS_QUEUE_DISABLE) { in macb_suspend()
5281 /* Tie off RX queues */ in macb_suspend()
5283 lower_32_bits(bp->rx_ring_tieoff_dma)); in macb_suspend()
5286 upper_32_bits(bp->rx_ring_tieoff_dma)); in macb_suspend()
5290 queue_writel(queue, IDR, -1); in macb_suspend()
5292 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
5293 queue_writel(queue, ISR, -1); in macb_suspend()
5298 macb_writel(bp, TSR, -1); in macb_suspend()
5299 macb_writel(bp, RSR, -1); in macb_suspend()
5301 tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0; in macb_suspend()
5302 if (bp->wolopts & WAKE_ARP) { in macb_suspend()
5305 tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local)); in macb_suspend()
5311 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
5313 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
5314 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5318 bp->queues[0].irq, err); in macb_suspend()
5319 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5322 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
5325 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
5326 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
5330 bp->queues[0].irq, err); in macb_suspend()
5331 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5334 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
5337 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5339 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
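/* Recap of the wake-on-LAN arming sequence above: with bp->lock held the
 * driver quiesces the TX/RX engines, then either disables the RX queues
 * (where the hardware supports it) or ties them off to a dummy descriptor,
 * masks and clears all queue interrupts, programs the wake sources (magic
 * packet and/or ARP against the interface's IPv4 address), swaps queue 0's
 * handler for the WoL interrupt handler, enables only the WOL interrupt
 * and finally marks that IRQ as a wakeup source.
 */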
5343 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
5345 napi_disable(&queue->napi_rx); in macb_suspend()
5346 napi_disable(&queue->napi_tx); in macb_suspend()
5349 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
5351 phylink_stop(bp->phylink); in macb_suspend()
5353 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
5355 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
5358 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
5359 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
5361 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_suspend()
5362 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
5364 if (bp->ptp_info) in macb_suspend()
5365 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
5381 if (!device_may_wakeup(&bp->dev->dev)) in macb_resume()
5382 phy_init(bp->sgmii_phy); in macb_resume()
5390 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
5391 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
5394 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
5397 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
5401 queue_readl(bp->queues, ISR); in macb_resume()
5402 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
5403 queue_writel(bp->queues, ISR, -1); in macb_resume()
5405 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
5406 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
5407 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
5411 bp->queues[0].irq, err); in macb_resume()
5412 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5415 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5417 disable_irq_wake(bp->queues[0].irq); in macb_resume()
5423 phylink_stop(bp->phylink); in macb_resume()
5427 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5429 napi_enable(&queue->napi_rx); in macb_resume()
5430 napi_enable(&queue->napi_tx); in macb_resume()
5433 if (netdev->hw_features & NETIF_F_NTUPLE) in macb_resume()
5434 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5436 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5437 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5445 phylink_start(bp->phylink); in macb_resume()
5449 if (bp->ptp_info) in macb_resume()
5450 bp->ptp_info->ptp_init(netdev); in macb_resume()
5461 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5462 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) in macb_runtime_suspend()
5463 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5474 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5475 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5476 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5477 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5478 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
5479 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) { in macb_runtime_resume()
5480 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()