Lines Matching +full:enable +full:- +full:lpa
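All hits are from the BCM63xx Ethernet/switch driver, drivers/net/ethernet/broadcom/bcm63xx_enet.c. The number leading each hit is its line number in that file, the trailing "in foo()" names the enclosing function, and multi-line statements and comments appear truncated because only the matching line is shown.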

1 // SPDX-License-Identifier: GPL-2.0-or-later
17 #include <linux/dma-mapping.h>
38 return bcm_readl(priv->base + off); in enet_readl()
44 bcm_writel(val, priv->base + off); in enet_writel()
52 return bcm_readl(priv->base + off); in enetsw_readl()
58 bcm_writel(val, priv->base + off); in enetsw_writel()
63 return bcm_readw(priv->base + off); in enetsw_readw()
69 bcm_writew(val, priv->base + off); in enetsw_writew()
74 return bcm_readb(priv->base + off); in enetsw_readb()
80 bcm_writeb(val, priv->base + off); in enetsw_writeb()
99 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); in enet_dmac_readl()
106 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); in enet_dmac_writel()
111 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); in enet_dmas_readl()
117 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); in enet_dmas_writel()
140 } while (limit-- > 0); in do_mdio_op()
159 return -1; in bcm_enet_mdio_read()
190 return bcm_enet_mdio_read(bus->priv, mii_id, regnum); in bcm_enet_mdio_read_phylib()
199 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); in bcm_enet_mdio_write_phylib()
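The MDIO helpers above end in do_mdio_op(), which busy-waits on a command-done flag with a bounded limit-- loop; bcm_enet_mdio_read() returns -1 on timeout. A minimal user-space model of that bounded-poll pattern, where the fake_* helpers are hypothetical stand-ins for the driver's enet_readl()/enet_writel() MMIO accesses:

/* Bounded-poll MDIO read, modeled on do_mdio_op()/bcm_enet_mdio_read().
 * The fake_* helpers simulate the hardware command-done flag. */
#include <stdio.h>

static int polls_left = 3;                      /* pretend: done after 3 polls */
static void fake_start_op(void)  { polls_left = 3; }
static int  fake_cmd_done(void)  { return --polls_left <= 0; }
static int  fake_read_data(void) { return 0x796d; }

static int mdio_read_polled(void)
{
        int limit = 1000;

        fake_start_op();
        do {
                if (fake_cmd_done())
                        return fake_read_data();
        } while (limit-- > 0);

        return -1;      /* timed out, same error convention as the driver */
}

int main(void)
{
        printf("mdio read -> 0x%x\n", mdio_read_polled());
        return 0;
}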
229 while (priv->rx_desc_count < priv->rx_ring_size) { in bcm_enet_refill_rx()
234 desc_idx = priv->rx_dirty_desc; in bcm_enet_refill_rx()
235 desc = &priv->rx_desc_cpu[desc_idx]; in bcm_enet_refill_rx()
237 if (!priv->rx_buf[desc_idx]) { in bcm_enet_refill_rx()
241 buf = napi_alloc_frag(priv->rx_frag_size); in bcm_enet_refill_rx()
243 buf = netdev_alloc_frag(priv->rx_frag_size); in bcm_enet_refill_rx()
246 priv->rx_buf[desc_idx] = buf; in bcm_enet_refill_rx()
247 desc->address = dma_map_single(&priv->pdev->dev, in bcm_enet_refill_rx()
248 buf + priv->rx_buf_offset, in bcm_enet_refill_rx()
249 priv->rx_buf_size, in bcm_enet_refill_rx()
253 len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT; in bcm_enet_refill_rx()
255 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { in bcm_enet_refill_rx()
256 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); in bcm_enet_refill_rx()
257 priv->rx_dirty_desc = 0; in bcm_enet_refill_rx()
259 priv->rx_dirty_desc++; in bcm_enet_refill_rx()
262 desc->len_stat = len_stat; in bcm_enet_refill_rx()
264 priv->rx_desc_count++; in bcm_enet_refill_rx()
267 if (priv->dma_has_sram) in bcm_enet_refill_rx()
268 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); in bcm_enet_refill_rx()
270 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); in bcm_enet_refill_rx()
275 if (priv->rx_desc_count == 0 && netif_running(dev)) { in bcm_enet_refill_rx()
276 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); in bcm_enet_refill_rx()
277 priv->rx_timeout.expires = jiffies + HZ; in bcm_enet_refill_rx()
278 add_timer(&priv->rx_timeout); in bcm_enet_refill_rx()
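bcm_enet_refill_rx() walks every empty slot, maps a fresh page fragment, sets the WRAP flag on the last descriptor so the DMA engine circles back to index 0, and, if nothing at all could be allocated, arms the one-second rx_timeout timer to retry. A compact model of the index/wrap bookkeeping; the flag values are illustrative, not the hardware encoding:

/* RX ring refill bookkeeping, modeled on bcm_enet_refill_rx(). */
#include <stdio.h>

#define RING_SIZE       8
#define DESC_OWNER      (1u << 15)      /* slot handed to the DMA engine */
#define DESC_WRAP       (1u << 12)      /* last slot: engine wraps to 0 */

static unsigned int ring[RING_SIZE];    /* stands in for desc->len_stat */
static int dirty, count;                /* rx_dirty_desc / rx_desc_count */

static void refill(void)
{
        while (count < RING_SIZE) {
                int idx = dirty;
                unsigned int len_stat = DESC_OWNER;

                if (dirty == RING_SIZE - 1) {   /* mark the wrap slot */
                        len_stat |= DESC_WRAP;
                        dirty = 0;
                } else {
                        dirty++;
                }

                ring[idx] = len_stat;   /* publish to "hardware" last */
                count++;
        }
}

int main(void)
{
        refill();
        printf("last slot wraps: %s\n",
               ring[RING_SIZE - 1] & DESC_WRAP ? "yes" : "no");
        return 0;
}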
290 struct net_device *dev = priv->net_dev; in bcm_enet_refill_rx_timer()
292 spin_lock(&priv->rx_lock); in bcm_enet_refill_rx_timer()
294 spin_unlock(&priv->rx_lock); in bcm_enet_refill_rx_timer()
309 kdev = &priv->pdev->dev; in bcm_enet_receive_queue()
314 if (budget > priv->rx_desc_count) in bcm_enet_receive_queue()
315 budget = priv->rx_desc_count; in bcm_enet_receive_queue()
325 desc_idx = priv->rx_curr_desc; in bcm_enet_receive_queue()
326 desc = &priv->rx_desc_cpu[desc_idx]; in bcm_enet_receive_queue()
332 len_stat = desc->len_stat; in bcm_enet_receive_queue()
339 priv->rx_curr_desc++; in bcm_enet_receive_queue()
340 if (priv->rx_curr_desc == priv->rx_ring_size) in bcm_enet_receive_queue()
341 priv->rx_curr_desc = 0; in bcm_enet_receive_queue()
345 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != in bcm_enet_receive_queue()
346 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { in bcm_enet_receive_queue()
347 dev->stats.rx_dropped++; in bcm_enet_receive_queue()
352 if (!priv->enet_is_sw && in bcm_enet_receive_queue()
354 dev->stats.rx_errors++; in bcm_enet_receive_queue()
357 dev->stats.rx_length_errors++; in bcm_enet_receive_queue()
359 dev->stats.rx_crc_errors++; in bcm_enet_receive_queue()
361 dev->stats.rx_frame_errors++; in bcm_enet_receive_queue()
363 dev->stats.rx_fifo_errors++; in bcm_enet_receive_queue()
368 buf = priv->rx_buf[desc_idx]; in bcm_enet_receive_queue()
371 len -= 4; in bcm_enet_receive_queue()
374 skb = napi_alloc_skb(&priv->napi, len); in bcm_enet_receive_queue()
377 dev->stats.rx_dropped++; in bcm_enet_receive_queue()
381 dma_sync_single_for_cpu(kdev, desc->address, in bcm_enet_receive_queue()
383 memcpy(skb->data, buf + priv->rx_buf_offset, len); in bcm_enet_receive_queue()
384 dma_sync_single_for_device(kdev, desc->address, in bcm_enet_receive_queue()
387 dma_unmap_single(kdev, desc->address, in bcm_enet_receive_queue()
388 priv->rx_buf_size, DMA_FROM_DEVICE); in bcm_enet_receive_queue()
389 priv->rx_buf[desc_idx] = NULL; in bcm_enet_receive_queue()
391 skb = napi_build_skb(buf, priv->rx_frag_size); in bcm_enet_receive_queue()
394 dev->stats.rx_dropped++; in bcm_enet_receive_queue()
397 skb_reserve(skb, priv->rx_buf_offset); in bcm_enet_receive_queue()
401 skb->protocol = eth_type_trans(skb, dev); in bcm_enet_receive_queue()
402 dev->stats.rx_packets++; in bcm_enet_receive_queue()
403 dev->stats.rx_bytes += len; in bcm_enet_receive_queue()
404 list_add_tail(&skb->list, &rx_list); in bcm_enet_receive_queue()
409 priv->rx_desc_count -= processed; in bcm_enet_receive_queue()
411 if (processed || !priv->rx_desc_count) { in bcm_enet_receive_queue()
415 enet_dmac_writel(priv, priv->dma_chan_en_mask, in bcm_enet_receive_queue()
416 ENETDMAC_CHANCFG, priv->rx_chan); in bcm_enet_receive_queue()
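The receive path above uses two hand-off strategies: frames below a copy-break threshold are memcpy()'d into a fresh napi_alloc_skb() buffer between a sync-for-cpu/sync-for-device pair, so the DMA buffer stays mapped in the ring, while larger frames are unmapped and wrapped in place with napi_build_skb(). A user-space model of that decision; the threshold is an assumption, since the listing does not show the driver's constant:

/* Copy-break receive, modeled on bcm_enet_receive_queue(). */
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256           /* assumed threshold, not from the listing */

struct pkt { unsigned char *data; int len; int zero_copy; };

static struct pkt receive(unsigned char *dma_buf, int len)
{
        struct pkt p = { .len = len };

        if (len < COPYBREAK) {
                /* small: copy out so dma_buf can be reposted immediately */
                p.data = malloc(len);
                if (!p.data) {          /* mirrors the rx_dropped path */
                        p.len = 0;
                        return p;
                }
                memcpy(p.data, dma_buf, len);
        } else {
                /* large: pass the buffer itself upstack (napi_build_skb()) */
                p.data = dma_buf;
                p.zero_copy = 1;
        }
        return p;
}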
436 while (priv->tx_desc_count < priv->tx_ring_size) { in bcm_enet_tx_reclaim()
442 spin_lock(&priv->tx_lock); in bcm_enet_tx_reclaim()
444 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; in bcm_enet_tx_reclaim()
446 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { in bcm_enet_tx_reclaim()
447 spin_unlock(&priv->tx_lock); in bcm_enet_tx_reclaim()
455 skb = priv->tx_skb[priv->tx_dirty_desc]; in bcm_enet_tx_reclaim()
456 priv->tx_skb[priv->tx_dirty_desc] = NULL; in bcm_enet_tx_reclaim()
457 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, in bcm_enet_tx_reclaim()
460 priv->tx_dirty_desc++; in bcm_enet_tx_reclaim()
461 if (priv->tx_dirty_desc == priv->tx_ring_size) in bcm_enet_tx_reclaim()
462 priv->tx_dirty_desc = 0; in bcm_enet_tx_reclaim()
463 priv->tx_desc_count++; in bcm_enet_tx_reclaim()
465 spin_unlock(&priv->tx_lock); in bcm_enet_tx_reclaim()
467 if (desc->len_stat & DMADESC_UNDER_MASK) in bcm_enet_tx_reclaim()
468 dev->stats.tx_errors++; in bcm_enet_tx_reclaim()
470 bytes += skb->len; in bcm_enet_tx_reclaim()
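bcm_enet_tx_reclaim() advances the dirty index until it meets a descriptor the hardware still owns (OWNER bit set), unmapping and freeing each completed skb under tx_lock; force overrides the ownership check during teardown. The core loop shape, with illustrative flag values:

/* TX completion reclaim, modeled on bcm_enet_tx_reclaim(). */
#define TX_RING         8
#define DESC_OWNER      (1u << 15)

static unsigned int tx_ring[TX_RING];   /* desc->len_stat stand-ins */
static int tx_dirty, tx_free;           /* tx_dirty_desc / tx_desc_count */

static int reclaim(int force)
{
        int released = 0;

        while (tx_free < TX_RING) {
                if (!force && (tx_ring[tx_dirty] & DESC_OWNER))
                        break;          /* hardware still owns this slot */

                /* driver: unmap DMA, free the skb, count bytes for BQL */
                if (++tx_dirty == TX_RING)
                        tx_dirty = 0;
                tx_free++;
                released++;
        }
        return released;
}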
493 dev = priv->net_dev; in bcm_enet_poll()
496 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_poll()
497 ENETDMAC_IR, priv->rx_chan); in bcm_enet_poll()
498 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_poll()
499 ENETDMAC_IR, priv->tx_chan); in bcm_enet_poll()
504 spin_lock(&priv->rx_lock); in bcm_enet_poll()
506 spin_unlock(&priv->rx_lock); in bcm_enet_poll()
518 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_poll()
519 ENETDMAC_IRMASK, priv->rx_chan); in bcm_enet_poll()
520 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_poll()
521 ENETDMAC_IRMASK, priv->tx_chan); in bcm_enet_poll()
547 schedule_work(&priv->mib_update_task); in bcm_enet_isr_mac()
564 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); in bcm_enet_isr_dma()
565 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); in bcm_enet_isr_dma()
567 napi_schedule(&priv->napi); in bcm_enet_isr_dma()
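bcm_enet_isr_dma() masks both channel interrupts (IRMASK writes of 0) and calls napi_schedule(); bcm_enet_poll() then acks the raw status (the ENETDMAC_IR writes), does the RX/TX work, and re-enables the masks only once it finishes under budget. A structural sketch of that handshake; mask_irqs()/ack_irqs()/unmask_irqs()/do_work() are hypothetical stand-ins for the register writes, and napi_complete_done() is the usual completion call, which this listing does not show:

/* NAPI mask/ack/unmask handshake, modeled on bcm_enet_isr_dma() and
 * bcm_enet_poll(). */
static irqreturn_t dma_isr(int irq, void *dev_id)
{
        mask_irqs();                    /* ENETDMAC_IRMASK <- 0, rx and tx */
        napi_schedule(&priv->napi);     /* defer the real work to softirq */
        return IRQ_HANDLED;
}

static int poll(struct napi_struct *napi, int budget)
{
        int work;

        ack_irqs();                     /* clear raw status via ENETDMAC_IR */
        work = do_work(budget);         /* reclaim tx, receive up to budget */
        if (work < budget && napi_complete_done(napi, work))
                unmask_irqs();          /* ENETDMAC_IRMASK <- int mask */
        return work;
}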
586 spin_lock(&priv->tx_lock); in bcm_enet_start_xmit()
590 if (unlikely(!priv->tx_desc_count)) { in bcm_enet_start_xmit()
592 dev_err(&priv->pdev->dev, "xmit called with no tx desc " in bcm_enet_start_xmit()
599 if (priv->enet_is_sw && skb->len < 64) { in bcm_enet_start_xmit()
600 int needed = 64 - skb->len; in bcm_enet_start_xmit()
618 desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; in bcm_enet_start_xmit()
619 priv->tx_skb[priv->tx_curr_desc] = skb; in bcm_enet_start_xmit()
622 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, in bcm_enet_start_xmit()
625 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; in bcm_enet_start_xmit()
626 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | in bcm_enet_start_xmit()
630 priv->tx_curr_desc++; in bcm_enet_start_xmit()
631 if (priv->tx_curr_desc == priv->tx_ring_size) { in bcm_enet_start_xmit()
632 priv->tx_curr_desc = 0; in bcm_enet_start_xmit()
633 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); in bcm_enet_start_xmit()
635 priv->tx_desc_count--; in bcm_enet_start_xmit()
640 desc->len_stat = len_stat; in bcm_enet_start_xmit()
643 netdev_sent_queue(dev, skb->len); in bcm_enet_start_xmit()
646 if (!netdev_xmit_more() || !priv->tx_desc_count) in bcm_enet_start_xmit()
647 enet_dmac_writel(priv, priv->dma_chan_en_mask, in bcm_enet_start_xmit()
648 ENETDMAC_CHANCFG, priv->tx_chan); in bcm_enet_start_xmit()
651 if (!priv->tx_desc_count) in bcm_enet_start_xmit()
654 dev->stats.tx_bytes += skb->len; in bcm_enet_start_xmit()
655 dev->stats.tx_packets++; in bcm_enet_start_xmit()
659 spin_unlock(&priv->tx_lock); in bcm_enet_start_xmit()
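The transmit path pads sub-64-byte frames when driving the internal switch (enet_is_sw), writes the descriptor with ESOP and, on the last ring slot, WRAP, then rings the DMA doorbell (the ENETDMAC_CHANCFG write) only when netdev_xmit_more() reports the end of a batch or the ring just ran out of descriptors. The batching decision in isolation; fill_desc() and kick_dma() are hypothetical stand-ins for the descriptor write and the doorbell:

/* Doorbell batching, modeled on bcm_enet_start_xmit(). */
fill_desc(priv, skb);
netdev_sent_queue(dev, skb->len);       /* BQL accounting */

/* one MMIO kick per batch, or immediately if the ring is now full */
if (!netdev_xmit_more() || !priv->tx_desc_count)
        kick_dma(priv);

if (!priv->tx_desc_count)
        netif_stop_queue(dev);          /* back-pressure the stack */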
673 eth_hw_addr_set(dev, addr->sa_data); in bcm_enet_set_mac_address()
676 val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | in bcm_enet_set_mac_address()
677 (dev->dev_addr[4] << 8) | dev->dev_addr[5]; in bcm_enet_set_mac_address()
680 val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); in bcm_enet_set_mac_address()
701 if (dev->flags & IFF_PROMISC) in bcm_enet_set_multicast_list()
708 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) in bcm_enet_set_multicast_list()
728 dmi_addr = ha->addr; in bcm_enet_set_multicast_list()
776 if (!priv->dma_has_sram) in bcm_enet_set_flow()
782 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); in bcm_enet_set_flow()
784 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); in bcm_enet_set_flow()
798 phydev = dev->phydev; in bcm_enet_adjust_phy_link()
801 if (priv->old_link != phydev->link) { in bcm_enet_adjust_phy_link()
803 priv->old_link = phydev->link; in bcm_enet_adjust_phy_link()
807 if (phydev->link && phydev->duplex != priv->old_duplex) { in bcm_enet_adjust_phy_link()
809 (phydev->duplex == DUPLEX_FULL) ? 1 : 0); in bcm_enet_adjust_phy_link()
811 priv->old_duplex = phydev->duplex; in bcm_enet_adjust_phy_link()
814 /* enable flow control if remote advertise it (trust phylib to in bcm_enet_adjust_phy_link()
816 if (phydev->link && phydev->pause != priv->old_pause) { in bcm_enet_adjust_phy_link()
819 if (phydev->pause) { in bcm_enet_adjust_phy_link()
820 /* pause was advertised by lpa and us */ in bcm_enet_adjust_phy_link()
823 } else if (!priv->pause_auto) { in bcm_enet_adjust_phy_link()
825 rx_pause_en = priv->pause_rx; in bcm_enet_adjust_phy_link()
826 tx_pause_en = priv->pause_tx; in bcm_enet_adjust_phy_link()
834 priv->old_pause = phydev->pause; in bcm_enet_adjust_phy_link()
838 pr_info("%s: link %s", dev->name, phydev->link ? in bcm_enet_adjust_phy_link()
840 if (phydev->link) in bcm_enet_adjust_phy_link()
841 pr_cont(" - %d/%s - flow control %s", phydev->speed, in bcm_enet_adjust_phy_link()
842 DUPLEX_FULL == phydev->duplex ? "full" : "half", in bcm_enet_adjust_phy_link()
843 phydev->pause == 1 ? "rx&tx" : "off"); in bcm_enet_adjust_phy_link()
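After autoneg, phydev->pause is phylib's resolved result of the local advertisement ANDed with the link partner's LPA; the driver enables pause when both ends advertised it, falls back to the user's forced settings when pause autoneg is off, and otherwise disables it. The branch structure, where the listing shows the first two arms and the final else is assumed as the usual completion:

/* Pause resolution, modeled on bcm_enet_adjust_phy_link(). */
int rx_pause_en, tx_pause_en;

if (phydev->pause) {                    /* advertised by LPA and us */
        rx_pause_en = 1;
        tx_pause_en = 1;
} else if (!priv->pause_auto) {         /* user forced a setting */
        rx_pause_en = priv->pause_rx;
        tx_pause_en = priv->pause_tx;
} else {                                /* assumed: autoneg yielded no pause */
        rx_pause_en = 0;
        tx_pause_en = 0;
}
bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);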
857 bcm_enet_set_duplex(priv, priv->force_duplex_full); in bcm_enet_adjust_link()
858 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); in bcm_enet_adjust_link()
861 pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", in bcm_enet_adjust_link()
862 dev->name, in bcm_enet_adjust_link()
863 priv->force_speed_100 ? 100 : 10, in bcm_enet_adjust_link()
864 priv->force_duplex_full ? "full" : "half", in bcm_enet_adjust_link()
865 priv->pause_rx ? "rx" : "off", in bcm_enet_adjust_link()
866 priv->pause_tx ? "tx" : "off"); in bcm_enet_adjust_link()
873 for (i = 0; i < priv->rx_ring_size; i++) { in bcm_enet_free_rx_buf_ring()
876 if (!priv->rx_buf[i]) in bcm_enet_free_rx_buf_ring()
879 desc = &priv->rx_desc_cpu[i]; in bcm_enet_free_rx_buf_ring()
880 dma_unmap_single(kdev, desc->address, priv->rx_buf_size, in bcm_enet_free_rx_buf_ring()
882 skb_free_frag(priv->rx_buf[i]); in bcm_enet_free_rx_buf_ring()
884 kfree(priv->rx_buf); in bcm_enet_free_rx_buf_ring()
903 kdev = &priv->pdev->dev; in bcm_enet_open()
905 if (priv->has_phy) { in bcm_enet_open()
908 priv->mii_bus->id, priv->phy_id); in bcm_enet_open()
921 phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, in bcm_enet_open()
922 priv->pause_auto); in bcm_enet_open()
926 priv->old_link = 0; in bcm_enet_open()
927 priv->old_duplex = -1; in bcm_enet_open()
928 priv->old_pause = -1; in bcm_enet_open()
935 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); in bcm_enet_open()
936 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); in bcm_enet_open()
938 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); in bcm_enet_open()
942 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, in bcm_enet_open()
943 dev->name, dev); in bcm_enet_open()
947 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, in bcm_enet_open()
948 0, dev->name, dev); in bcm_enet_open()
959 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); in bcm_enet_open()
963 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); in bcm_enet_open()
964 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); in bcm_enet_open()
966 ret = -ENOMEM; in bcm_enet_open()
970 priv->rx_desc_alloc_size = size; in bcm_enet_open()
971 priv->rx_desc_cpu = p; in bcm_enet_open()
974 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); in bcm_enet_open()
975 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); in bcm_enet_open()
977 ret = -ENOMEM; in bcm_enet_open()
981 priv->tx_desc_alloc_size = size; in bcm_enet_open()
982 priv->tx_desc_cpu = p; in bcm_enet_open()
984 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), in bcm_enet_open()
986 if (!priv->tx_skb) { in bcm_enet_open()
987 ret = -ENOMEM; in bcm_enet_open()
991 priv->tx_desc_count = priv->tx_ring_size; in bcm_enet_open()
992 priv->tx_dirty_desc = 0; in bcm_enet_open()
993 priv->tx_curr_desc = 0; in bcm_enet_open()
994 spin_lock_init(&priv->tx_lock); in bcm_enet_open()
997 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), in bcm_enet_open()
999 if (!priv->rx_buf) { in bcm_enet_open()
1000 ret = -ENOMEM; in bcm_enet_open()
1004 priv->rx_desc_count = 0; in bcm_enet_open()
1005 priv->rx_dirty_desc = 0; in bcm_enet_open()
1006 priv->rx_curr_desc = 0; in bcm_enet_open()
1009 if (priv->dma_has_sram) in bcm_enet_open()
1011 ENETDMA_BUFALLOC_REG(priv->rx_chan)); in bcm_enet_open()
1014 ENETDMAC_BUFALLOC, priv->rx_chan); in bcm_enet_open()
1018 ret = -ENOMEM; in bcm_enet_open()
1023 if (priv->dma_has_sram) { in bcm_enet_open()
1024 enet_dmas_writel(priv, priv->rx_desc_dma, in bcm_enet_open()
1025 ENETDMAS_RSTART_REG, priv->rx_chan); in bcm_enet_open()
1026 enet_dmas_writel(priv, priv->tx_desc_dma, in bcm_enet_open()
1027 ENETDMAS_RSTART_REG, priv->tx_chan); in bcm_enet_open()
1029 enet_dmac_writel(priv, priv->rx_desc_dma, in bcm_enet_open()
1030 ENETDMAC_RSTART, priv->rx_chan); in bcm_enet_open()
1031 enet_dmac_writel(priv, priv->tx_desc_dma, in bcm_enet_open()
1032 ENETDMAC_RSTART, priv->tx_chan); in bcm_enet_open()
1036 if (priv->dma_has_sram) { in bcm_enet_open()
1037 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); in bcm_enet_open()
1038 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); in bcm_enet_open()
1039 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); in bcm_enet_open()
1040 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); in bcm_enet_open()
1041 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); in bcm_enet_open()
1042 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); in bcm_enet_open()
1044 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); in bcm_enet_open()
1045 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); in bcm_enet_open()
1049 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); in bcm_enet_open()
1050 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); in bcm_enet_open()
1053 enet_dmac_writel(priv, priv->dma_maxburst, in bcm_enet_open()
1054 ENETDMAC_MAXBURST, priv->rx_chan); in bcm_enet_open()
1055 enet_dmac_writel(priv, priv->dma_maxburst, in bcm_enet_open()
1056 ENETDMAC_MAXBURST, priv->tx_chan); in bcm_enet_open()
1062 if (priv->dma_has_sram) { in bcm_enet_open()
1063 val = priv->rx_ring_size / 3; in bcm_enet_open()
1064 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); in bcm_enet_open()
1065 val = (priv->rx_ring_size * 2) / 3; in bcm_enet_open()
1066 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); in bcm_enet_open()
1068 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); in bcm_enet_open()
1069 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); in bcm_enet_open()
1070 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); in bcm_enet_open()
1073 /* all set, enable mac and interrupts, start dma engine and in bcm_enet_open()
1079 if (priv->dma_has_sram) in bcm_enet_open()
1081 enet_dmac_writel(priv, priv->dma_chan_en_mask, in bcm_enet_open()
1082 ENETDMAC_CHANCFG, priv->rx_chan); in bcm_enet_open()
1089 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_open()
1090 ENETDMAC_IR, priv->rx_chan); in bcm_enet_open()
1091 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_open()
1092 ENETDMAC_IR, priv->tx_chan); in bcm_enet_open()
1094 /* make sure we enable napi before rx interrupt */ in bcm_enet_open()
1095 napi_enable(&priv->napi); in bcm_enet_open()
1097 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_open()
1098 ENETDMAC_IRMASK, priv->rx_chan); in bcm_enet_open()
1099 enet_dmac_writel(priv, priv->dma_chan_int_mask, in bcm_enet_open()
1100 ENETDMAC_IRMASK, priv->tx_chan); in bcm_enet_open()
1114 kfree(priv->tx_skb); in bcm_enet_open()
1117 dma_free_coherent(kdev, priv->tx_desc_alloc_size, in bcm_enet_open()
1118 priv->tx_desc_cpu, priv->tx_desc_dma); in bcm_enet_open()
1121 dma_free_coherent(kdev, priv->rx_desc_alloc_size, in bcm_enet_open()
1122 priv->rx_desc_cpu, priv->rx_desc_dma); in bcm_enet_open()
1125 free_irq(priv->irq_tx, dev); in bcm_enet_open()
1128 free_irq(priv->irq_rx, dev); in bcm_enet_open()
1131 free_irq(dev->irq, dev); in bcm_enet_open()
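bcm_enet_open()'s failure handling (lines 1114-1131) unwinds in strict reverse order of acquisition through a ladder of labels: buffers, then TX descriptor memory, then RX descriptor memory, then the three IRQs. A minimal model of the goto-unwind idiom, with hypothetical acquire/release helpers:

/* goto-based unwind, modeled on the out_* ladder in bcm_enet_open(). */
int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

static int open_device(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;
        ret = acquire_b();
        if (ret)
                goto out_release_a;
        ret = acquire_c();
        if (ret)
                goto out_release_b;
        return 0;

out_release_b:
        release_b();
out_release_a:
        release_a();
        return ret;
}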
1160 } while (limit--); in bcm_enet_disable_mac()
1180 } while (limit--); in bcm_enet_disable_dma()
1192 kdev = &priv->pdev->dev; in bcm_enet_stop()
1195 napi_disable(&priv->napi); in bcm_enet_stop()
1196 if (priv->has_phy) in bcm_enet_stop()
1197 phy_stop(dev->phydev); in bcm_enet_stop()
1198 del_timer_sync(&priv->rx_timeout); in bcm_enet_stop()
1202 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); in bcm_enet_stop()
1203 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); in bcm_enet_stop()
1206 cancel_work_sync(&priv->mib_update_task); in bcm_enet_stop()
1209 bcm_enet_disable_dma(priv, priv->tx_chan); in bcm_enet_stop()
1210 bcm_enet_disable_dma(priv, priv->rx_chan); in bcm_enet_stop()
1220 kfree(priv->tx_skb); in bcm_enet_stop()
1221 dma_free_coherent(kdev, priv->rx_desc_alloc_size, in bcm_enet_stop()
1222 priv->rx_desc_cpu, priv->rx_desc_dma); in bcm_enet_stop()
1223 dma_free_coherent(kdev, priv->tx_desc_alloc_size, in bcm_enet_stop()
1224 priv->tx_desc_cpu, priv->tx_desc_dma); in bcm_enet_stop()
1225 free_irq(priv->irq_tx, dev); in bcm_enet_stop()
1226 free_irq(priv->irq_rx, dev); in bcm_enet_stop()
1227 free_irq(dev->irq, dev); in bcm_enet_stop()
1230 if (priv->has_phy) in bcm_enet_stop()
1231 phy_disconnect(dev->phydev); in bcm_enet_stop()
1249 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1251 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
1255 { "rx_packets", DEV_STAT(rx_packets), -1 },
1256 { "tx_packets", DEV_STAT(tx_packets), -1 },
1257 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1258 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1259 { "rx_errors", DEV_STAT(rx_errors), -1 },
1260 { "tx_errors", DEV_STAT(tx_errors), -1 },
1261 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1262 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1324 strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); in bcm_enet_get_drvinfo()
1325 strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); in bcm_enet_get_drvinfo()
1335 return -EINVAL; in bcm_enet_get_sset_count()
1365 if (s->mib_reg == -1) in update_mib_counters()
1368 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); in update_mib_counters()
1369 p = (char *)priv + s->stat_offset; in update_mib_counters()
1371 if (s->sizeof_stat == sizeof(u64)) in update_mib_counters()
1388 mutex_lock(&priv->mib_update_lock); in bcm_enet_update_mib_counters_defer()
1390 mutex_unlock(&priv->mib_update_lock); in bcm_enet_update_mib_counters_defer()
1393 if (netif_running(priv->net_dev)) in bcm_enet_update_mib_counters_defer()
1406 mutex_lock(&priv->mib_update_lock); in bcm_enet_get_ethtool_stats()
1414 if (s->mib_reg == -1) in bcm_enet_get_ethtool_stats()
1415 p = (char *)&netdev->stats; in bcm_enet_get_ethtool_stats()
1418 p += s->stat_offset; in bcm_enet_get_ethtool_stats()
1419 data[i] = (s->sizeof_stat == sizeof(u64)) ? in bcm_enet_get_ethtool_stats()
1422 mutex_unlock(&priv->mib_update_lock); in bcm_enet_get_ethtool_stats()
1430 if (priv->has_phy) in bcm_enet_nway_reset()
1433 return -EOPNOTSUPP; in bcm_enet_nway_reset()
1444 if (priv->has_phy) { in bcm_enet_get_link_ksettings()
1445 if (!dev->phydev) in bcm_enet_get_link_ksettings()
1446 return -ENODEV; in bcm_enet_get_link_ksettings()
1448 phy_ethtool_ksettings_get(dev->phydev, cmd); in bcm_enet_get_link_ksettings()
1452 cmd->base.autoneg = 0; in bcm_enet_get_link_ksettings()
1453 cmd->base.speed = (priv->force_speed_100) ? in bcm_enet_get_link_ksettings()
1455 cmd->base.duplex = (priv->force_duplex_full) ? in bcm_enet_get_link_ksettings()
1463 cmd->link_modes.supported, supported); in bcm_enet_get_link_ksettings()
1465 cmd->link_modes.advertising, advertising); in bcm_enet_get_link_ksettings()
1466 cmd->base.port = PORT_MII; in bcm_enet_get_link_ksettings()
1477 if (priv->has_phy) { in bcm_enet_set_link_ksettings()
1478 if (!dev->phydev) in bcm_enet_set_link_ksettings()
1479 return -ENODEV; in bcm_enet_set_link_ksettings()
1480 return phy_ethtool_ksettings_set(dev->phydev, cmd); in bcm_enet_set_link_ksettings()
1483 if (cmd->base.autoneg || in bcm_enet_set_link_ksettings()
1484 (cmd->base.speed != SPEED_100 && in bcm_enet_set_link_ksettings()
1485 cmd->base.speed != SPEED_10) || in bcm_enet_set_link_ksettings()
1486 cmd->base.port != PORT_MII) in bcm_enet_set_link_ksettings()
1487 return -EINVAL; in bcm_enet_set_link_ksettings()
1489 priv->force_speed_100 = in bcm_enet_set_link_ksettings()
1490 (cmd->base.speed == SPEED_100) ? 1 : 0; in bcm_enet_set_link_ksettings()
1491 priv->force_duplex_full = in bcm_enet_set_link_ksettings()
1492 (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0; in bcm_enet_set_link_ksettings()
1511 ering->rx_max_pending = 8192; in bcm_enet_get_ringparam()
1512 ering->tx_max_pending = 8192; in bcm_enet_get_ringparam()
1513 ering->rx_pending = priv->rx_ring_size; in bcm_enet_get_ringparam()
1514 ering->tx_pending = priv->tx_ring_size; in bcm_enet_get_ringparam()
1533 priv->rx_ring_size = ering->rx_pending; in bcm_enet_set_ringparam()
1534 priv->tx_ring_size = ering->tx_pending; in bcm_enet_set_ringparam()
1554 ecmd->autoneg = priv->pause_auto; in bcm_enet_get_pauseparam()
1555 ecmd->rx_pause = priv->pause_rx; in bcm_enet_get_pauseparam()
1556 ecmd->tx_pause = priv->pause_tx; in bcm_enet_get_pauseparam()
1566 if (priv->has_phy) { in bcm_enet_set_pauseparam()
1567 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { in bcm_enet_set_pauseparam()
1571 return -EINVAL; in bcm_enet_set_pauseparam()
1575 if (ecmd->autoneg) in bcm_enet_set_pauseparam()
1576 return -EINVAL; in bcm_enet_set_pauseparam()
1579 priv->pause_auto = ecmd->autoneg; in bcm_enet_set_pauseparam()
1580 priv->pause_rx = ecmd->rx_pause; in bcm_enet_set_pauseparam()
1581 priv->pause_tx = ecmd->tx_pause; in bcm_enet_set_pauseparam()
1606 if (priv->has_phy) { in bcm_enet_ioctl()
1607 if (!dev->phydev) in bcm_enet_ioctl()
1608 return -ENODEV; in bcm_enet_ioctl()
1609 return phy_mii_ioctl(dev->phydev, rq, cmd); in bcm_enet_ioctl()
1632 return -EBUSY; in bcm_enet_change_mtu()
1643 priv->hw_mtu = actual_mtu; in bcm_enet_change_mtu()
1649 priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN, in bcm_enet_change_mtu()
1650 priv->dma_maxburst * 4); in bcm_enet_change_mtu()
1652 priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) + in bcm_enet_change_mtu()
1655 WRITE_ONCE(dev->mtu, new_mtu); in bcm_enet_change_mtu()
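bcm_enet_change_mtu() derives the DMA buffer size by rounding the frame (MTU plus FCS) up to a multiple of four DMA burst words, then sizes the page fragment to also hold the headroom offset and a tail region. The trailing "+" on the rx_frag_size line continues onto an unmatched source line; the skb_shared_info term below is the assumed continuation, since napi_build_skb() needs that tail:

/* RX sizing, modeled on bcm_enet_change_mtu(); the second term of
 * rx_frag_size is an assumption about the truncated continuation. */
priv->rx_buf_size  = ALIGN(actual_mtu + ETH_FCS_LEN,
                           priv->dma_maxburst * 4);
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
                     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));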
1681 } while (limit--); in bcm_enet_hw_preinit()
1685 if (priv->use_external_mii) in bcm_enet_hw_preinit()
1695 /* set mib counters to self-clear when read */ in bcm_enet_hw_preinit()
1724 return -EPROBE_DEFER; in bcm_enet_probe()
1730 return -ENODEV; in bcm_enet_probe()
1734 return -ENOMEM; in bcm_enet_probe()
1737 priv->enet_is_sw = false; in bcm_enet_probe()
1738 priv->dma_maxburst = BCMENET_DMA_MAXBURST; in bcm_enet_probe()
1739 priv->rx_buf_offset = NET_SKB_PAD; in bcm_enet_probe()
1741 ret = bcm_enet_change_mtu(dev, dev->mtu); in bcm_enet_probe()
1745 priv->base = devm_platform_ioremap_resource(pdev, 0); in bcm_enet_probe()
1746 if (IS_ERR(priv->base)) { in bcm_enet_probe()
1747 ret = PTR_ERR(priv->base); in bcm_enet_probe()
1751 dev->irq = priv->irq = irq; in bcm_enet_probe()
1752 priv->irq_rx = irq_rx; in bcm_enet_probe()
1753 priv->irq_tx = irq_tx; in bcm_enet_probe()
1755 priv->mac_clk = devm_clk_get(&pdev->dev, "enet"); in bcm_enet_probe()
1756 if (IS_ERR(priv->mac_clk)) { in bcm_enet_probe()
1757 ret = PTR_ERR(priv->mac_clk); in bcm_enet_probe()
1760 ret = clk_prepare_enable(priv->mac_clk); in bcm_enet_probe()
1765 priv->rx_ring_size = BCMENET_DEF_RX_DESC; in bcm_enet_probe()
1766 priv->tx_ring_size = BCMENET_DEF_TX_DESC; in bcm_enet_probe()
1768 pd = dev_get_platdata(&pdev->dev); in bcm_enet_probe()
1770 eth_hw_addr_set(dev, pd->mac_addr); in bcm_enet_probe()
1771 priv->has_phy = pd->has_phy; in bcm_enet_probe()
1772 priv->phy_id = pd->phy_id; in bcm_enet_probe()
1773 priv->has_phy_interrupt = pd->has_phy_interrupt; in bcm_enet_probe()
1774 priv->phy_interrupt = pd->phy_interrupt; in bcm_enet_probe()
1775 priv->use_external_mii = !pd->use_internal_phy; in bcm_enet_probe()
1776 priv->pause_auto = pd->pause_auto; in bcm_enet_probe()
1777 priv->pause_rx = pd->pause_rx; in bcm_enet_probe()
1778 priv->pause_tx = pd->pause_tx; in bcm_enet_probe()
1779 priv->force_duplex_full = pd->force_duplex_full; in bcm_enet_probe()
1780 priv->force_speed_100 = pd->force_speed_100; in bcm_enet_probe()
1781 priv->dma_chan_en_mask = pd->dma_chan_en_mask; in bcm_enet_probe()
1782 priv->dma_chan_int_mask = pd->dma_chan_int_mask; in bcm_enet_probe()
1783 priv->dma_chan_width = pd->dma_chan_width; in bcm_enet_probe()
1784 priv->dma_has_sram = pd->dma_has_sram; in bcm_enet_probe()
1785 priv->dma_desc_shift = pd->dma_desc_shift; in bcm_enet_probe()
1786 priv->rx_chan = pd->rx_chan; in bcm_enet_probe()
1787 priv->tx_chan = pd->tx_chan; in bcm_enet_probe()
1790 if (priv->has_phy && !priv->use_external_mii) { in bcm_enet_probe()
1791 /* using internal PHY, enable clock */ in bcm_enet_probe()
1792 priv->phy_clk = devm_clk_get(&pdev->dev, "ephy"); in bcm_enet_probe()
1793 if (IS_ERR(priv->phy_clk)) { in bcm_enet_probe()
1794 ret = PTR_ERR(priv->phy_clk); in bcm_enet_probe()
1795 priv->phy_clk = NULL; in bcm_enet_probe()
1798 ret = clk_prepare_enable(priv->phy_clk); in bcm_enet_probe()
1807 if (priv->has_phy) { in bcm_enet_probe()
1809 priv->mii_bus = mdiobus_alloc(); in bcm_enet_probe()
1810 if (!priv->mii_bus) { in bcm_enet_probe()
1811 ret = -ENOMEM; in bcm_enet_probe()
1815 bus = priv->mii_bus; in bcm_enet_probe()
1816 bus->name = "bcm63xx_enet MII bus"; in bcm_enet_probe()
1817 bus->parent = &pdev->dev; in bcm_enet_probe()
1818 bus->priv = priv; in bcm_enet_probe()
1819 bus->read = bcm_enet_mdio_read_phylib; in bcm_enet_probe()
1820 bus->write = bcm_enet_mdio_write_phylib; in bcm_enet_probe()
1821 sprintf(bus->id, "%s-%d", pdev->name, pdev->id); in bcm_enet_probe()
1826 bus->phy_mask = ~(1 << priv->phy_id); in bcm_enet_probe()
1828 if (priv->has_phy_interrupt) in bcm_enet_probe()
1829 bus->irq[priv->phy_id] = priv->phy_interrupt; in bcm_enet_probe()
1833 dev_err(&pdev->dev, "unable to register mdio bus\n"); in bcm_enet_probe()
1839 if (pd && pd->mii_config && in bcm_enet_probe()
1840 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, in bcm_enet_probe()
1842 dev_err(&pdev->dev, "unable to configure mdio bus\n"); in bcm_enet_probe()
1847 spin_lock_init(&priv->rx_lock); in bcm_enet_probe()
1850 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); in bcm_enet_probe()
1853 mutex_init(&priv->mib_update_lock); in bcm_enet_probe()
1854 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); in bcm_enet_probe()
1861 dev->netdev_ops = &bcm_enet_ops; in bcm_enet_probe()
1862 netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16); in bcm_enet_probe()
1864 dev->ethtool_ops = &bcm_enet_ethtool_ops; in bcm_enet_probe()
1865 /* MTU range: 46 - 2028 */ in bcm_enet_probe()
1866 dev->min_mtu = ETH_ZLEN - ETH_HLEN; in bcm_enet_probe()
1867 dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN; in bcm_enet_probe()
1868 SET_NETDEV_DEV(dev, &pdev->dev); in bcm_enet_probe()
1876 priv->pdev = pdev; in bcm_enet_probe()
1877 priv->net_dev = dev; in bcm_enet_probe()
1882 if (priv->mii_bus) in bcm_enet_probe()
1883 mdiobus_unregister(priv->mii_bus); in bcm_enet_probe()
1886 if (priv->mii_bus) in bcm_enet_probe()
1887 mdiobus_free(priv->mii_bus); in bcm_enet_probe()
1892 clk_disable_unprepare(priv->phy_clk); in bcm_enet_probe()
1895 clk_disable_unprepare(priv->mac_clk); in bcm_enet_probe()
1918 if (priv->has_phy) { in bcm_enet_remove()
1919 mdiobus_unregister(priv->mii_bus); in bcm_enet_remove()
1920 mdiobus_free(priv->mii_bus); in bcm_enet_remove()
1924 pd = dev_get_platdata(&pdev->dev); in bcm_enet_remove()
1925 if (pd && pd->mii_config) in bcm_enet_remove()
1926 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, in bcm_enet_remove()
1931 clk_disable_unprepare(priv->phy_clk); in bcm_enet_remove()
1932 clk_disable_unprepare(priv->mac_clk); in bcm_enet_remove()
1954 spin_lock_bh(&priv->enetsw_mdio_lock); in bcmenet_sw_mdio_read()
1967 spin_unlock_bh(&priv->enetsw_mdio_lock); in bcmenet_sw_mdio_read()
1977 spin_lock_bh(&priv->enetsw_mdio_lock); in bcmenet_sw_mdio_write()
1991 spin_unlock_bh(&priv->enetsw_mdio_lock); in bcmenet_sw_mdio_write()
2007 for (i = 0; i < priv->num_ports; i++) { in swphy_poll_timer()
2009 int val, j, up, advertise, lpa, speed, duplex, media; in swphy_poll_timer()

2013 port = &priv->used_ports[i]; in swphy_poll_timer()
2014 if (!port->used) in swphy_poll_timer()
2017 if (port->bypass_link) in swphy_poll_timer()
2023 port->phy_id, MII_BMSR); in swphy_poll_timer()
2029 if (!(up ^ priv->sw_port_link[i])) in swphy_poll_timer()
2032 priv->sw_port_link[i] = up; in swphy_poll_timer()
2036 dev_info(&priv->pdev->dev, "link DOWN on %s\n", in swphy_poll_timer()
2037 port->name); in swphy_poll_timer()
2047 port->phy_id, MII_ADVERTISE); in swphy_poll_timer()
2049 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, in swphy_poll_timer()
2052 /* figure out media and duplex from advertise and LPA values */ in swphy_poll_timer()
2053 media = mii_nway_result(lpa & advertise); in swphy_poll_timer()
2063 port->phy_id, MII_CTRL1000); in swphy_poll_timer()
2065 lpa = bcmenet_sw_mdio_read(priv, external_phy, in swphy_poll_timer()
2066 port->phy_id, MII_STAT1000); in swphy_poll_timer()
2069 && lpa & (LPA_1000FULL | LPA_1000HALF)) { in swphy_poll_timer()
2071 duplex = (lpa & LPA_1000FULL); in swphy_poll_timer()
2075 dev_info(&priv->pdev->dev, in swphy_poll_timer()
2076 "link UP on %s, %dMbps, %s-duplex\n", in swphy_poll_timer()
2077 port->name, speed, duplex ? "full" : "half"); in swphy_poll_timer()
2093 priv->swphy_poll.expires = jiffies + HZ; in swphy_poll_timer()
2094 add_timer(&priv->swphy_poll); in swphy_poll_timer()
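swphy_poll_timer() reads MII_BMSR for link state, then resolves the negotiated mode from the intersection of the local MII_ADVERTISE and the partner's MII_LPA via mii_nway_result(); for gigabit it re-reads advertise and lpa from MII_CTRL1000/MII_STAT1000 (lines 2063-2066). A sketch of that resolution; the exact bit tests are assumptions based on linux/mii.h, since the listing omits those lines:

/* Speed/duplex from advertise & LPA, modeled on swphy_poll_timer(). */
media = mii_nway_result(lpa & advertise);
speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ? 100 : 10;
duplex = !!(media & (ADVERTISE_100FULL | ADVERTISE_10FULL));

/* gigabit: advertise/lpa re-read from MII_CTRL1000/MII_STAT1000 */
if ((advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
    (lpa & (LPA_1000FULL | LPA_1000HALF))) {
        speed = 1000;
        duplex = !!(lpa & LPA_1000FULL);
}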
2110 kdev = &priv->pdev->dev; in bcm_enetsw_open()
2113 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); in bcm_enetsw_open()
2114 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); in bcm_enetsw_open()
2116 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, in bcm_enetsw_open()
2117 0, dev->name, dev); in bcm_enetsw_open()
2121 if (priv->irq_tx != -1) { in bcm_enetsw_open()
2122 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, in bcm_enetsw_open()
2123 0, dev->name, dev); in bcm_enetsw_open()
2129 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); in bcm_enetsw_open()
2130 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); in bcm_enetsw_open()
2133 ret = -ENOMEM; in bcm_enetsw_open()
2137 priv->rx_desc_alloc_size = size; in bcm_enetsw_open()
2138 priv->rx_desc_cpu = p; in bcm_enetsw_open()
2141 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); in bcm_enetsw_open()
2142 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); in bcm_enetsw_open()
2145 ret = -ENOMEM; in bcm_enetsw_open()
2149 priv->tx_desc_alloc_size = size; in bcm_enetsw_open()
2150 priv->tx_desc_cpu = p; in bcm_enetsw_open()
2152 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), in bcm_enetsw_open()
2154 if (!priv->tx_skb) { in bcm_enetsw_open()
2156 ret = -ENOMEM; in bcm_enetsw_open()
2160 priv->tx_desc_count = priv->tx_ring_size; in bcm_enetsw_open()
2161 priv->tx_dirty_desc = 0; in bcm_enetsw_open()
2162 priv->tx_curr_desc = 0; in bcm_enetsw_open()
2163 spin_lock_init(&priv->tx_lock); in bcm_enetsw_open()
2166 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), in bcm_enetsw_open()
2168 if (!priv->rx_buf) { in bcm_enetsw_open()
2170 ret = -ENOMEM; in bcm_enetsw_open()
2174 priv->rx_desc_count = 0; in bcm_enetsw_open()
2175 priv->rx_dirty_desc = 0; in bcm_enetsw_open()
2176 priv->rx_curr_desc = 0; in bcm_enetsw_open()
2179 for (i = 0; i < priv->num_ports; i++) { in bcm_enetsw_open()
2186 priv->sw_port_link[i] = 0; in bcm_enetsw_open()
2203 /* enable switch forward engine */ in bcm_enetsw_open()
2208 /* enable jumbo on all ports */ in bcm_enetsw_open()
2214 ENETDMA_BUFALLOC_REG(priv->rx_chan)); in bcm_enetsw_open()
2218 ret = -ENOMEM; in bcm_enetsw_open()
2223 enet_dmas_writel(priv, priv->rx_desc_dma, in bcm_enetsw_open()
2224 ENETDMAS_RSTART_REG, priv->rx_chan); in bcm_enetsw_open()
2225 enet_dmas_writel(priv, priv->tx_desc_dma, in bcm_enetsw_open()
2226 ENETDMAS_RSTART_REG, priv->tx_chan); in bcm_enetsw_open()
2229 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); in bcm_enetsw_open()
2230 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); in bcm_enetsw_open()
2231 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); in bcm_enetsw_open()
2232 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); in bcm_enetsw_open()
2233 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); in bcm_enetsw_open()
2234 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); in bcm_enetsw_open()
2237 enet_dmac_writel(priv, priv->dma_maxburst, in bcm_enetsw_open()
2238 ENETDMAC_MAXBURST, priv->rx_chan); in bcm_enetsw_open()
2239 enet_dmac_writel(priv, priv->dma_maxburst, in bcm_enetsw_open()
2240 ENETDMAC_MAXBURST, priv->tx_chan); in bcm_enetsw_open()
2243 val = priv->rx_ring_size / 3; in bcm_enetsw_open()
2244 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); in bcm_enetsw_open()
2245 val = (priv->rx_ring_size * 2) / 3; in bcm_enetsw_open()
2246 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); in bcm_enetsw_open()
2248 /* all set, enable mac and interrupts, start dma engine and in bcm_enetsw_open()
2254 ENETDMAC_CHANCFG, priv->rx_chan); in bcm_enetsw_open()
2258 ENETDMAC_IR, priv->rx_chan); in bcm_enetsw_open()
2260 ENETDMAC_IR, priv->tx_chan); in bcm_enetsw_open()
2262 /* make sure we enable napi before rx interrupt */ in bcm_enetsw_open()
2263 napi_enable(&priv->napi); in bcm_enetsw_open()
2266 ENETDMAC_IRMASK, priv->rx_chan); in bcm_enetsw_open()
2268 ENETDMAC_IRMASK, priv->tx_chan); in bcm_enetsw_open()
2274 for (i = 0; i < priv->num_ports; i++) { in bcm_enetsw_open()
2277 port = &priv->used_ports[i]; in bcm_enetsw_open()
2278 if (!port->used) in bcm_enetsw_open()
2281 if (!port->bypass_link) in bcm_enetsw_open()
2287 switch (port->force_speed) { in bcm_enetsw_open()
2298 port->name); in bcm_enetsw_open()
2302 if (port->force_duplex_full) in bcm_enetsw_open()
2311 timer_setup(&priv->swphy_poll, swphy_poll_timer, 0); in bcm_enetsw_open()
2312 mod_timer(&priv->swphy_poll, jiffies); in bcm_enetsw_open()
2319 kfree(priv->tx_skb); in bcm_enetsw_open()
2322 dma_free_coherent(kdev, priv->tx_desc_alloc_size, in bcm_enetsw_open()
2323 priv->tx_desc_cpu, priv->tx_desc_dma); in bcm_enetsw_open()
2326 dma_free_coherent(kdev, priv->rx_desc_alloc_size, in bcm_enetsw_open()
2327 priv->rx_desc_cpu, priv->rx_desc_dma); in bcm_enetsw_open()
2330 if (priv->irq_tx != -1) in bcm_enetsw_open()
2331 free_irq(priv->irq_tx, dev); in bcm_enetsw_open()
2334 free_irq(priv->irq_rx, dev); in bcm_enetsw_open()
2347 kdev = &priv->pdev->dev; in bcm_enetsw_stop()
2349 del_timer_sync(&priv->swphy_poll); in bcm_enetsw_stop()
2351 napi_disable(&priv->napi); in bcm_enetsw_stop()
2352 del_timer_sync(&priv->rx_timeout); in bcm_enetsw_stop()
2355 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); in bcm_enetsw_stop()
2356 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); in bcm_enetsw_stop()
2359 bcm_enet_disable_dma(priv, priv->tx_chan); in bcm_enetsw_stop()
2360 bcm_enet_disable_dma(priv, priv->rx_chan); in bcm_enetsw_stop()
2369 kfree(priv->tx_skb); in bcm_enetsw_stop()
2370 dma_free_coherent(kdev, priv->rx_desc_alloc_size, in bcm_enetsw_stop()
2371 priv->rx_desc_cpu, priv->rx_desc_dma); in bcm_enetsw_stop()
2372 dma_free_coherent(kdev, priv->tx_desc_alloc_size, in bcm_enetsw_stop()
2373 priv->tx_desc_cpu, priv->tx_desc_dma); in bcm_enetsw_stop()
2374 if (priv->irq_tx != -1) in bcm_enetsw_stop()
2375 free_irq(priv->irq_tx, dev); in bcm_enetsw_stop()
2376 free_irq(priv->irq_rx, dev); in bcm_enetsw_stop()
2393 for (i = 0; i < priv->num_ports; ++i) { in bcm_enetsw_phy_is_external()
2394 if (!priv->used_ports[i].used) in bcm_enetsw_phy_is_external()
2396 if (priv->used_ports[i].phy_id == phy_id) in bcm_enetsw_phy_is_external()
2457 { "rx_packets", DEV_STAT(rx_packets), -1 },
2458 { "tx_packets", DEV_STAT(tx_packets), -1 },
2459 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2460 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2461 { "rx_errors", DEV_STAT(rx_errors), -1 },
2462 { "tx_errors", DEV_STAT(tx_errors), -1 },
2463 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2464 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2526 return -EINVAL; in bcm_enetsw_get_sset_count()
2533 strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); in bcm_enetsw_get_drvinfo()
2534 strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); in bcm_enetsw_get_drvinfo()
2554 reg = s->mib_reg; in bcm_enetsw_get_ethtool_stats()
2555 if (reg == -1) in bcm_enetsw_get_ethtool_stats()
2559 p = (char *)priv + s->stat_offset; in bcm_enetsw_get_ethtool_stats()
2561 if (s->sizeof_stat == sizeof(u64)) { in bcm_enetsw_get_ethtool_stats()
2575 if (s->mib_reg == -1) in bcm_enetsw_get_ethtool_stats()
2576 p = (char *)&netdev->stats + s->stat_offset; in bcm_enetsw_get_ethtool_stats()
2578 p = (char *)priv + s->stat_offset; in bcm_enetsw_get_ethtool_stats()
2580 data[i] = (s->sizeof_stat == sizeof(u64)) ? in bcm_enetsw_get_ethtool_stats()
2596 ering->rx_max_pending = 8192; in bcm_enetsw_get_ringparam()
2597 ering->tx_max_pending = 8192; in bcm_enetsw_get_ringparam()
2598 ering->rx_mini_max_pending = 0; in bcm_enetsw_get_ringparam()
2599 ering->rx_jumbo_max_pending = 0; in bcm_enetsw_get_ringparam()
2600 ering->rx_pending = priv->rx_ring_size; in bcm_enetsw_get_ringparam()
2601 ering->tx_pending = priv->tx_ring_size; in bcm_enetsw_get_ringparam()
2621 priv->rx_ring_size = ering->rx_pending; in bcm_enetsw_set_ringparam()
2622 priv->tx_ring_size = ering->tx_pending; in bcm_enetsw_set_ringparam()
2653 return -EPROBE_DEFER; in bcm_enetsw_probe()
2659 return -ENODEV; in bcm_enetsw_probe()
2663 return -ENOMEM; in bcm_enetsw_probe()
2667 priv->enet_is_sw = true; in bcm_enetsw_probe()
2668 priv->irq_rx = irq_rx; in bcm_enetsw_probe()
2669 priv->irq_tx = irq_tx; in bcm_enetsw_probe()
2670 priv->rx_ring_size = BCMENET_DEF_RX_DESC; in bcm_enetsw_probe()
2671 priv->tx_ring_size = BCMENET_DEF_TX_DESC; in bcm_enetsw_probe()
2672 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; in bcm_enetsw_probe()
2673 priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN; in bcm_enetsw_probe()
2675 pd = dev_get_platdata(&pdev->dev); in bcm_enetsw_probe()
2677 eth_hw_addr_set(dev, pd->mac_addr); in bcm_enetsw_probe()
2678 memcpy(priv->used_ports, pd->used_ports, in bcm_enetsw_probe()
2679 sizeof(pd->used_ports)); in bcm_enetsw_probe()
2680 priv->num_ports = pd->num_ports; in bcm_enetsw_probe()
2681 priv->dma_has_sram = pd->dma_has_sram; in bcm_enetsw_probe()
2682 priv->dma_chan_en_mask = pd->dma_chan_en_mask; in bcm_enetsw_probe()
2683 priv->dma_chan_int_mask = pd->dma_chan_int_mask; in bcm_enetsw_probe()
2684 priv->dma_chan_width = pd->dma_chan_width; in bcm_enetsw_probe()
2687 ret = bcm_enet_change_mtu(dev, dev->mtu); in bcm_enetsw_probe()
2691 priv->base = devm_ioremap_resource(&pdev->dev, res_mem); in bcm_enetsw_probe()
2692 if (IS_ERR(priv->base)) { in bcm_enetsw_probe()
2693 ret = PTR_ERR(priv->base); in bcm_enetsw_probe()
2697 priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw"); in bcm_enetsw_probe()
2698 if (IS_ERR(priv->mac_clk)) { in bcm_enetsw_probe()
2699 ret = PTR_ERR(priv->mac_clk); in bcm_enetsw_probe()
2702 ret = clk_prepare_enable(priv->mac_clk); in bcm_enetsw_probe()
2706 priv->rx_chan = 0; in bcm_enetsw_probe()
2707 priv->tx_chan = 1; in bcm_enetsw_probe()
2708 spin_lock_init(&priv->rx_lock); in bcm_enetsw_probe()
2711 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); in bcm_enetsw_probe()
2714 dev->netdev_ops = &bcm_enetsw_ops; in bcm_enetsw_probe()
2715 netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16); in bcm_enetsw_probe()
2716 dev->ethtool_ops = &bcm_enetsw_ethtool_ops; in bcm_enetsw_probe()
2717 SET_NETDEV_DEV(dev, &pdev->dev); in bcm_enetsw_probe()
2719 spin_lock_init(&priv->enetsw_mdio_lock); in bcm_enetsw_probe()
2727 priv->pdev = pdev; in bcm_enetsw_probe()
2728 priv->net_dev = dev; in bcm_enetsw_probe()
2733 clk_disable_unprepare(priv->mac_clk); in bcm_enetsw_probe()
2751 clk_disable_unprepare(priv->mac_clk); in bcm_enetsw_remove()