
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 * (fragmentary excerpt; "..." marks elided code)
 */
/* Register accessor macro bodies (read, then write) */
	u32 reg = readl_relaxed(priv->base + offset + off);	\
	...
	writel_relaxed(val, priv->base + offset + off);		\
/* rdma_readl() */
	if (priv->is_lite && off >= RDMA_STATUS)
		...
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);

/* rdma_writel() */
	if (priv->is_lite && off >= RDMA_STATUS)
		...
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);

/* tdma_control_bit() */
	if (!priv->is_lite) {
/* L2-interrupt masking/unmasking helpers; they automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
		priv->irq##which##_mask &= ~(mask);			\
		...
		priv->irq##which##_mask |= (mask);			\

/* ... nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
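/*
 * The comment fragment above appears to belong to the descriptor-address
 * helper. As a hedged sketch (not the verbatim driver code), such a helper
 * typically writes the upper address word only when 64-bit physical
 * addresses are configured, using the driver's DESC_* offsets:
 */
#if 0	/* illustrative sketch only */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d, dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* Upper bits share a register with status/length */
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
#endif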
/* bcm_sysport_set_rx_csum() */
	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	...
	if (priv->rx_chk_en)
	...
	/* ... a valid CHK bit to be set in the per-packet status word ... */
	if (priv->rx_chk_en && priv->crc_fwd)
	...
	/* ... sure we tell the RXCHK hardware to expect a 4-bytes Broadcom ... */

/* bcm_sysport_set_tx_csum() */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
	...
	if (priv->tsb_en)
/* bcm_sysport_set_features() */
	ret = clk_prepare_enable(priv->clk);
	...
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	...
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
	...
	clk_disable_unprepare(priv->clk);
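/*
 * The STAT_MIB_RX() entries below populate the driver's ethtool statistics
 * table. A hedged sketch of what such a macro conventionally expands to,
 * with field names taken from the accesses elsewhere in this excerpt
 * (s->stat_string, s->stat_sizeof, s->stat_offset, s->type):
 */
#if 0	/* illustrative sketch only */
#define STAT_MIB(str, m, _type) { \
	.stat_string	= str, \
	.stat_sizeof	= sizeof(((struct bcm_sysport_priv *)0)->m), \
	.stat_offset	= offsetof(struct bcm_sysport_priv, m), \
	.type		= _type, \
}
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#endif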
227 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
228 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
229 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
230 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
231 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
232 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
233 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
234 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
235 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
236 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
237 STAT_MIB_RX("rx_pkts", mib.rx.pkt),
238 STAT_MIB_RX("rx_bytes", mib.rx.bytes),
239 STAT_MIB_RX("rx_multicast", mib.rx.mca),
240 STAT_MIB_RX("rx_broadcast", mib.rx.bca),
241 STAT_MIB_RX("rx_fcs", mib.rx.fcs),
242 STAT_MIB_RX("rx_control", mib.rx.cf),
243 STAT_MIB_RX("rx_pause", mib.rx.pf),
244 STAT_MIB_RX("rx_unknown", mib.rx.uo),
245 STAT_MIB_RX("rx_align", mib.rx.aln),
246 STAT_MIB_RX("rx_outrange", mib.rx.flr),
247 STAT_MIB_RX("rx_code", mib.rx.cde),
248 STAT_MIB_RX("rx_carrier", mib.rx.fcr),
249 STAT_MIB_RX("rx_oversize", mib.rx.ovr),
250 STAT_MIB_RX("rx_jabber", mib.rx.jbr),
251 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
252 STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
253 STAT_MIB_RX("rx_unicast", mib.rx.uc),
254 STAT_MIB_RX("rx_ppp", mib.rx.ppp),
255 STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* Per TX-queue statistics are dynamically appended */

/* bcm_sysport_get_drvinfo() */
	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->bus_info, "platform", sizeof(info->bus_info));

/* bcm_sysport_get_msglvl() */
	return priv->msg_enable;

/* bcm_sysport_set_msglvl() */
	priv->msg_enable = enable;
/* bcm_sysport_get_sset_count() */
		if (priv->is_lite &&
		    !bcm_sysport_lite_stat_valid(s->type))
			...
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	...
		return -EOPNOTSUPP;

/* bcm_sysport_get_strings() */
		if (priv->is_lite &&
		    !bcm_sysport_lite_stat_valid(s->type))
			...
		memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
		...
	for (i = 0; i < dev->num_tx_queues; i++) {
/* bcm_sysport_update_mib_counters() */
		switch (s->type) {
		...
			if (priv->is_lite)
				...
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				...
			val = rxchk_readl(priv, s->reg_offset);
			...
				rxchk_writel(priv, 0, s->reg_offset);
			...
			val = rbuf_readl(priv, s->reg_offset);
			...
				rbuf_writel(priv, 0, s->reg_offset);
			...
			if (!priv->is_lite)
				...
			val = rdma_readl(priv, s->reg_offset);
			...
				rdma_writel(priv, 0, s->reg_offset);
		...
		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
	...
	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
/* bcm_sysport_update_tx_stats() */
	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		...
			start = u64_stats_fetch_begin(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry(&priv->syncp, start));
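/*
 * ring->bytes and ring->packets are 64-bit and written from the TX reclaim
 * path, so readers use the u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * seqcount pattern: on 32-bit machines a 64-bit load can tear, and the
 * retry loop re-reads until it observes a consistent snapshot.
 */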
/* bcm_sysport_get_stats() */
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	...
	stats64->tx_bytes = tx_bytes;
	stats64->tx_packets = tx_packets;
	...
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
		...
		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			...
		p += s->stat_offset;
		...
		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
	...
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	...
	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		...
		data[j] = ring->bytes;
/* bcm_sysport_get_wol() */
	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;
	...
	if (!(priv->wolopts & WAKE_MAGICSECURE))
		...
	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));

/* bcm_sysport_set_wol() */
	struct device *kdev = &priv->pdev->dev;
	...
		return -ENOTSUPP;
	...
	if (wol->wolopts & ~supported)
		return -EINVAL;
	...
	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
	...
	if (wol->wolopts) {
		...
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	...
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	...
	priv->wolopts = wol->wolopts;
/* bcm_sysport_set_tx_coalesce() */
	struct bcm_sysport_priv *priv = ring->priv;
	...
	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	...
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	...
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
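/*
 * The ring timeout field counts in 8192 ns hardware ticks, hence the
 * usecs-to-ticks conversion above. For example, a 50 usec request becomes
 * DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, i.e. ~57.3 usec programmed;
 * rounding up guarantees the timeout is never shorter than requested.
 */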
/* bcm_sysport_get_coalesce() */
	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
	...
	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
/* bcm_sysport_set_coalesce() */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
	...
	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
/* bcm_sysport_free_cb() */
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;

/* bcm_sysport_rx_refill() */
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	...
	dma_addr_t mapping;
	...
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
	...
		priv->mib.alloc_rx_buff_failed++;
	...
	mapping = dma_map_single(kdev, skb->data,
	...
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		...
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
	...
	rx_skb = cb->skb;
	...
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);
	...
	netif_dbg(priv, rx_status, ndev, "RX refill\n");
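/*
 * Note the refill order: a replacement skb is allocated and DMA-mapped
 * before the old one is pulled out of the control block, so an allocation
 * or mapping failure leaves the ring slot with a valid buffer and the
 * incoming packet is dropped instead of corrupting the ring.
 */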
/* bcm_sysport_alloc_rx_bufs() */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		...
		if (!cb->skb)
			return -ENOMEM;
/* bcm_sysport_desc_rx() */
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	...
	/* ... groups the producer and consumer indexes into the same 32-bit ... */
	if (!priv->is_lite)
	...
	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
	...
		  p_index, priv->rx_c_index, to_process);
	...
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		...
		/* ... DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping ...
		 */
		...
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
		...
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
		...
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
		...
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
		...
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
		...
			ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
		...
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		...
		/* Hardware pre-pends packets with 2 bytes before Ethernet ... */
		...
		len -= (sizeof(*rsb) + 2);
		...
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}
		...
		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);
		...
		napi_gro_receive(&priv->napi, skb);
		...
		priv->rx_read_ptr++;
		...
		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	...
	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;
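/*
 * Each received frame is fronted by a Receive Status Block (RSB) plus two
 * bytes of alignment padding, which the length adjustment above strips.
 * A hedged sketch of the two-word layout implied by the rx_status_len
 * accesses (the second word is assumed to carry the Broadcom switch tag):
 */
#if 0	/* illustrative sketch only */
struct bcm_rsb {
	u32 rx_status_len;	/* DESC_LEN and DESC_STATUS bit-fields */
	u32 brcm_egress_tag;	/* Broadcom switch tag */
};
#endif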
/* bcm_sysport_tx_reclaim_one() */
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;
	...
	if (cb->skb) {
		*bytes_compl += cb->skb->len;

/* __bcm_sysport_tx_reclaim() */
	struct net_device *ndev = priv->netdev;
	...
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	...
		intrl2_0_writel(ring->priv, BIT(ring->index +
	...
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	...
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
	...
		  ring->index, ring->c_index, c_index, txbds_ready);
	...
		cb = &ring->cbs[ring->clean_index];
		...
		ring->desc_count++;
		...
		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		...
			ring->clean_index = 0;
	...
	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);
	...
	ring->c_index = c_index;
	...
		  ring->index, ring->c_index, pkts_compl, bytes_compl);
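/*
 * The consumer-index delta above relies on masked modular arithmetic.
 * Assuming a 16-bit index space: with c_index = 5 and ring->c_index =
 * 0xfffe, (5 - 0xfffe) & 0xffff = 7 descriptors are ready, so wraparound
 * of the hardware counter needs no special casing.
 */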
/* Locked version of the per-ring TX reclaim routine */

/* bcm_sysport_tx_reclaim() */
	txq = netdev_get_tx_queue(priv->netdev, ring->index);
	...
	spin_lock_irqsave(&ring->lock, flags);
	...
	spin_unlock_irqrestore(&ring->lock, flags);

/* Locked version of the per-ring TX reclaim, but does not wake the queue */

/* bcm_sysport_tx_clean() */
	spin_lock_irqsave(&ring->lock, flags);
	...
	spin_unlock_irqrestore(&ring->lock, flags);

/* bcm_sysport_tx_poll() */
	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
	...
	/* re-enable TX interrupt */
	if (!ring->priv->is_lite)
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));
	...
		intrl2_0_mask_clear(ring->priv, BIT(ring->index +

/* bcm_sysport_tx_reclaim_all() */
	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
/* bcm_sysport_poll() */
	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	...
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	...
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
	...
	/* re-enable RX interrupts */
	...
	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
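/*
 * On SYSTEMPORT Lite the producer and consumer indexes share a single
 * 32-bit register (consumer in the upper half), which is why the consumer
 * index is written shifted left by 16 above, while the full SYSTEMPORT
 * writes it directly to a dedicated RDMA_CONS_INDEX register.
 */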
/* mpd_enable_set() */
	if (priv->is_lite)

/* bcm_sysport_resume_from_wol() */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
	...
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
	...
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	...
	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");

/* bcm_sysport_dim_work() */
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);
	...
	dim->state = DIM_START_MEASURE;
/* RX and misc interrupt routine */

/* bcm_sysport_rx_isr() */
	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
	...
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
	...
	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			...
			__napi_schedule_irqoff(&priv->napi);
	...
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
	...
	if (!priv->is_lite)
	...
	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		...
		if (!(priv->irq0_stat & ring_bit))
		...
		txr = &priv->tx_rings[ring];
		...
		if (likely(napi_schedule_prep(&txr->napi))) {
			...
			__napi_schedule(&txr->napi);
/* bcm_sysport_tx_isr() */
	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
	...
	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
	...
	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
		...
		txr = &priv->tx_rings[ring];
		...
		if (likely(napi_schedule_prep(&txr->napi))) {
			...
			__napi_schedule_irqoff(&txr->napi);

/* bcm_sysport_wol_isr() */
	pm_wakeup_event(&priv->pdev->dev, 0);

/* bcm_sysport_poll_controller() */
	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
/* bcm_sysport_insert_tsb() */
	/* Re-allocate SKB if needed */
	...
		priv->mib.tx_realloc_tsb_failed++;
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
	...
	priv->mib.tx_realloc_tsb++;
	...
	/* Zero-out TSB by default */
	...
	tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
	tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
	...
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		...
			ip_proto = ip_hdr(skb)->protocol;
		...
			ip_proto = ipv6_hdr(skb)->nexthdr;
		...
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		...
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		...
		tsb->l4_ptr_dest_map = csum_info;
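/*
 * For TX checksum offload the driver prepends a Transmit Status Block
 * (TSB) and points the hardware at the L4 checksum start and offset.
 * A hedged sketch of the two-word layout implied by the field accesses
 * above (field roles inferred, not verbatim from the header):
 */
#if 0	/* illustrative sketch only */
struct bcm_tsb {
	u32 pcp_dei_vid;	/* VLAN PCP/DEI/VID for tag insertion */
	u32 l4_ptr_dest_map;	/* L4 checksum pointer/offset info */
};
#endif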
/* bcm_sysport_xmit() */
	struct device *kdev = &priv->pdev->dev;
	...
	dma_addr_t mapping;
	...
	ring = &priv->tx_rings[queue];
	...
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
	...
	if (priv->tsb_en) {
	...
	skb_len = skb->len;
	...
	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		...
			  skb->data, skb_len);
	...
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	...
	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	...
	if (skb->ip_summed == CHECKSUM_PARTIAL)
	...
	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;
	...
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
	...
	if (ring->desc_count == 0)
	...
		  ring->index, ring->desc_count, ring->curr_desc);
	...
	spin_unlock_irqrestore(&ring->lock, flags);
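/*
 * Descriptors are pushed through the TDMA_WRITE_PORT_{HI,LO} register
 * pair rather than a ring in coherent memory; priv->desc_lock keeps the
 * HI/LO writes atomic against transmitters on other queues, since the
 * two halves of a descriptor must reach the port back to back.
 */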
/* bcm_sysport_tx_timeout() */
	dev->stats.tx_errors++;

/* bcm_sysport_adj_link() */
	struct phy_device *phydev = dev->phydev;
	...
	if (priv->old_link != phydev->link) {
		...
		priv->old_link = phydev->link;
	...
	if (priv->old_duplex != phydev->duplex) {
		...
		priv->old_duplex = phydev->duplex;
	...
	if (priv->is_lite)
	...
	switch (phydev->speed) {
	...
	if (phydev->duplex == DUPLEX_HALF)
	...
	if (priv->old_pause != phydev->pause) {
		...
		priv->old_pause = phydev->pause;
	...
	if (!phydev->pause)
	...
	if (phydev->link) {
/* bcm_sysport_init_dim() */
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;

/* bcm_sysport_init_rx_coalesce() */
	struct bcm_sysport_net_dim *dim = &priv->dim;
	...
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
/* bcm_sysport_init_tx_ring() */
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	...
	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	...
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;
	...
	/* Configure QID and port mapping */
	...
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	...
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
	...
	if (priv->is_lite)
	...
	tdma_writel(priv, ring->size |
	...
	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d, port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);
/* bcm_sysport_fini_tx_ring() */
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	...
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could ... */
	if (!ring->cbs)
	...
	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);
	...
	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");

/* rdma_enable_set() */
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
	...
	return -ETIMEDOUT;

/* tdma_enable_set() */
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
	...
	return -ETIMEDOUT;
/* bcm_sysport_init_rx_ring() */
	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
	...
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	...
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	...
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
	...
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
	...
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
	...
		  priv->num_rx_bds, priv->rx_bds);
/* bcm_sysport_fini_rx_ring() */
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		...
			dma_unmap_single(&priv->pdev->dev,
	...
	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");

/* bcm_sysport_set_rx_mode() */
	if (priv->is_lite)
	...
	if (dev->flags & IFF_PROMISC)
	...
	if (dev->flags & IFF_ALLMULTI)

/* umac_enable_set() */
	if (!priv->is_lite) {
	...
	/* UniMAC stops on a packet boundary, wait for a full-sized packet ... */

/* umac_reset() */
	if (priv->is_lite)

/* umac_set_hw_addr() */
	if (!priv->is_lite) {
/* bcm_sysport_change_mac() */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	eth_hw_addr_set(dev, addr->sa_data);
	...
	umac_set_hw_addr(priv, dev->dev_addr);

/* bcm_sysport_get_stats64() */
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	...
	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);
	...
		start = u64_stats_fetch_begin(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry(&priv->syncp, start));

/* bcm_sysport_netif_start() */
	napi_enable(&priv->napi);
	...
	/* Enable RX interrupt and TX ring full interrupt */
	...
	phy_start(dev->phydev);
	...
	if (!priv->is_lite)
/* rbuf_init() */
	if (priv->is_lite)

/* bcm_sysport_mask_all_intrs() */
	if (!priv->is_lite) {

/* gib_set_pad_extension() */
	if (netdev_uses_dsa(priv->netdev)) {
/* bcm_sysport_open() */
	clk_prepare_enable(priv->clk);
	...
	/* Flush TX and RX FIFOs at TOPCTRL level */
	...
	/* Disable the UniMAC RX/TX */
	...
	if (!priv->is_lite)
	...
	bcm_sysport_set_features(dev, dev->features);
	...
	umac_set_hw_addr(priv, dev->dev_addr);
	...
	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	...
		ret = -ENODEV;
	...
	phydev->mac_managed_pm = true;
	...
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;
	...
	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	...
		netdev_err(dev, "failed to request RX interrupt\n");
	...
	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
	...
	spin_lock_init(&priv->desc_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
	...
	/* Initialize linked-list */
	...
	/* Initialize RX ring */
	...
		netdev_err(dev, "failed to initialize RX ring\n");
	...
	/* Turn on UniMAC TX/RX */
	...
	for (i = 0; i < dev->num_tx_queues; i++)
	...
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
	...
	free_irq(priv->irq0, dev);
	...
	clk_disable_unprepare(priv->clk);
/* bcm_sysport_netif_stop() */
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

/* bcm_sysport_stop() */
	/* Disable UniMAC RX */
	...
	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
	...
	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
	...
	phy_disconnect(dev->phydev);
	...
	clk_disable_unprepare(priv->clk);
/* bcm_sysport_rule_find() */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
	...
	return -EINVAL;

/* bcm_sysport_rule_get() */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	...
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

/* bcm_sysport_rule_set() */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	...
		return -ENOSPC;
	...
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	...
	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

/* bcm_sysport_rule_del() */
		return -EOPNOTSUPP;
	...
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

/* bcm_sysport_get_rxnfc() */
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {

/* bcm_sysport_set_rxnfc() */
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	...
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
/* bcm_sysport_select_queue() */
	unsigned int q, port;
	...
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
	...
	return tx_ring->index;
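/*
 * ring_map[] is indexed as q + port * per_port_num_tx_queues, matching
 * how bcm_sysport_map_queues() fills it in below: e.g. with 4 queues per
 * switch port, queue 2 of port 1 selects ring_map[2 + 1 * 4], entry 6.
 */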
/* bcm_sysport_map_queues() */
	unsigned int q, qp, port;
	...
	if (dp->ds->index)
	...
	port = dp->index;

	/* ... 1:1 mapping, we can only do a 2:1 mapping. By reducing the number
	 * of per-port (slave_dev) network device queues, we achieve just that.
	 */
	...
	if (priv->is_lite)
		...
			slave_dev->num_tx_queues / 2);
	...
	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	...
		ring = &priv->tx_rings[q];

		if (ring->inspect)
		...
		/* Just remember the mapping, actual programming done ... */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[qp + port * num_tx_queues] = ring;
/* bcm_sysport_unmap_queues() */
	unsigned int q, qp, port;
	...
	port = dp->index;
	...
	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
		...
		if (!ring->inspect)
		...
		ring->inspect = false;
		qp = ring->switch_queue;
		priv->ring_map[qp + port * num_tx_queues] = NULL;

/* bcm_sysport_netdevice_event() */
	if (priv->netdev != dev)
	...
		if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		...
		if (!dsa_user_dev_check(info->upper_dev))
		...
		if (info->linking)
			ret = bcm_sysport_map_queues(dev, info->upper_dev);
		...
			ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
/* of_device_id match table */
	{ .compatible = "brcm,systemportlite-v1.00",
	...
	{ .compatible = "brcm,systemport-v1.00",
/* bcm_sysport_probe() */
	dn = pdev->dev.of_node;
	...
	if (!of_id || !of_id->data)
		return -EINVAL;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	...
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		...
			dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
	...
	params = of_id->data;
	...
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
	...
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
	...
		return -EINVAL;
	...
		return -ENOMEM;
	...
	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
	...
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
	...
	if (!priv->tx_rings) {
		ret = -ENOMEM;
	...
	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq_optional(pdev, 2);
	...
		priv->wol_irq = platform_get_irq_optional(pdev, 1);
	...
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		ret = -EINVAL;
	...
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
	...
	priv->netdev = dev;
	priv->pdev = pdev;

	ret = of_get_phy_mode(dn, &priv->phy_interface);
	...
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
	...
		dev_err(&pdev->dev, "failed to register fixed PHY\n");
	...
	priv->phy_dn = dn;
	...
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
	...
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
	...
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
	...
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->max_mtu = UMAC_MAX_MTU_SIZE;
	...
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	...
		device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk)) {
		ret = PTR_ERR(priv->wol_clk);
	...
	dev->needed_headroom += sizeof(struct bcm_tsb);
	...
	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;

	ret = register_netdevice_notifier(&priv->netdev_notifier);
	...
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
	...
		dev_err(&pdev->dev, "failed to register net_device\n");
	...
	clk_prepare_enable(priv->clk);

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
	...
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	clk_disable_unprepare(priv->clk);
	...
	unregister_netdevice_notifier(&priv->netdev_notifier);
/* bcm_sysport_remove() */
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	...
	struct device_node *dn = pdev->dev.of_node;
	...
	unregister_netdevice_notifier(&priv->netdev_notifier);
	...
	dev_set_drvdata(&pdev->dev, NULL);

/* bcm_sysport_suspend_to_wol() */
	struct net_device *ndev = priv->netdev;
	...
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
	...
	if (priv->wolopts & WAKE_MAGICSECURE) {
		...
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
		...
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
	...
	if (priv->wolopts & WAKE_FILTER) {
		...
		if (priv->is_lite)
		...
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
	...
	} while (timeout-- > 0);
	...
	return -ETIMEDOUT;
/* bcm_sysport_suspend() */
	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	...
	if (priv->rx_chk_en) {
	...
	/* Flush RX pipe */
	if (!priv->wolopts)
	...
	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
	...
	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts) {
		clk_prepare_enable(priv->wol_clk);
	...
	clk_disable_unprepare(priv->clk);

/* bcm_sysport_resume() */
	clk_prepare_enable(priv->clk);
	if (priv->wolopts)
		clk_disable_unprepare(priv->wol_clk);
	...
	/* Disable the UniMAC RX/TX */
	...
	for (i = 0; i < dev->num_tx_queues; i++) {
	...
	/* Initialize linked-list */
	...
	/* Initialize RX ring */
	...
		netdev_err(dev, "failed to initialize RX ring\n");
	...
	/* RX pipe enable */
	...
	bcm_sysport_set_features(dev, dev->features);
	...
	if (!priv->is_lite)
	...
	umac_set_hw_addr(priv, dev->dev_addr);
	...
	phy_resume(dev->phydev);
	...
	for (i = 0; i < dev->num_tx_queues; i++)
	...
	clk_disable_unprepare(priv->clk);
/* platform driver registration */
	.name = "brcm-systemport",
	...
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");