Lines matching "unimac-mdio"

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2014-2024 Broadcom
23 #include <linux/dma-mapping.h>
51 (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
53 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
59 #define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
62 #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
65 #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
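/* Editor's note -- worked example, not driver code: with the default
 * hw_params quoted in the queue-layout comment further down (4 priority
 * queues of 32 BDs each, TOTAL_DESC = 256), the Q16 descriptor-count
 * macros above reduce to
 *
 *	GENET_Q16_RX_BD_CNT = 256 - 4 * 32 = 128
 *	GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128
 *
 * so the default queue 16 gets whatever the priority queues do not claim.
 */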
74 * peripheral registers for CPU-native byte order. in bcmgenet_writel()
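/* Editor's sketch of the accessor the comment above belongs to
 * (reconstructed; the exact accessor pair is an assumption): MIPS parts
 * strapped big-endian present these registers in CPU-native order, so a
 * raw access is used there and a little-endian access everywhere else.
 */
static inline void bcmgenet_writel_sketch(u32 value, void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}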
104 * the platform is explicitly configured for 64-bits/LPAE. in dmadesc_set_addr()
107 if (priv->hw_params->flags & GENET_HAS_40BITS) in dmadesc_set_addr()
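/* Editor's sketch (reconstructed around the fragment above; the
 * DMA_DESC_ADDRESS_LO/HI offsets are assumptions based on this driver's
 * descriptor layout): the low 32 address bits are always written, while
 * the expensive GISB write of the high bits is skipped unless the SoC is
 * really configured for >32-bit physical addresses.
 */
static inline void dmadesc_set_addr_sketch(struct bcmgenet_priv *priv,
					   void __iomem *d, dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}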
150 return bcmgenet_readl(priv->base + in bcmgenet_tbuf_ctrl_get()
151 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_get()
159 bcmgenet_writel(val, priv->base + in bcmgenet_tbuf_ctrl_set()
160 priv->hw_params->tbuf_offset + TBUF_CTRL); in bcmgenet_tbuf_ctrl_set()
168 return bcmgenet_readl(priv->base + in bcmgenet_bp_mc_get()
169 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_get()
177 bcmgenet_writel(val, priv->base + in bcmgenet_bp_mc_set()
178 priv->hw_params->tbuf_offset + TBUF_BP_MC); in bcmgenet_bp_mc_set()
320 return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_readl()
327 bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_writel()
334 return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_readl()
341 bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_writel()
372 /* GENET v4 supports 40-bit pointer addressing
412 return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_readl()
421 bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + in bcmgenet_tdma_ring_writel()
430 return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_readl()
439 bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + in bcmgenet_rdma_ring_writel()
499 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * in bcmgenet_hfb_set_filter_length()
515 size--; in bcmgenet_hfb_validate_mask()
518 return -EINVAL; in bcmgenet_hfb_validate_mask()
534 index = f_index * priv->hw_params->hfb_filter_size + offset / 2; in bcmgenet_hfb_insert_data()
537 while (size--) { in bcmgenet_hfb_insert_data()
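/* Editor's note, inferred from the index arithmetic above: each 32-bit
 * word of HFB filter RAM carries two packet bytes plus their mask bits,
 * so byte "offset" of filter "f_index" lands in word
 *
 *	f_index * hfb_filter_size + offset / 2
 *
 * e.g. with hfb_filter_size = 128, byte 14 of filter 3 sits in word
 * 3 * 128 + 14 / 2 = 391.
 */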
581 struct ethtool_rx_flow_spec *fs = &rule->fs; in bcmgenet_hfb_create_rxnfc_filter()
588 f = fs->location; in bcmgenet_hfb_create_rxnfc_filter()
589 if (fs->flow_type & FLOW_MAC_EXT) { in bcmgenet_hfb_create_rxnfc_filter()
591 &fs->h_ext.h_dest, &fs->m_ext.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
592 sizeof(fs->h_ext.h_dest)); in bcmgenet_hfb_create_rxnfc_filter()
595 if (fs->flow_type & FLOW_EXT) { in bcmgenet_hfb_create_rxnfc_filter()
596 if (fs->m_ext.vlan_etype || in bcmgenet_hfb_create_rxnfc_filter()
597 fs->m_ext.vlan_tci) { in bcmgenet_hfb_create_rxnfc_filter()
599 &fs->h_ext.vlan_etype, in bcmgenet_hfb_create_rxnfc_filter()
600 &fs->m_ext.vlan_etype, in bcmgenet_hfb_create_rxnfc_filter()
601 sizeof(fs->h_ext.vlan_etype)); in bcmgenet_hfb_create_rxnfc_filter()
603 &fs->h_ext.vlan_tci, in bcmgenet_hfb_create_rxnfc_filter()
604 &fs->m_ext.vlan_tci, in bcmgenet_hfb_create_rxnfc_filter()
605 sizeof(fs->h_ext.vlan_tci)); in bcmgenet_hfb_create_rxnfc_filter()
611 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in bcmgenet_hfb_create_rxnfc_filter()
615 &fs->h_u.ether_spec.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
616 &fs->m_u.ether_spec.h_dest, in bcmgenet_hfb_create_rxnfc_filter()
617 sizeof(fs->h_u.ether_spec.h_dest)); in bcmgenet_hfb_create_rxnfc_filter()
619 &fs->h_u.ether_spec.h_source, in bcmgenet_hfb_create_rxnfc_filter()
620 &fs->m_u.ether_spec.h_source, in bcmgenet_hfb_create_rxnfc_filter()
621 sizeof(fs->h_u.ether_spec.h_source)); in bcmgenet_hfb_create_rxnfc_filter()
623 &fs->h_u.ether_spec.h_proto, in bcmgenet_hfb_create_rxnfc_filter()
624 &fs->m_u.ether_spec.h_proto, in bcmgenet_hfb_create_rxnfc_filter()
625 sizeof(fs->h_u.ether_spec.h_proto)); in bcmgenet_hfb_create_rxnfc_filter()
635 &fs->h_u.usr_ip4_spec.tos, in bcmgenet_hfb_create_rxnfc_filter()
636 &fs->m_u.usr_ip4_spec.tos, in bcmgenet_hfb_create_rxnfc_filter()
637 sizeof(fs->h_u.usr_ip4_spec.tos)); in bcmgenet_hfb_create_rxnfc_filter()
639 &fs->h_u.usr_ip4_spec.proto, in bcmgenet_hfb_create_rxnfc_filter()
640 &fs->m_u.usr_ip4_spec.proto, in bcmgenet_hfb_create_rxnfc_filter()
641 sizeof(fs->h_u.usr_ip4_spec.proto)); in bcmgenet_hfb_create_rxnfc_filter()
643 &fs->h_u.usr_ip4_spec.ip4src, in bcmgenet_hfb_create_rxnfc_filter()
644 &fs->m_u.usr_ip4_spec.ip4src, in bcmgenet_hfb_create_rxnfc_filter()
645 sizeof(fs->h_u.usr_ip4_spec.ip4src)); in bcmgenet_hfb_create_rxnfc_filter()
647 &fs->h_u.usr_ip4_spec.ip4dst, in bcmgenet_hfb_create_rxnfc_filter()
648 &fs->m_u.usr_ip4_spec.ip4dst, in bcmgenet_hfb_create_rxnfc_filter()
649 sizeof(fs->h_u.usr_ip4_spec.ip4dst)); in bcmgenet_hfb_create_rxnfc_filter()
650 if (!fs->m_u.usr_ip4_spec.l4_4_bytes) in bcmgenet_hfb_create_rxnfc_filter()
659 size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); in bcmgenet_hfb_create_rxnfc_filter()
662 &fs->h_u.usr_ip4_spec.l4_4_bytes, in bcmgenet_hfb_create_rxnfc_filter()
663 &fs->m_u.usr_ip4_spec.l4_4_bytes, in bcmgenet_hfb_create_rxnfc_filter()
670 if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { in bcmgenet_hfb_create_rxnfc_filter()
675 rule->state = BCMGENET_RXNFC_STATE_DISABLED; in bcmgenet_hfb_create_rxnfc_filter()
679 fs->ring_cookie); in bcmgenet_hfb_create_rxnfc_filter()
681 rule->state = BCMGENET_RXNFC_STATE_ENABLED; in bcmgenet_hfb_create_rxnfc_filter()
693 base = f_index * priv->hw_params->hfb_filter_size; in bcmgenet_hfb_clear_filter()
694 for (i = 0; i < priv->hw_params->hfb_filter_size; i++) in bcmgenet_hfb_clear_filter()
712 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) in bcmgenet_hfb_clear()
716 for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) in bcmgenet_hfb_clear()
724 INIT_LIST_HEAD(&priv->rxnfc_list); in bcmgenet_hfb_init()
729 INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); in bcmgenet_hfb_init()
730 priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_hfb_init()
741 return clk_prepare_enable(priv->clk); in bcmgenet_begin()
749 clk_disable_unprepare(priv->clk); in bcmgenet_complete()
756 return -EINVAL; in bcmgenet_get_link_ksettings()
758 if (!dev->phydev) in bcmgenet_get_link_ksettings()
759 return -ENODEV; in bcmgenet_get_link_ksettings()
761 phy_ethtool_ksettings_get(dev->phydev, cmd); in bcmgenet_get_link_ksettings()
770 return -EINVAL; in bcmgenet_set_link_ksettings()
772 if (!dev->phydev) in bcmgenet_set_link_ksettings()
773 return -ENODEV; in bcmgenet_set_link_ksettings()
775 return phy_ethtool_ksettings_set(dev->phydev, cmd); in bcmgenet_set_link_ksettings()
785 ret = clk_prepare_enable(priv->clk); in bcmgenet_set_features()
791 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); in bcmgenet_set_features()
793 clk_disable_unprepare(priv->clk); in bcmgenet_set_features()
802 return priv->msg_enable; in bcmgenet_get_msglevel()
809 priv->msg_enable = level; in bcmgenet_set_msglevel()
821 ec->tx_max_coalesced_frames = in bcmgenet_get_coalesce()
824 ec->rx_max_coalesced_frames = in bcmgenet_get_coalesce()
827 ec->rx_coalesce_usecs = in bcmgenet_get_coalesce()
830 for (i = 0; i < priv->hw_params->rx_queues; i++) { in bcmgenet_get_coalesce()
831 ring = &priv->rx_rings[i]; in bcmgenet_get_coalesce()
832 ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; in bcmgenet_get_coalesce()
834 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_get_coalesce()
835 ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; in bcmgenet_get_coalesce()
843 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_set_rx_coalesce()
844 unsigned int i = ring->index; in bcmgenet_set_rx_coalesce()
861 ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; in bcmgenet_set_ring_rx_coalesce()
862 ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; in bcmgenet_set_ring_rx_coalesce()
863 usecs = ring->rx_coalesce_usecs; in bcmgenet_set_ring_rx_coalesce()
864 pkts = ring->rx_max_coalesced_frames; in bcmgenet_set_ring_rx_coalesce()
866 if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { in bcmgenet_set_ring_rx_coalesce()
867 moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); in bcmgenet_set_ring_rx_coalesce()
872 ring->dim.use_dim = ec->use_adaptive_rx_coalesce; in bcmgenet_set_ring_rx_coalesce()
888 if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || in bcmgenet_set_coalesce()
889 ec->tx_max_coalesced_frames == 0 || in bcmgenet_set_coalesce()
890 ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || in bcmgenet_set_coalesce()
891 ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) in bcmgenet_set_coalesce()
892 return -EINVAL; in bcmgenet_set_coalesce()
894 if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) in bcmgenet_set_coalesce()
895 return -EINVAL; in bcmgenet_set_coalesce()
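/* Editor's sketch (assumed conversion, consistent with the bound checked
 * above): the ring timeout register counts in hardware ticks of roughly
 * 8.192 us, so an ethtool microsecond value would be converted as
 */
static inline u32 bcmgenet_usecs_to_timeout_sketch(u32 usecs)
{
	/* usecs * 1000 ns, divided by 8192 ns per tick, rounded up */
	return DIV_ROUND_UP(usecs * 1000, 8192);
}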
903 * ethtool knob to do coalescing on a per-queue basis in bcmgenet_set_coalesce()
905 for (i = 0; i < priv->hw_params->tx_queues; i++) in bcmgenet_set_coalesce()
907 ec->tx_max_coalesced_frames, in bcmgenet_set_coalesce()
910 ec->tx_max_coalesced_frames, in bcmgenet_set_coalesce()
913 for (i = 0; i < priv->hw_params->rx_queues; i++) in bcmgenet_set_coalesce()
914 bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); in bcmgenet_set_coalesce()
915 bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); in bcmgenet_set_coalesce()
928 epause->autoneg = priv->autoneg_pause; in bcmgenet_get_pauseparam()
933 epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); in bcmgenet_get_pauseparam()
934 epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); in bcmgenet_get_pauseparam()
937 epause->tx_pause = priv->tx_pause; in bcmgenet_get_pauseparam()
938 epause->rx_pause = priv->rx_pause; in bcmgenet_get_pauseparam()
947 if (!dev->phydev) in bcmgenet_set_pauseparam()
948 return -ENODEV; in bcmgenet_set_pauseparam()
950 if (!phy_validate_pause(dev->phydev, epause)) in bcmgenet_set_pauseparam()
951 return -EINVAL; in bcmgenet_set_pauseparam()
953 priv->autoneg_pause = !!epause->autoneg; in bcmgenet_set_pauseparam()
954 priv->tx_pause = !!epause->tx_pause; in bcmgenet_set_pauseparam()
955 priv->rx_pause = !!epause->rx_pause; in bcmgenet_set_pauseparam()
957 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); in bcmgenet_set_pauseparam()
964 BCMGENET_STAT_NETDEV = -1,
983 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
990 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1002 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1041 /* UniMAC RSV counters */
1071 /* UniMAC TSV counters */
1101 /* UniMAC RUNT counters */
1106 /* Misc UniMAC counters */
1131 strscpy(info->driver, "bcmgenet", sizeof(info->driver)); in bcmgenet_get_drvinfo()
1140 return -EOPNOTSUPP; in bcmgenet_get_sset_count()
1210 switch (s->type) { in bcmgenet_update_mib_counters()
1227 val = bcmgenet_umac_readl(priv, s->reg_offset); in bcmgenet_update_mib_counters()
1231 s->reg_offset); in bcmgenet_update_mib_counters()
1234 s->reg_offset); in bcmgenet_update_mib_counters()
1239 j += s->stat_sizeof; in bcmgenet_update_mib_counters()
1240 p = (char *)priv + s->stat_offset; in bcmgenet_update_mib_counters()
1255 dev->netdev_ops->ndo_get_stats(dev); in bcmgenet_get_ethtool_stats()
1262 if (s->type == BCMGENET_STAT_NETDEV) in bcmgenet_get_ethtool_stats()
1263 p = (char *)&dev->stats; in bcmgenet_get_ethtool_stats()
1266 p += s->stat_offset; in bcmgenet_get_ethtool_stats()
1268 s->stat_sizeof == sizeof(unsigned long)) in bcmgenet_get_ethtool_stats()
1279 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; in bcmgenet_eee_enable_set()
1282 if (enable && !priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
1283 clk_prepare_enable(priv->clk_eee); in bcmgenet_eee_enable_set()
1284 priv->clk_eee_enabled = true; in bcmgenet_eee_enable_set()
1295 reg = bcmgenet_readl(priv->base + off); in bcmgenet_eee_enable_set()
1300 bcmgenet_writel(reg, priv->base + off); in bcmgenet_eee_enable_set()
1310 if (!enable && priv->clk_eee_enabled) { in bcmgenet_eee_enable_set()
1311 clk_disable_unprepare(priv->clk_eee); in bcmgenet_eee_enable_set()
1312 priv->clk_eee_enabled = false; in bcmgenet_eee_enable_set()
1315 priv->eee.eee_enabled = enable; in bcmgenet_eee_enable_set()
1316 priv->eee.tx_lpi_enabled = tx_lpi_enabled; in bcmgenet_eee_enable_set()
1322 struct ethtool_keee *p = &priv->eee; in bcmgenet_get_eee()
1325 return -EOPNOTSUPP; in bcmgenet_get_eee()
1327 if (!dev->phydev) in bcmgenet_get_eee()
1328 return -ENODEV; in bcmgenet_get_eee()
1330 e->tx_lpi_enabled = p->tx_lpi_enabled; in bcmgenet_get_eee()
1331 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); in bcmgenet_get_eee()
1333 return phy_ethtool_get_eee(dev->phydev, e); in bcmgenet_get_eee()
1339 struct ethtool_keee *p = &priv->eee; in bcmgenet_set_eee()
1343 return -EOPNOTSUPP; in bcmgenet_set_eee()
1345 if (!dev->phydev) in bcmgenet_set_eee()
1346 return -ENODEV; in bcmgenet_set_eee()
1348 p->eee_enabled = e->eee_enabled; in bcmgenet_set_eee()
1350 if (!p->eee_enabled) { in bcmgenet_set_eee()
1353 active = phy_init_eee(dev->phydev, false) >= 0; in bcmgenet_set_eee()
1354 bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); in bcmgenet_set_eee()
1355 bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled); in bcmgenet_set_eee()
1358 return phy_ethtool_set_eee(dev->phydev, e); in bcmgenet_set_eee()
1367 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES && in bcmgenet_validate_flow()
1368 cmd->fs.location != RX_CLS_LOC_ANY) { in bcmgenet_validate_flow()
1370 cmd->fs.location); in bcmgenet_validate_flow()
1371 return -EINVAL; in bcmgenet_validate_flow()
1374 switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { in bcmgenet_validate_flow()
1376 l4_mask = &cmd->fs.m_u.usr_ip4_spec; in bcmgenet_validate_flow()
1378 if (VALIDATE_MASK(l4_mask->ip4src) || in bcmgenet_validate_flow()
1379 VALIDATE_MASK(l4_mask->ip4dst) || in bcmgenet_validate_flow()
1380 VALIDATE_MASK(l4_mask->l4_4_bytes) || in bcmgenet_validate_flow()
1381 VALIDATE_MASK(l4_mask->proto) || in bcmgenet_validate_flow()
1382 VALIDATE_MASK(l4_mask->ip_ver) || in bcmgenet_validate_flow()
1383 VALIDATE_MASK(l4_mask->tos)) { in bcmgenet_validate_flow()
1385 return -EINVAL; in bcmgenet_validate_flow()
1389 eth_mask = &cmd->fs.m_u.ether_spec; in bcmgenet_validate_flow()
1391 if (VALIDATE_MASK(eth_mask->h_dest) || in bcmgenet_validate_flow()
1392 VALIDATE_MASK(eth_mask->h_source) || in bcmgenet_validate_flow()
1393 VALIDATE_MASK(eth_mask->h_proto)) { in bcmgenet_validate_flow()
1395 return -EINVAL; in bcmgenet_validate_flow()
1400 cmd->fs.flow_type); in bcmgenet_validate_flow()
1401 return -EINVAL; in bcmgenet_validate_flow()
1404 if ((cmd->fs.flow_type & FLOW_EXT)) { in bcmgenet_validate_flow()
1406 if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || in bcmgenet_validate_flow()
1407 VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { in bcmgenet_validate_flow()
1409 return -EINVAL; in bcmgenet_validate_flow()
1411 if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { in bcmgenet_validate_flow()
1412 netdev_err(dev, "rxnfc: user-def not supported\n"); in bcmgenet_validate_flow()
1413 return -EINVAL; in bcmgenet_validate_flow()
1417 if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { in bcmgenet_validate_flow()
1419 if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { in bcmgenet_validate_flow()
1421 return -EINVAL; in bcmgenet_validate_flow()
1435 if (priv->hw_params->hfb_filter_size < 128) { in bcmgenet_insert_flow()
1437 return -EINVAL; in bcmgenet_insert_flow()
1440 if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && in bcmgenet_insert_flow()
1441 cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { in bcmgenet_insert_flow()
1443 cmd->fs.ring_cookie); in bcmgenet_insert_flow()
1444 return -EINVAL; in bcmgenet_insert_flow()
1451 if (cmd->fs.location == RX_CLS_LOC_ANY) { in bcmgenet_insert_flow()
1452 list_for_each_entry(loc_rule, &priv->rxnfc_list, list) { in bcmgenet_insert_flow()
1453 cmd->fs.location = loc_rule->fs.location; in bcmgenet_insert_flow()
1454 err = memcmp(&loc_rule->fs, &cmd->fs, in bcmgenet_insert_flow()
1461 loc_rule = &priv->rxnfc_rules[i]; in bcmgenet_insert_flow()
1462 if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_insert_flow()
1463 cmd->fs.location = i; in bcmgenet_insert_flow()
1468 cmd->fs.location = RX_CLS_LOC_ANY; in bcmgenet_insert_flow()
1469 return -ENOSPC; in bcmgenet_insert_flow()
1472 loc_rule = &priv->rxnfc_rules[cmd->fs.location]; in bcmgenet_insert_flow()
1474 if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_insert_flow()
1475 bcmgenet_hfb_disable_filter(priv, cmd->fs.location); in bcmgenet_insert_flow()
1476 if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_insert_flow()
1477 list_del(&loc_rule->list); in bcmgenet_insert_flow()
1478 bcmgenet_hfb_clear_filter(priv, cmd->fs.location); in bcmgenet_insert_flow()
1480 loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_insert_flow()
1481 memcpy(&loc_rule->fs, &cmd->fs, in bcmgenet_insert_flow()
1486 list_add_tail(&loc_rule->list, &priv->rxnfc_list); in bcmgenet_insert_flow()
1498 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) in bcmgenet_delete_flow()
1499 return -EINVAL; in bcmgenet_delete_flow()
1501 rule = &priv->rxnfc_rules[cmd->fs.location]; in bcmgenet_delete_flow()
1502 if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_delete_flow()
1503 err = -ENOENT; in bcmgenet_delete_flow()
1507 if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) in bcmgenet_delete_flow()
1508 bcmgenet_hfb_disable_filter(priv, cmd->fs.location); in bcmgenet_delete_flow()
1509 if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { in bcmgenet_delete_flow()
1510 list_del(&rule->list); in bcmgenet_delete_flow()
1511 bcmgenet_hfb_clear_filter(priv, cmd->fs.location); in bcmgenet_delete_flow()
1513 rule->state = BCMGENET_RXNFC_STATE_UNUSED; in bcmgenet_delete_flow()
1514 memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); in bcmgenet_delete_flow()
1525 switch (cmd->cmd) { in bcmgenet_set_rxnfc()
1533 netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n", in bcmgenet_set_rxnfc()
1534 cmd->cmd); in bcmgenet_set_rxnfc()
1535 return -EINVAL; in bcmgenet_set_rxnfc()
1549 return -EINVAL; in bcmgenet_get_flow()
1551 rule = &priv->rxnfc_rules[loc]; in bcmgenet_get_flow()
1552 if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) in bcmgenet_get_flow()
1553 err = -ENOENT; in bcmgenet_get_flow()
1555 memcpy(&cmd->fs, &rule->fs, in bcmgenet_get_flow()
1566 list_for_each(pos, &priv->rxnfc_list) in bcmgenet_get_num_flows()
1580 switch (cmd->cmd) { in bcmgenet_get_rxnfc()
1582 cmd->data = priv->hw_params->rx_queues ?: 1; in bcmgenet_get_rxnfc()
1585 cmd->rule_cnt = bcmgenet_get_num_flows(priv); in bcmgenet_get_rxnfc()
1586 cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL; in bcmgenet_get_rxnfc()
1589 err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); in bcmgenet_get_rxnfc()
1592 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_get_rxnfc()
1593 if (i < cmd->rule_cnt) in bcmgenet_get_rxnfc()
1594 rule_locs[i++] = rule->fs.location; in bcmgenet_get_rxnfc()
1595 cmd->rule_cnt = i; in bcmgenet_get_rxnfc()
1596 cmd->data = MAX_NUM_OF_FS_RULES; in bcmgenet_get_rxnfc()
1599 err = -EOPNOTSUPP; in bcmgenet_get_rxnfc()
1636 /* Power down the UniMAC, based on mode. */
1645 phy_detach(priv->dev->phydev); in bcmgenet_power_down()
1654 if (priv->hw_params->flags & GENET_HAS_EXT) { in bcmgenet_power_down()
1656 if (GENET_IS_V5(priv) && !priv->ephy_16nm) in bcmgenet_power_down()
1669 bcmgenet_phy_power_set(priv->dev, false); in bcmgenet_power_down()
1684 if (!(priv->hw_params->flags & GENET_HAS_EXT)) in bcmgenet_power_up()
1693 if (GENET_IS_V5(priv) && !priv->ephy_16nm) { in bcmgenet_power_up()
1710 bcmgenet_phy_power_set(priv->dev, true); in bcmgenet_power_up()
1733 tx_cb_ptr = ring->cbs; in bcmgenet_get_txcb()
1734 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; in bcmgenet_get_txcb()
1737 if (ring->write_ptr == ring->end_ptr) in bcmgenet_get_txcb()
1738 ring->write_ptr = ring->cb_ptr; in bcmgenet_get_txcb()
1740 ring->write_ptr++; in bcmgenet_get_txcb()
1750 tx_cb_ptr = ring->cbs; in bcmgenet_put_txcb()
1751 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; in bcmgenet_put_txcb()
1754 if (ring->write_ptr == ring->cb_ptr) in bcmgenet_put_txcb()
1755 ring->write_ptr = ring->end_ptr; in bcmgenet_put_txcb()
1757 ring->write_ptr--; in bcmgenet_put_txcb()
1764 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, in bcmgenet_rx_ring16_int_disable()
1770 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, in bcmgenet_rx_ring16_int_enable()
1776 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_disable()
1777 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), in bcmgenet_rx_ring_int_disable()
1783 bcmgenet_intrl2_1_writel(ring->priv, in bcmgenet_rx_ring_int_enable()
1784 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), in bcmgenet_rx_ring_int_enable()
1790 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, in bcmgenet_tx_ring16_int_disable()
1796 bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, in bcmgenet_tx_ring16_int_enable()
1802 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_enable()
1808 bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, in bcmgenet_tx_ring_int_disable()
1821 skb = cb->skb; in bcmgenet_free_tx_cb()
1824 cb->skb = NULL; in bcmgenet_free_tx_cb()
1825 if (cb == GENET_CB(skb)->first_cb) in bcmgenet_free_tx_cb()
1835 if (cb == GENET_CB(skb)->last_cb) in bcmgenet_free_tx_cb()
1855 skb = cb->skb; in bcmgenet_free_rx_cb()
1856 cb->skb = NULL; in bcmgenet_free_rx_cb()
1880 if (ring->index == DESC_INDEX) in __bcmgenet_tx_reclaim()
1884 bcmgenet_intrl2_1_writel(priv, (1 << ring->index), in __bcmgenet_tx_reclaim()
1888 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) in __bcmgenet_tx_reclaim()
1890 txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; in __bcmgenet_tx_reclaim()
1894 __func__, ring->index, ring->c_index, c_index, txbds_ready); in __bcmgenet_tx_reclaim()
1898 skb = bcmgenet_free_tx_cb(&priv->pdev->dev, in __bcmgenet_tx_reclaim()
1899 &priv->tx_cbs[ring->clean_ptr]); in __bcmgenet_tx_reclaim()
1902 bytes_compl += GENET_CB(skb)->bytes_sent; in __bcmgenet_tx_reclaim()
1907 if (likely(ring->clean_ptr < ring->end_ptr)) in __bcmgenet_tx_reclaim()
1908 ring->clean_ptr++; in __bcmgenet_tx_reclaim()
1910 ring->clean_ptr = ring->cb_ptr; in __bcmgenet_tx_reclaim()
1913 ring->free_bds += txbds_processed; in __bcmgenet_tx_reclaim()
1914 ring->c_index = c_index; in __bcmgenet_tx_reclaim()
1916 ring->packets += pkts_compl; in __bcmgenet_tx_reclaim()
1917 ring->bytes += bytes_compl; in __bcmgenet_tx_reclaim()
1919 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), in __bcmgenet_tx_reclaim()
1930 spin_lock_bh(&ring->lock); in bcmgenet_tx_reclaim()
1932 spin_unlock_bh(&ring->lock); in bcmgenet_tx_reclaim()
1944 spin_lock(&ring->lock); in bcmgenet_tx_poll()
1945 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); in bcmgenet_tx_poll()
1946 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { in bcmgenet_tx_poll()
1947 txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); in bcmgenet_tx_poll()
1950 spin_unlock(&ring->lock); in bcmgenet_tx_poll()
1954 ring->int_enable(ring); in bcmgenet_tx_poll()
1968 for (i = 0; i < priv->hw_params->tx_queues; i++) in bcmgenet_tx_reclaim_all()
1969 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); in bcmgenet_tx_reclaim_all()
1972 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); in bcmgenet_tx_reclaim_all()
1996 priv->mib.tx_realloc_tsb_failed++; in bcmgenet_add_tsb()
1997 dev->stats.tx_dropped++; in bcmgenet_add_tsb()
2002 priv->mib.tx_realloc_tsb++; in bcmgenet_add_tsb()
2006 status = (struct status_64 *)skb->data; in bcmgenet_add_tsb()
2008 if (skb->ip_summed == CHECKSUM_PARTIAL) { in bcmgenet_add_tsb()
2009 ip_ver = skb->protocol; in bcmgenet_add_tsb()
2012 ip_proto = ip_hdr(skb)->protocol; in bcmgenet_add_tsb()
2015 ip_proto = ipv6_hdr(skb)->nexthdr; in bcmgenet_add_tsb()
2023 offset = skb_checksum_start_offset(skb) - sizeof(*status); in bcmgenet_add_tsb()
2025 (offset + skb->csum_offset) | in bcmgenet_add_tsb()
2032 status->tx_csum_info = tx_csum_info; in bcmgenet_add_tsb()
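	/* Editor's note: the checksum offsets above are rebased by
	 * sizeof(*status) because a 64-byte transmit status block (TSB)
	 * is prepended to the frame; assumed layout after the realloc:
	 *
	 *	skb->data -> | struct status_64 (64 B) | Ethernet frame |
	 *
	 * tx_csum_info packs where the L4 checksum computation starts and
	 * where the result must be stored.
	 */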
2046 struct device *kdev = &priv->pdev->dev; in bcmgenet_xmit()
2069 index -= 1; in bcmgenet_xmit()
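	/* Editor's sketch of the mapping around this line (reconstructed):
	 * netdev queue 0 carries unclassified traffic and is served by
	 * ring 16, while netdev queues 1..4 map onto priority rings 0..3,
	 * hence the decrement:
	 *
	 *	index = skb_get_queue_mapping(skb);
	 *	if (index == 0)
	 *		index = DESC_INDEX;
	 *	else
	 *		index -= 1;
	 */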
2071 ring = &priv->tx_rings[index]; in bcmgenet_xmit()
2072 txq = netdev_get_tx_queue(dev, ring->queue); in bcmgenet_xmit()
2074 nr_frags = skb_shinfo(skb)->nr_frags; in bcmgenet_xmit()
2076 spin_lock(&ring->lock); in bcmgenet_xmit()
2077 if (ring->free_bds <= (nr_frags + 1)) { in bcmgenet_xmit()
2087 GENET_CB(skb)->bytes_sent = skb->len; in bcmgenet_xmit()
2103 GENET_CB(skb)->first_cb = tx_cb_ptr; in bcmgenet_xmit()
2105 mapping = dma_map_single(kdev, skb->data, size, in bcmgenet_xmit()
2109 frag = &skb_shinfo(skb)->frags[i - 1]; in bcmgenet_xmit()
2117 priv->mib.tx_dma_failed++; in bcmgenet_xmit()
2125 tx_cb_ptr->skb = skb; in bcmgenet_xmit()
2128 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); in bcmgenet_xmit()
2137 if (skb->ip_summed == CHECKSUM_PARTIAL) in bcmgenet_xmit()
2143 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); in bcmgenet_xmit()
2146 GENET_CB(skb)->last_cb = tx_cb_ptr; in bcmgenet_xmit()
2152 ring->free_bds -= nr_frags + 1; in bcmgenet_xmit()
2153 ring->prod_index += nr_frags + 1; in bcmgenet_xmit()
2154 ring->prod_index &= DMA_P_INDEX_MASK; in bcmgenet_xmit()
2156 netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); in bcmgenet_xmit()
2158 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) in bcmgenet_xmit()
2163 bcmgenet_tdma_ring_writel(priv, ring->index, in bcmgenet_xmit()
2164 ring->prod_index, TDMA_PROD_INDEX); in bcmgenet_xmit()
2166 spin_unlock(&ring->lock); in bcmgenet_xmit()
2175 while (i-- > 0) { in bcmgenet_xmit()
2187 struct device *kdev = &priv->pdev->dev; in bcmgenet_rx_refill()
2193 skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, in bcmgenet_rx_refill()
2196 priv->mib.alloc_rx_buff_failed++; in bcmgenet_rx_refill()
2197 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
2202 /* DMA-map the new Rx skb */ in bcmgenet_rx_refill()
2203 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, in bcmgenet_rx_refill()
2206 priv->mib.rx_dma_failed++; in bcmgenet_rx_refill()
2208 netif_err(priv, rx_err, priv->dev, in bcmgenet_rx_refill()
2213 /* Grab the current Rx skb from the ring and DMA-unmap it */ in bcmgenet_rx_refill()
2217 cb->skb = skb; in bcmgenet_rx_refill()
2219 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); in bcmgenet_rx_refill()
2220 dmadesc_set_addr(priv, cb->bd_addr, mapping); in bcmgenet_rx_refill()
2226 /* bcmgenet_desc_rx - descriptor based rx process.
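 * Editor's outline of the receive loop below (summary of the code as
 * listed, not verbatim): read RDMA_PROD_INDEX, derive the pending count
 * modulo DMA_C_INDEX_MASK, then per packet swap the filled skb out of the
 * ring while refilling the slot, check SOP/EOP and DMA error flags, strip
 * the status block (and FCS when forwarded), and pass the frame up via
 * napi_gro_receive().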
2232 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_desc_rx()
2233 struct net_device *dev = priv->dev; in bcmgenet_desc_rx()
2245 if (ring->index == DESC_INDEX) { in bcmgenet_desc_rx()
2249 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); in bcmgenet_desc_rx()
2255 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); in bcmgenet_desc_rx()
2259 if (discards > ring->old_discards) { in bcmgenet_desc_rx()
2260 discards = discards - ring->old_discards; in bcmgenet_desc_rx()
2261 ring->errors += discards; in bcmgenet_desc_rx()
2262 ring->old_discards += discards; in bcmgenet_desc_rx()
2265 if (ring->old_discards >= 0xC000) { in bcmgenet_desc_rx()
2266 ring->old_discards = 0; in bcmgenet_desc_rx()
2267 bcmgenet_rdma_ring_writel(priv, ring->index, 0, in bcmgenet_desc_rx()
2273 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; in bcmgenet_desc_rx()
2283 cb = &priv->rx_cbs[ring->read_ptr]; in bcmgenet_desc_rx()
2287 ring->dropped++; in bcmgenet_desc_rx()
2291 status = (struct status_64 *)skb->data; in bcmgenet_desc_rx()
2292 dma_length_status = status->length_status; in bcmgenet_desc_rx()
2293 if (dev->features & NETIF_F_RXCSUM) { in bcmgenet_desc_rx()
2294 rx_csum = (__force __be16)(status->rx_csum & 0xffff); in bcmgenet_desc_rx()
2296 skb->csum = (__force __wsum)ntohs(rx_csum); in bcmgenet_desc_rx()
2297 skb->ip_summed = CHECKSUM_COMPLETE; in bcmgenet_desc_rx()
2309 __func__, p_index, ring->c_index, in bcmgenet_desc_rx()
2310 ring->read_ptr, dma_length_status); in bcmgenet_desc_rx()
2314 dev->stats.rx_length_errors++; in bcmgenet_desc_rx()
2315 dev->stats.rx_errors++; in bcmgenet_desc_rx()
2323 ring->errors++; in bcmgenet_desc_rx()
2337 dev->stats.rx_crc_errors++; in bcmgenet_desc_rx()
2339 dev->stats.rx_over_errors++; in bcmgenet_desc_rx()
2341 dev->stats.rx_frame_errors++; in bcmgenet_desc_rx()
2343 dev->stats.rx_length_errors++; in bcmgenet_desc_rx()
2344 dev->stats.rx_errors++; in bcmgenet_desc_rx()
2353 len -= 66; in bcmgenet_desc_rx()
2355 if (priv->crc_fwd_en) { in bcmgenet_desc_rx()
2356 skb_trim(skb, len - ETH_FCS_LEN); in bcmgenet_desc_rx()
2357 len -= ETH_FCS_LEN; in bcmgenet_desc_rx()
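		/* Editor's note: the 66 bytes stripped a few lines up are
		 * the 64-byte Rx status block (struct status_64) plus the
		 * 2 bytes the hardware inserts for IP alignment; a
		 * forwarded FCS is trimmed separately when crc_fwd_en is
		 * set, as above.
		 */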
2363 skb->protocol = eth_type_trans(skb, priv->dev); in bcmgenet_desc_rx()
2364 ring->packets++; in bcmgenet_desc_rx()
2365 ring->bytes += len; in bcmgenet_desc_rx()
2367 dev->stats.multicast++; in bcmgenet_desc_rx()
2370 napi_gro_receive(&ring->napi, skb); in bcmgenet_desc_rx()
2375 if (likely(ring->read_ptr < ring->end_ptr)) in bcmgenet_desc_rx()
2376 ring->read_ptr++; in bcmgenet_desc_rx()
2378 ring->read_ptr = ring->cb_ptr; in bcmgenet_desc_rx()
2380 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; in bcmgenet_desc_rx()
2381 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); in bcmgenet_desc_rx()
2384 ring->dim.bytes = bytes_processed; in bcmgenet_desc_rx()
2385 ring->dim.packets = rxpktprocessed; in bcmgenet_desc_rx()
2402 ring->int_enable(ring); in bcmgenet_rx_poll()
2405 if (ring->dim.use_dim) { in bcmgenet_rx_poll()
2406 dim_update_sample(ring->dim.event_ctr, ring->dim.packets, in bcmgenet_rx_poll()
2407 ring->dim.bytes, &dim_sample); in bcmgenet_rx_poll()
2408 net_dim(&ring->dim.dim, dim_sample); in bcmgenet_rx_poll()
2422 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in bcmgenet_dim_work()
2425 dim->state = DIM_START_MEASURE; in bcmgenet_dim_work()
2436 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_alloc_rx_buffers()
2439 for (i = 0; i < ring->size; i++) { in bcmgenet_alloc_rx_buffers()
2440 cb = ring->cbs + i; in bcmgenet_alloc_rx_buffers()
2444 if (!cb->skb) in bcmgenet_alloc_rx_buffers()
2445 return -ENOMEM; in bcmgenet_alloc_rx_buffers()
2457 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_free_rx_buffers()
2458 cb = &priv->rx_cbs[i]; in bcmgenet_free_rx_buffers()
2460 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); in bcmgenet_free_rx_buffers()
2470 spin_lock_bh(&priv->reg_lock); in umac_enable_set()
2473 spin_unlock_bh(&priv->reg_lock); in umac_enable_set()
2481 spin_unlock_bh(&priv->reg_lock); in umac_enable_set()
2483 /* UniMAC stops on a packet boundary, wait for a full-size packet in umac_enable_set()
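	/* Editor's sketch of the wait described above (reconstructed; the
	 * exact delay values are an assumption): when disabling, sleep long
	 * enough for one full-sized frame to drain, e.g.
	 *
	 *	if (!enable)
	 *		usleep_range(1000, 2000);
	 */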
2497 spin_lock_bh(&priv->reg_lock); in reset_umac()
2500 spin_unlock_bh(&priv->reg_lock); in reset_umac()
2519 if (priv->internal_phy) { in bcmgenet_link_intr_enable()
2523 } else if (priv->ext_phy) { in bcmgenet_link_intr_enable()
2525 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { in bcmgenet_link_intr_enable()
2526 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) in bcmgenet_link_intr_enable()
2534 struct device *kdev = &priv->pdev->dev; in init_umac()
2538 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); in init_umac()
2563 /* If UniMAC forwards CRC, we need to skip over it to get in init_umac()
2564 * a valid CHK bit to be set in the per-packet status word in init_umac()
2566 if (priv->crc_fwd_en) in init_umac()
2578 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { in init_umac()
2580 reg |= BIT(priv->hw_params->bp_in_en_shift); in init_umac()
2583 if (netif_is_multiqueue(priv->dev)) in init_umac()
2584 reg |= priv->hw_params->bp_in_mask; in init_umac()
2586 reg &= ~priv->hw_params->bp_in_mask; in init_umac()
2590 /* Enable MDIO interrupts on GENET v3+ */ in init_umac()
2591 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) in init_umac()
2602 struct bcmgenet_net_dim *dim = &ring->dim; in bcmgenet_init_dim()
2604 INIT_WORK(&dim->dim.work, cb); in bcmgenet_init_dim()
2605 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in bcmgenet_init_dim()
2606 dim->event_ctr = 0; in bcmgenet_init_dim()
2607 dim->packets = 0; in bcmgenet_init_dim()
2608 dim->bytes = 0; in bcmgenet_init_dim()
2613 struct bcmgenet_net_dim *dim = &ring->dim; in bcmgenet_init_rx_coalesce()
2617 usecs = ring->rx_coalesce_usecs; in bcmgenet_init_rx_coalesce()
2618 pkts = ring->rx_max_coalesced_frames; in bcmgenet_init_rx_coalesce()
2620 /* If DIM was enabled, re-apply default parameters */ in bcmgenet_init_rx_coalesce()
2621 if (dim->use_dim) { in bcmgenet_init_rx_coalesce()
2622 moder = net_dim_get_def_rx_moderation(dim->dim.mode); in bcmgenet_init_rx_coalesce()
2635 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; in bcmgenet_init_tx_ring()
2639 spin_lock_init(&ring->lock); in bcmgenet_init_tx_ring()
2640 ring->priv = priv; in bcmgenet_init_tx_ring()
2641 ring->index = index; in bcmgenet_init_tx_ring()
2643 ring->queue = 0; in bcmgenet_init_tx_ring()
2644 ring->int_enable = bcmgenet_tx_ring16_int_enable; in bcmgenet_init_tx_ring()
2645 ring->int_disable = bcmgenet_tx_ring16_int_disable; in bcmgenet_init_tx_ring()
2647 ring->queue = index + 1; in bcmgenet_init_tx_ring()
2648 ring->int_enable = bcmgenet_tx_ring_int_enable; in bcmgenet_init_tx_ring()
2649 ring->int_disable = bcmgenet_tx_ring_int_disable; in bcmgenet_init_tx_ring()
2651 ring->cbs = priv->tx_cbs + start_ptr; in bcmgenet_init_tx_ring()
2652 ring->size = size; in bcmgenet_init_tx_ring()
2653 ring->clean_ptr = start_ptr; in bcmgenet_init_tx_ring()
2654 ring->c_index = 0; in bcmgenet_init_tx_ring()
2655 ring->free_bds = size; in bcmgenet_init_tx_ring()
2656 ring->write_ptr = start_ptr; in bcmgenet_init_tx_ring()
2657 ring->cb_ptr = start_ptr; in bcmgenet_init_tx_ring()
2658 ring->end_ptr = end_ptr - 1; in bcmgenet_init_tx_ring()
2659 ring->prod_index = 0; in bcmgenet_init_tx_ring()
2682 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_tx_ring()
2686 netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); in bcmgenet_init_tx_ring()
2694 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; in bcmgenet_init_rx_ring()
2698 ring->priv = priv; in bcmgenet_init_rx_ring()
2699 ring->index = index; in bcmgenet_init_rx_ring()
2701 ring->int_enable = bcmgenet_rx_ring16_int_enable; in bcmgenet_init_rx_ring()
2702 ring->int_disable = bcmgenet_rx_ring16_int_disable; in bcmgenet_init_rx_ring()
2704 ring->int_enable = bcmgenet_rx_ring_int_enable; in bcmgenet_init_rx_ring()
2705 ring->int_disable = bcmgenet_rx_ring_int_disable; in bcmgenet_init_rx_ring()
2707 ring->cbs = priv->rx_cbs + start_ptr; in bcmgenet_init_rx_ring()
2708 ring->size = size; in bcmgenet_init_rx_ring()
2709 ring->c_index = 0; in bcmgenet_init_rx_ring()
2710 ring->read_ptr = start_ptr; in bcmgenet_init_rx_ring()
2711 ring->cb_ptr = start_ptr; in bcmgenet_init_rx_ring()
2712 ring->end_ptr = end_ptr - 1; in bcmgenet_init_rx_ring()
2722 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); in bcmgenet_init_rx_ring()
2741 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, in bcmgenet_init_rx_ring()
2752 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_enable_tx_napi()
2753 ring = &priv->tx_rings[i]; in bcmgenet_enable_tx_napi()
2754 napi_enable(&ring->napi); in bcmgenet_enable_tx_napi()
2755 ring->int_enable(ring); in bcmgenet_enable_tx_napi()
2758 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_enable_tx_napi()
2759 napi_enable(&ring->napi); in bcmgenet_enable_tx_napi()
2760 ring->int_enable(ring); in bcmgenet_enable_tx_napi()
2768 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_disable_tx_napi()
2769 ring = &priv->tx_rings[i]; in bcmgenet_disable_tx_napi()
2770 napi_disable(&ring->napi); in bcmgenet_disable_tx_napi()
2773 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_disable_tx_napi()
2774 napi_disable(&ring->napi); in bcmgenet_disable_tx_napi()
2782 for (i = 0; i < priv->hw_params->tx_queues; ++i) { in bcmgenet_fini_tx_napi()
2783 ring = &priv->tx_rings[i]; in bcmgenet_fini_tx_napi()
2784 netif_napi_del(&ring->napi); in bcmgenet_fini_tx_napi()
2787 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_fini_tx_napi()
2788 netif_napi_del(&ring->napi); in bcmgenet_fini_tx_napi()
2793 * Queues 0-3 are priority-based, each one has 32 descriptors,
2797 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2800 * - Tx queue 0 uses tx_cbs[0..31]
2801 * - Tx queue 1 uses tx_cbs[32..63]
2802 * - Tx queue 2 uses tx_cbs[64..95]
2803 * - Tx queue 3 uses tx_cbs[96..127]
2804 * - Tx queue 16 uses tx_cbs[128..255]
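 * Editor's note -- the table above translates directly into the
 * start_ptr/end_ptr arguments used below (assuming the default
 * tx_bds_per_q = 32): priority ring q owns tx_cbs[q * 32 .. q * 32 + 31],
 * and ring 16 owns the remaining GENET_Q16_TX_BD_CNT = 128 entries.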
2825 for (i = 0; i < priv->hw_params->tx_queues; i++) { in bcmgenet_init_tx_queues()
2826 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
2827 i * priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
2828 (i + 1) * priv->hw_params->tx_bds_per_q); in bcmgenet_init_tx_queues()
2837 priv->hw_params->tx_queues * in bcmgenet_init_tx_queues()
2838 priv->hw_params->tx_bds_per_q, in bcmgenet_init_tx_queues()
2843 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << in bcmgenet_init_tx_queues()
2865 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_enable_rx_napi()
2866 ring = &priv->rx_rings[i]; in bcmgenet_enable_rx_napi()
2867 napi_enable(&ring->napi); in bcmgenet_enable_rx_napi()
2868 ring->int_enable(ring); in bcmgenet_enable_rx_napi()
2871 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_enable_rx_napi()
2872 napi_enable(&ring->napi); in bcmgenet_enable_rx_napi()
2873 ring->int_enable(ring); in bcmgenet_enable_rx_napi()
2881 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_disable_rx_napi()
2882 ring = &priv->rx_rings[i]; in bcmgenet_disable_rx_napi()
2883 napi_disable(&ring->napi); in bcmgenet_disable_rx_napi()
2884 cancel_work_sync(&ring->dim.dim.work); in bcmgenet_disable_rx_napi()
2887 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_disable_rx_napi()
2888 napi_disable(&ring->napi); in bcmgenet_disable_rx_napi()
2889 cancel_work_sync(&ring->dim.dim.work); in bcmgenet_disable_rx_napi()
2897 for (i = 0; i < priv->hw_params->rx_queues; ++i) { in bcmgenet_fini_rx_napi()
2898 ring = &priv->rx_rings[i]; in bcmgenet_fini_rx_napi()
2899 netif_napi_del(&ring->napi); in bcmgenet_fini_rx_napi()
2902 ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_fini_rx_napi()
2903 netif_napi_del(&ring->napi); in bcmgenet_fini_rx_napi()
2908 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be used to direct traffic to these queues.
2931 for (i = 0; i < priv->hw_params->rx_queues; i++) { in bcmgenet_init_rx_queues()
2933 priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2934 i * priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2936 priv->hw_params->rx_bds_per_q); in bcmgenet_init_rx_queues()
2946 priv->hw_params->rx_queues * in bcmgenet_init_rx_queues()
2947 priv->hw_params->rx_bds_per_q, in bcmgenet_init_rx_queues()
2958 /* Configure ring as descriptor ring and re-enable DMA if enabled */ in bcmgenet_init_rx_queues()
2989 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); in bcmgenet_dma_teardown()
2990 ret = -ETIMEDOUT; in bcmgenet_dma_teardown()
3012 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); in bcmgenet_dma_teardown()
3013 ret = -ETIMEDOUT; in bcmgenet_dma_teardown()
3017 for (i = 0; i < priv->hw_params->rx_queues; i++) in bcmgenet_dma_teardown()
3024 for (i = 0; i < priv->hw_params->tx_queues; i++) in bcmgenet_dma_teardown()
3041 for (i = 0; i < priv->num_tx_bds; i++) in bcmgenet_fini_dma()
3042 dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, in bcmgenet_fini_dma()
3043 priv->tx_cbs + i)); in bcmgenet_fini_dma()
3045 for (i = 0; i < priv->hw_params->tx_queues; i++) { in bcmgenet_fini_dma()
3046 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); in bcmgenet_fini_dma()
3050 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); in bcmgenet_fini_dma()
3054 kfree(priv->rx_cbs); in bcmgenet_fini_dma()
3055 kfree(priv->tx_cbs); in bcmgenet_fini_dma()
3065 netif_dbg(priv, hw, priv->dev, "%s\n", __func__); in bcmgenet_init_dma()
3068 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; in bcmgenet_init_dma()
3069 priv->num_rx_bds = TOTAL_DESC; in bcmgenet_init_dma()
3070 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
3072 if (!priv->rx_cbs) in bcmgenet_init_dma()
3073 return -ENOMEM; in bcmgenet_init_dma()
3075 for (i = 0; i < priv->num_rx_bds; i++) { in bcmgenet_init_dma()
3076 cb = priv->rx_cbs + i; in bcmgenet_init_dma()
3077 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
3081 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; in bcmgenet_init_dma()
3082 priv->num_tx_bds = TOTAL_DESC; in bcmgenet_init_dma()
3083 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), in bcmgenet_init_dma()
3085 if (!priv->tx_cbs) { in bcmgenet_init_dma()
3086 kfree(priv->rx_cbs); in bcmgenet_init_dma()
3087 return -ENOMEM; in bcmgenet_init_dma()
3090 for (i = 0; i < priv->num_tx_bds; i++) { in bcmgenet_init_dma()
3091 cb = priv->tx_cbs + i; in bcmgenet_init_dma()
3092 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; in bcmgenet_init_dma()
3096 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, in bcmgenet_init_dma()
3100 ret = bcmgenet_init_rx_queues(priv->dev); in bcmgenet_init_dma()
3102 netdev_err(priv->dev, "failed to initialize Rx queues\n"); in bcmgenet_init_dma()
3104 kfree(priv->rx_cbs); in bcmgenet_init_dma()
3105 kfree(priv->tx_cbs); in bcmgenet_init_dma()
3110 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, in bcmgenet_init_dma()
3114 bcmgenet_init_tx_queues(priv->dev); in bcmgenet_init_dma()
3126 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); in bcmgenet_irq_task()
3128 spin_lock_irq(&priv->lock); in bcmgenet_irq_task()
3129 status = priv->irq0_stat; in bcmgenet_irq_task()
3130 priv->irq0_stat = 0; in bcmgenet_irq_task()
3131 spin_unlock_irq(&priv->lock); in bcmgenet_irq_task()
3134 priv->dev->phydev->autoneg != AUTONEG_ENABLE) { in bcmgenet_irq_task()
3135 phy_init_hw(priv->dev->phydev); in bcmgenet_irq_task()
3136 genphy_config_aneg(priv->dev->phydev); in bcmgenet_irq_task()
3141 phy_mac_interrupt(priv->dev->phydev); in bcmgenet_irq_task()
3160 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr1()
3164 for (index = 0; index < priv->hw_params->rx_queues; index++) { in bcmgenet_isr1()
3168 rx_ring = &priv->rx_rings[index]; in bcmgenet_isr1()
3169 rx_ring->dim.event_ctr++; in bcmgenet_isr1()
3171 if (likely(napi_schedule_prep(&rx_ring->napi))) { in bcmgenet_isr1()
3172 rx_ring->int_disable(rx_ring); in bcmgenet_isr1()
3173 __napi_schedule_irqoff(&rx_ring->napi); in bcmgenet_isr1()
3178 for (index = 0; index < priv->hw_params->tx_queues; index++) { in bcmgenet_isr1()
3182 tx_ring = &priv->tx_rings[index]; in bcmgenet_isr1()
3184 if (likely(napi_schedule_prep(&tx_ring->napi))) { in bcmgenet_isr1()
3185 tx_ring->int_disable(tx_ring); in bcmgenet_isr1()
3186 __napi_schedule_irqoff(&tx_ring->napi); in bcmgenet_isr1()
3209 netif_dbg(priv, intr, priv->dev, in bcmgenet_isr0()
3213 rx_ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_isr0()
3214 rx_ring->dim.event_ctr++; in bcmgenet_isr0()
3216 if (likely(napi_schedule_prep(&rx_ring->napi))) { in bcmgenet_isr0()
3217 rx_ring->int_disable(rx_ring); in bcmgenet_isr0()
3218 __napi_schedule_irqoff(&rx_ring->napi); in bcmgenet_isr0()
3223 tx_ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_isr0()
3225 if (likely(napi_schedule_prep(&tx_ring->napi))) { in bcmgenet_isr0()
3226 tx_ring->int_disable(tx_ring); in bcmgenet_isr0()
3227 __napi_schedule_irqoff(&tx_ring->napi); in bcmgenet_isr0()
3231 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && in bcmgenet_isr0()
3233 wake_up(&priv->wq); in bcmgenet_isr0()
3239 /* Save irq status for bottom-half processing. */ in bcmgenet_isr0()
3240 spin_lock_irqsave(&priv->lock, flags); in bcmgenet_isr0()
3241 priv->irq0_stat |= status; in bcmgenet_isr0()
3242 spin_unlock_irqrestore(&priv->lock, flags); in bcmgenet_isr0()
3244 schedule_work(&priv->bcmgenet_irq_work); in bcmgenet_isr0()
3297 for (i = 0; i < priv->hw_params->tx_queues; i++) in bcmgenet_dma_disable()
3304 for (i = 0; i < priv->hw_params->rx_queues; i++) in bcmgenet_dma_disable()
3355 phy_start(dev->phydev); in bcmgenet_netif_start()
3367 clk_prepare_enable(priv->clk); in bcmgenet_open()
3369 /* If this is an internal GPHY, power it back on now, before UniMAC is in bcmgenet_open()
3370 * brought out of reset as absolutely no UniMAC activity is allowed in bcmgenet_open()
3372 if (priv->internal_phy) in bcmgenet_open()
3383 bcmgenet_set_features(dev, dev->features); in bcmgenet_open()
3385 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_open()
3397 /* Always enable ring 16 - descriptor ring */ in bcmgenet_open()
3403 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, in bcmgenet_open()
3404 dev->name, priv); in bcmgenet_open()
3406 netdev_err(dev, "can't request IRQ %d\n", priv->irq0); in bcmgenet_open()
3410 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, in bcmgenet_open()
3411 dev->name, priv); in bcmgenet_open()
3413 netdev_err(dev, "can't request IRQ %d\n", priv->irq1); in bcmgenet_open()
3423 bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); in bcmgenet_open()
3432 free_irq(priv->irq1, priv); in bcmgenet_open()
3434 free_irq(priv->irq0, priv); in bcmgenet_open()
3439 if (priv->internal_phy) in bcmgenet_open()
3441 clk_disable_unprepare(priv->clk); in bcmgenet_open()
3461 phy_stop(dev->phydev); in bcmgenet_netif_stop()
3468 cancel_work_sync(&priv->bcmgenet_irq_work); in bcmgenet_netif_stop()
3485 phy_disconnect(dev->phydev); in bcmgenet_close()
3487 free_irq(priv->irq0, priv); in bcmgenet_close()
3488 free_irq(priv->irq1, priv); in bcmgenet_close()
3490 if (priv->internal_phy) in bcmgenet_close()
3493 clk_disable_unprepare(priv->clk); in bcmgenet_close()
3500 struct bcmgenet_priv *priv = ring->priv; in bcmgenet_dump_tx_queue()
3509 txq = netdev_get_tx_queue(priv->dev, ring->queue); in bcmgenet_dump_tx_queue()
3511 spin_lock(&ring->lock); in bcmgenet_dump_tx_queue()
3512 if (ring->index == DESC_INDEX) { in bcmgenet_dump_tx_queue()
3517 intmsk = 1 << ring->index; in bcmgenet_dump_tx_queue()
3519 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); in bcmgenet_dump_tx_queue()
3520 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); in bcmgenet_dump_tx_queue()
3522 free_bds = ring->free_bds; in bcmgenet_dump_tx_queue()
3523 spin_unlock(&ring->lock); in bcmgenet_dump_tx_queue()
3525 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" in bcmgenet_dump_tx_queue()
3532 ring->index, ring->queue, in bcmgenet_dump_tx_queue()
3535 free_bds, ring->size, in bcmgenet_dump_tx_queue()
3536 ring->prod_index, p_index & DMA_P_INDEX_MASK, in bcmgenet_dump_tx_queue()
3537 ring->c_index, c_index & DMA_C_INDEX_MASK, in bcmgenet_dump_tx_queue()
3538 ring->clean_ptr, ring->write_ptr, in bcmgenet_dump_tx_queue()
3539 ring->cb_ptr, ring->end_ptr); in bcmgenet_dump_tx_queue()
3551 for (q = 0; q < priv->hw_params->tx_queues; q++) in bcmgenet_timeout()
3552 bcmgenet_dump_tx_queue(&priv->tx_rings[q]); in bcmgenet_timeout()
3553 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); in bcmgenet_timeout()
3557 for (q = 0; q < priv->hw_params->tx_queues; q++) in bcmgenet_timeout()
3562 /* Re-enable TX interrupts if disabled */ in bcmgenet_timeout()
3568 dev->stats.tx_errors++; in bcmgenet_timeout()
3594 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); in bcmgenet_set_rx_mode()
3606 spin_lock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3608 if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || in bcmgenet_set_rx_mode()
3612 spin_unlock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3618 spin_unlock(&priv->reg_lock); in bcmgenet_set_rx_mode()
3624 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); in bcmgenet_set_rx_mode()
3626 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); in bcmgenet_set_rx_mode()
3630 bcmgenet_set_mdf_addr(priv, ha->addr, &i); in bcmgenet_set_rx_mode()
3634 bcmgenet_set_mdf_addr(priv, ha->addr, &i); in bcmgenet_set_rx_mode()
3637 reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); in bcmgenet_set_rx_mode()
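	/* Editor's worked example (MAX_MDF_FILTER = 17 assumed from the
	 * companion header): with broadcast, the device address, one
	 * unicast and one multicast entry, nfilter = 4 and
	 *
	 *	GENMASK(16, 13) = 0x1e000
	 *
	 * i.e. only the top nfilter MDF slots are enabled.
	 */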
3647 * without disabling the UniMAC RX/TX enable bits. in bcmgenet_set_mac_addr()
3650 return -EBUSY; in bcmgenet_set_mac_addr()
3652 eth_hw_addr_set(dev, addr->sa_data); in bcmgenet_set_mac_addr()
3667 for (q = 0; q < priv->hw_params->tx_queues; q++) { in bcmgenet_get_stats()
3668 tx_ring = &priv->tx_rings[q]; in bcmgenet_get_stats()
3669 tx_bytes += tx_ring->bytes; in bcmgenet_get_stats()
3670 tx_packets += tx_ring->packets; in bcmgenet_get_stats()
3672 tx_ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_get_stats()
3673 tx_bytes += tx_ring->bytes; in bcmgenet_get_stats()
3674 tx_packets += tx_ring->packets; in bcmgenet_get_stats()
3676 for (q = 0; q < priv->hw_params->rx_queues; q++) { in bcmgenet_get_stats()
3677 rx_ring = &priv->rx_rings[q]; in bcmgenet_get_stats()
3679 rx_bytes += rx_ring->bytes; in bcmgenet_get_stats()
3680 rx_packets += rx_ring->packets; in bcmgenet_get_stats()
3681 rx_errors += rx_ring->errors; in bcmgenet_get_stats()
3682 rx_dropped += rx_ring->dropped; in bcmgenet_get_stats()
3684 rx_ring = &priv->rx_rings[DESC_INDEX]; in bcmgenet_get_stats()
3685 rx_bytes += rx_ring->bytes; in bcmgenet_get_stats()
3686 rx_packets += rx_ring->packets; in bcmgenet_get_stats()
3687 rx_errors += rx_ring->errors; in bcmgenet_get_stats()
3688 rx_dropped += rx_ring->dropped; in bcmgenet_get_stats()
3690 dev->stats.tx_bytes = tx_bytes; in bcmgenet_get_stats()
3691 dev->stats.tx_packets = tx_packets; in bcmgenet_get_stats()
3692 dev->stats.rx_bytes = rx_bytes; in bcmgenet_get_stats()
3693 dev->stats.rx_packets = rx_packets; in bcmgenet_get_stats()
3694 dev->stats.rx_errors = rx_errors; in bcmgenet_get_stats()
3695 dev->stats.rx_missed_errors = rx_errors; in bcmgenet_get_stats()
3696 dev->stats.rx_dropped = rx_dropped; in bcmgenet_get_stats()
3697 return &dev->stats; in bcmgenet_get_stats()
3704 if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || in bcmgenet_change_carrier()
3705 priv->phy_interface != PHY_INTERFACE_MODE_MOCA) in bcmgenet_change_carrier()
3706 return -EOPNOTSUPP; in bcmgenet_change_carrier()
3844 priv->hw_params = &bcmgenet_hw_params[priv->version]; in bcmgenet_set_hw_params()
3845 params = priv->hw_params; in bcmgenet_set_hw_params()
3856 if (major != priv->version) { in bcmgenet_set_hw_params()
3857 dev_err(&priv->pdev->dev, in bcmgenet_set_hw_params()
3859 major, priv->version); in bcmgenet_set_hw_params()
3863 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, in bcmgenet_set_hw_params()
3866 /* Store the integrated PHY revision for the MDIO probing function in bcmgenet_set_hw_params()
3875 * heuristic to check for the new GPHY revision and re-arrange things in bcmgenet_set_hw_params()
3881 /* The EPHY revision should come from the MDIO registers of in bcmgenet_set_hw_params()
3894 priv->gphy_rev = gphy_rev << 8; in bcmgenet_set_hw_params()
3897 priv->gphy_rev = gphy_rev; in bcmgenet_set_hw_params()
3901 if (!(params->flags & GENET_HAS_40BITS)) in bcmgenet_set_hw_params()
3902 pr_warn("GENET does not support 40-bits PA\n"); in bcmgenet_set_hw_params()
3912 priv->version, in bcmgenet_set_hw_params()
3913 params->tx_queues, params->tx_bds_per_q, in bcmgenet_set_hw_params()
3914 params->rx_queues, params->rx_bds_per_q, in bcmgenet_set_hw_params()
3915 params->bp_in_en_shift, params->bp_in_mask, in bcmgenet_set_hw_params()
3916 params->hfb_filter_cnt, params->qtag_mask, in bcmgenet_set_hw_params()
3917 params->tbuf_offset, params->hfb_offset, in bcmgenet_set_hw_params()
3918 params->hfb_reg_offset, in bcmgenet_set_hw_params()
3919 params->rdma_offset, params->tdma_offset, in bcmgenet_set_hw_params()
3920 params->words_per_bd); in bcmgenet_set_hw_params()
3966 { .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3967 { .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3968 { .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3969 { .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3970 { .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3971 { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3972 { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
3979 struct bcmgenet_platform_data *pd = pdev->dev.platform_data; in bcmgenet_probe()
3984 int err = -EIO; in bcmgenet_probe()
3990 dev_err(&pdev->dev, "can't allocate net device\n"); in bcmgenet_probe()
3991 return -ENOMEM; in bcmgenet_probe()
3995 priv->irq0 = platform_get_irq(pdev, 0); in bcmgenet_probe()
3996 if (priv->irq0 < 0) { in bcmgenet_probe()
3997 err = priv->irq0; in bcmgenet_probe()
4000 priv->irq1 = platform_get_irq(pdev, 1); in bcmgenet_probe()
4001 if (priv->irq1 < 0) { in bcmgenet_probe()
4002 err = priv->irq1; in bcmgenet_probe()
4005 priv->wol_irq = platform_get_irq_optional(pdev, 2); in bcmgenet_probe()
4006 if (priv->wol_irq == -EPROBE_DEFER) { in bcmgenet_probe()
4007 err = priv->wol_irq; in bcmgenet_probe()
4011 priv->base = devm_platform_ioremap_resource(pdev, 0); in bcmgenet_probe()
4012 if (IS_ERR(priv->base)) { in bcmgenet_probe()
4013 err = PTR_ERR(priv->base); in bcmgenet_probe()
4017 spin_lock_init(&priv->reg_lock); in bcmgenet_probe()
4018 spin_lock_init(&priv->lock); in bcmgenet_probe()
4021 priv->autoneg_pause = 1; in bcmgenet_probe()
4022 priv->tx_pause = 1; in bcmgenet_probe()
4023 priv->rx_pause = 1; in bcmgenet_probe()
4025 SET_NETDEV_DEV(dev, &pdev->dev); in bcmgenet_probe()
4026 dev_set_drvdata(&pdev->dev, dev); in bcmgenet_probe()
4027 dev->watchdog_timeo = 2 * HZ; in bcmgenet_probe()
4028 dev->ethtool_ops = &bcmgenet_ethtool_ops; in bcmgenet_probe()
4029 dev->netdev_ops = &bcmgenet_netdev_ops; in bcmgenet_probe()
4031 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); in bcmgenet_probe()
4034 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | in bcmgenet_probe()
4036 dev->hw_features |= dev->features; in bcmgenet_probe()
4037 dev->vlan_features |= dev->features; in bcmgenet_probe()
4040 priv->wol_irq_disabled = true; in bcmgenet_probe()
4041 if (priv->wol_irq > 0) { in bcmgenet_probe()
4042 err = devm_request_irq(&pdev->dev, priv->wol_irq, in bcmgenet_probe()
4043 bcmgenet_wol_isr, 0, dev->name, priv); in bcmgenet_probe()
4045 device_set_wakeup_capable(&pdev->dev, 1); in bcmgenet_probe()
4051 dev->needed_headroom += 64; in bcmgenet_probe()
4053 priv->dev = dev; in bcmgenet_probe()
4054 priv->pdev = pdev; in bcmgenet_probe()
4056 pdata = device_get_match_data(&pdev->dev); in bcmgenet_probe()
4058 priv->version = pdata->version; in bcmgenet_probe()
4059 priv->dma_max_burst_length = pdata->dma_max_burst_length; in bcmgenet_probe()
4060 priv->ephy_16nm = pdata->ephy_16nm; in bcmgenet_probe()
4062 priv->version = pd->genet_version; in bcmgenet_probe()
4063 priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; in bcmgenet_probe()
4066 priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); in bcmgenet_probe()
4067 if (IS_ERR(priv->clk)) { in bcmgenet_probe()
4068 dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); in bcmgenet_probe()
4069 err = PTR_ERR(priv->clk); in bcmgenet_probe()
4073 err = clk_prepare_enable(priv->clk); in bcmgenet_probe()
4079 err = -EIO; in bcmgenet_probe()
4080 if (priv->hw_params->flags & GENET_HAS_40BITS) in bcmgenet_probe()
4081 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); in bcmgenet_probe()
4083 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in bcmgenet_probe()
4088 init_waitqueue_head(&priv->wq); in bcmgenet_probe()
4090 priv->rx_buf_len = RX_BUF_LENGTH; in bcmgenet_probe()
4091 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); in bcmgenet_probe()
4093 priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); in bcmgenet_probe()
4094 if (IS_ERR(priv->clk_wol)) { in bcmgenet_probe()
4095 dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); in bcmgenet_probe()
4096 err = PTR_ERR(priv->clk_wol); in bcmgenet_probe()
4100 priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); in bcmgenet_probe()
4101 if (IS_ERR(priv->clk_eee)) { in bcmgenet_probe()
4102 dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); in bcmgenet_probe()
4103 err = PTR_ERR(priv->clk_eee); in bcmgenet_probe()
4107 /* If this is an internal GPHY, power it on now, before UniMAC is in bcmgenet_probe()
4108 * brought out of reset as absolutely no UniMAC activity is allowed in bcmgenet_probe()
4110 if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) in bcmgenet_probe()
4113 if (pd && !IS_ERR_OR_NULL(pd->mac_address)) in bcmgenet_probe()
4114 eth_hw_addr_set(dev, pd->mac_address); in bcmgenet_probe()
4116 if (device_get_ethdev_address(&pdev->dev, dev)) in bcmgenet_probe()
4117 if (has_acpi_companion(&pdev->dev)) { in bcmgenet_probe()
4124 if (!is_valid_ether_addr(dev->dev_addr)) { in bcmgenet_probe()
4125 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); in bcmgenet_probe()
4138 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); in bcmgenet_probe()
4139 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); in bcmgenet_probe()
4142 for (i = 0; i < priv->hw_params->rx_queues; i++) in bcmgenet_probe()
4143 priv->rx_rings[i].rx_max_coalesced_frames = 1; in bcmgenet_probe()
4144 priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; in bcmgenet_probe()
4150 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
4161 clk_disable_unprepare(priv->clk); in bcmgenet_probe()
4169 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); in bcmgenet_remove()
4171 dev_set_drvdata(&pdev->dev, NULL); in bcmgenet_remove()
4172 unregister_netdev(priv->dev); in bcmgenet_remove()
4173 bcmgenet_mii_exit(priv->dev); in bcmgenet_remove()
4174 free_netdev(priv->dev); in bcmgenet_remove()
4194 ret = clk_prepare_enable(priv->clk); in bcmgenet_resume_noirq()
4198 if (device_may_wakeup(d) && priv->wolopts) { in bcmgenet_resume_noirq()
4199 /* Account for Wake-on-LAN events and clear those events in bcmgenet_resume_noirq()
4207 pm_wakeup_event(&priv->pdev->dev, 0); in bcmgenet_resume_noirq()
4226 /* From WOL-enabled suspend, switch to regular clock */ in bcmgenet_resume()
4227 if (device_may_wakeup(d) && priv->wolopts) in bcmgenet_resume()
4230 /* If this is an internal GPHY, power it back on now, before UniMAC is in bcmgenet_resume()
4231 * brought out of reset as absolutely no UniMAC activity is allowed in bcmgenet_resume()
4233 if (priv->internal_phy) in bcmgenet_resume()
4240 phy_init_hw(dev->phydev); in bcmgenet_resume()
4243 genphy_config_aneg(dev->phydev); in bcmgenet_resume()
4244 bcmgenet_mii_config(priv->dev, false); in bcmgenet_resume()
4247 bcmgenet_set_features(dev, dev->features); in bcmgenet_resume()
4249 bcmgenet_set_hw_addr(priv, dev->dev_addr); in bcmgenet_resume()
4253 list_for_each_entry(rule, &priv->rxnfc_list, list) in bcmgenet_resume()
4254 if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) in bcmgenet_resume()
4267 /* Always enable ring 16 - descriptor ring */ in bcmgenet_resume()
4271 phy_resume(dev->phydev); in bcmgenet_resume()
4280 if (priv->internal_phy) in bcmgenet_resume()
4282 clk_disable_unprepare(priv->clk); in bcmgenet_resume()
4299 phy_suspend(dev->phydev); in bcmgenet_suspend()
4316 /* Prepare the device for Wake-on-LAN and switch to the slow clock */ in bcmgenet_suspend_noirq()
4317 if (device_may_wakeup(d) && priv->wolopts) in bcmgenet_suspend_noirq()
4319 else if (priv->internal_phy) in bcmgenet_suspend_noirq()
4327 clk_disable_unprepare(priv->clk); in bcmgenet_suspend_noirq()
4368 MODULE_SOFTDEP("pre: mdio-bcm-unimac");