Lines Matching +full:rx +full:- +full:pcs +full:- +full:m
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
30 /* Caller must hold &ocelot->mact_lock */
36 /* Caller must hold &ocelot->mact_lock */
48 /* Caller must hold &ocelot->mact_lock */
90 if (mc_ports & BIT(ocelot->num_phys_ports)) in __ocelot_mact_learn()
109 mutex_lock(&ocelot->mact_lock); in ocelot_mact_learn()
111 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_learn()
122 mutex_lock(&ocelot->mact_lock); in ocelot_mact_forget()
133 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_forget()
145 mutex_lock(&ocelot->mact_lock); in ocelot_mact_lookup()
155 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_lookup()
156 return -ETIMEDOUT; in ocelot_mact_lookup()
162 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_lookup()
165 return -ENOENT; in ocelot_mact_lookup()
182 mutex_lock(&ocelot->mact_lock); in ocelot_mact_learn_streamdata()
193 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_learn_streamdata()
202 * - Do not copy the frame to the CPU extraction queues. in ocelot_mact_init()
203 * - Use the vlan and mac_copy for dmac lookup. in ocelot_mact_init()
212 * holding &ocelot->mact_lock is pointless. in ocelot_mact_init()
222 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, in ocelot_pll5_init()
225 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, in ocelot_pll5_init()
237 regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, in ocelot_pll5_init()
267 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_single_vlan_aware_bridge()
268 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_single_vlan_aware_bridge()
270 if (!ocelot_port || !ocelot_port->bridge || in ocelot_single_vlan_aware_bridge()
271 !br_vlan_enabled(ocelot_port->bridge)) in ocelot_single_vlan_aware_bridge()
275 bridge = ocelot_port->bridge; in ocelot_single_vlan_aware_bridge()
279 if (bridge == ocelot_port->bridge) in ocelot_single_vlan_aware_bridge()
283 "Only one VLAN-aware bridge is supported"); in ocelot_single_vlan_aware_bridge()
284 return -EBUSY; in ocelot_single_vlan_aware_bridge()
325 list_for_each_entry(vlan, &ocelot->vlans, list) { in ocelot_port_num_untagged_vlans()
326 if (!(vlan->portmask & BIT(port))) in ocelot_port_num_untagged_vlans()
331 * the bridge VLANs, which only matter in VLAN-aware mode. in ocelot_port_num_untagged_vlans()
333 if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START) in ocelot_port_num_untagged_vlans()
336 if (vlan->untagged & BIT(port)) in ocelot_port_num_untagged_vlans()
348 list_for_each_entry(vlan, &ocelot->vlans, list) { in ocelot_port_num_tagged_vlans()
349 if (!(vlan->portmask & BIT(port))) in ocelot_port_num_tagged_vlans()
352 if (!(vlan->untagged & BIT(port))) in ocelot_port_num_tagged_vlans()
359 /* We use native VLAN when we have to mix egress-tagged VLANs with exactly
360 * _one_ egress-untagged VLAN (_the_ native VLAN)
373 list_for_each_entry(vlan, &ocelot->vlans, list) in ocelot_port_find_native_vlan()
374 if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port)) in ocelot_port_find_native_vlan()
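
As a sketch of the rule stated above (illustrative only; the struct and helper below are invented, not part of the driver): a port uses a native VLAN exactly when it mixes at least one egress-tagged VLAN with exactly one egress-untagged VLAN.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-port VLAN summary, for illustration only */
struct port_vlan_summary {
	int num_tagged;		/* egress-tagged VLANs on the port */
	int num_untagged;	/* egress-untagged VLANs on the port */
};

/* Mirrors the rule above: native VLAN mode applies when egress-tagged VLANs
 * coexist with exactly one egress-untagged VLAN (the native VLAN).
 */
static bool uses_native_vlan(const struct port_vlan_summary *s)
{
	return s->num_tagged > 0 && s->num_untagged == 1;
}

int main(void)
{
	struct port_vlan_summary s = { .num_tagged = 2, .num_untagged = 1 };

	printf("native VLAN needed: %d\n", uses_native_vlan(&s)); /* prints 1 */
	return 0;
}
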
386 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_manage_port_tag()
390 if (ocelot_port->vlan_aware) { in ocelot_port_manage_port_tag()
417 REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), in ocelot_port_manage_port_tag()
428 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_bridge_num_find()
429 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_bridge_num_find()
431 if (ocelot_port && ocelot_port->bridge == bridge) in ocelot_bridge_num_find()
432 return ocelot_port->bridge_num; in ocelot_bridge_num_find()
435 return -1; in ocelot_bridge_num_find()
452 /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */ in ocelot_vlan_unaware_pvid()
453 return VLAN_N_VID - bridge_num - 1; in ocelot_vlan_unaware_pvid()
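
A quick standalone check of the formula above (not driver code): with VLAN_N_VID being 4096, the first VLAN-unaware bridge (bridge_num 0) gets reserved PVID 4095, the next one 4094, and so on downwards.

#include <stdio.h>

#define VLAN_N_VID 4096	/* same value as include/uapi/linux/if_vlan.h */

/* Same arithmetic as the return statement above, shown in isolation */
static unsigned int vlan_unaware_pvid(int bridge_num)
{
	return VLAN_N_VID - bridge_num - 1;
}

int main(void)
{
	printf("%u %u %u\n", vlan_unaware_pvid(0), vlan_unaware_pvid(1),
	       vlan_unaware_pvid(2)); /* prints 4095 4094 4093 */
	return 0;
}
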
457 * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
462 * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
463 * component conformance" suggest that a C-VLAN component should only recognize
464 * and filter on C-Tags, and an S-VLAN component should only recognize and
465 * process based on S-Tags.
467 * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
469 * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
472 * design is non-conformant, because the switch assigns each frame to a VLAN
476 * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
492 * In the VLAN Table, the TAG_TYPE information is not accessible - just the
493 * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
494 * C-VLAN X, and S-VLAN X.
497 * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
501 * should be treated as 802.1Q-untagged, and classified to the PVID of that
510 * if those packets were processed as VLAN-untagged.
514 * VLAN-unaware.
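
A toy model of the classification outcome described in the comment above (illustrative only; the function and parameters are made up, not the driver's): a tagged frame whose TPID does not match the bridge's vlan_protocol is treated as VLAN-untagged, i.e. classified to the port's PVID, or dropped when the VLAN-aware port has no PVID.

#include <stdbool.h>

#define CLASSIFY_DROP (-1)

/* Returns the classified VID for a frame received on a VLAN-aware port,
 * following the behavior described above. Not driver code.
 */
static int classify_vid(bool tpid_matches_bridge_proto, int frame_vid,
			bool port_has_pvid, int pvid)
{
	if (tpid_matches_bridge_proto)
		return frame_vid;	/* recognized tag: keep its VID */
	if (port_has_pvid)
		return pvid;		/* foreign TPID: treat as untagged */
	return CLASSIFY_DROP;		/* no PVID on a VLAN-aware port: drop */
}

int main(void)
{
	/* e.g. an 802.1ad frame on an 802.1Q bridge port with PVID 1 -> VID 1 */
	return classify_vid(false, 100, true, 1) == 1 ? 0 : 1;
}
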
519 struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1]; in ocelot_update_vlan_reclassify_rule()
520 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_update_vlan_reclassify_rule()
527 pvid_vlan = ocelot_port->pvid_vlan; in ocelot_update_vlan_reclassify_rule()
528 vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan; in ocelot_update_vlan_reclassify_rule()
547 /* Treating as VLAN-untagged means using as classified VID equal to in ocelot_update_vlan_reclassify_rule()
550 vid = pvid_vlan->vid; in ocelot_update_vlan_reclassify_rule()
559 if (filter->action.vid != vid) { in ocelot_update_vlan_reclassify_rule()
560 filter->action.vid = vid; in ocelot_update_vlan_reclassify_rule()
563 if (filter->action.pcp != pcp) { in ocelot_update_vlan_reclassify_rule()
564 filter->action.pcp = pcp; in ocelot_update_vlan_reclassify_rule()
567 if (filter->action.dei != dei) { in ocelot_update_vlan_reclassify_rule()
568 filter->action.dei = dei; in ocelot_update_vlan_reclassify_rule()
581 return -ENOMEM; in ocelot_update_vlan_reclassify_rule()
583 filter->key_type = OCELOT_VCAP_KEY_ANY; in ocelot_update_vlan_reclassify_rule()
584 filter->ingress_port_mask = BIT(port); in ocelot_update_vlan_reclassify_rule()
585 filter->vlan.tpid = OCELOT_VCAP_BIT_1; in ocelot_update_vlan_reclassify_rule()
586 filter->prio = 1; in ocelot_update_vlan_reclassify_rule()
587 filter->id.cookie = cookie; in ocelot_update_vlan_reclassify_rule()
588 filter->id.tc_offload = false; in ocelot_update_vlan_reclassify_rule()
589 filter->block_id = VCAP_IS1; in ocelot_update_vlan_reclassify_rule()
590 filter->type = OCELOT_VCAP_FILTER_OFFLOAD; in ocelot_update_vlan_reclassify_rule()
591 filter->lookup = 0; in ocelot_update_vlan_reclassify_rule()
592 filter->action.vid_replace_ena = true; in ocelot_update_vlan_reclassify_rule()
593 filter->action.pcp_dei_ena = true; in ocelot_update_vlan_reclassify_rule()
594 filter->action.vid = vid; in ocelot_update_vlan_reclassify_rule()
595 filter->action.pcp = pcp; in ocelot_update_vlan_reclassify_rule()
596 filter->action.dei = dei; in ocelot_update_vlan_reclassify_rule()
609 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_pvid()
610 u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); in ocelot_port_set_pvid()
613 ocelot_port->pvid_vlan = pvid_vlan; in ocelot_port_set_pvid()
615 if (ocelot_port->vlan_aware && pvid_vlan) in ocelot_port_set_pvid()
616 pvid = pvid_vlan->vid; in ocelot_port_set_pvid()
625 * classified to VLAN 0, but that is always in our RX filter, so it in ocelot_port_set_pvid()
629 * 802.1ad-tagged frames (carrying S-Tags) should be considered in ocelot_port_set_pvid()
630 * 802.1Q-untagged, and also dropped. in ocelot_port_set_pvid()
632 if (!pvid_vlan && ocelot_port->vlan_aware) in ocelot_port_set_pvid()
651 list_for_each_entry(vlan, &ocelot->vlans, list) in ocelot_bridge_vlan_find()
652 if (vlan->vid == vid) in ocelot_bridge_vlan_find()
666 portmask = vlan->portmask | BIT(port); in ocelot_vlan_member_add()
672 vlan->portmask = portmask; in ocelot_vlan_member_add()
674 * egress-tagging setting, so make sure to override an untagged in ocelot_vlan_member_add()
678 vlan->untagged |= BIT(port); in ocelot_vlan_member_add()
680 vlan->untagged &= ~BIT(port); in ocelot_vlan_member_add()
687 return -ENOMEM; in ocelot_vlan_member_add()
697 vlan->vid = vid; in ocelot_vlan_member_add()
698 vlan->portmask = portmask; in ocelot_vlan_member_add()
700 vlan->untagged = BIT(port); in ocelot_vlan_member_add()
701 INIT_LIST_HEAD(&vlan->list); in ocelot_vlan_member_add()
702 list_add_tail(&vlan->list, &ocelot->vlans); in ocelot_vlan_member_add()
716 portmask = vlan->portmask & ~BIT(port); in ocelot_vlan_member_del()
722 vlan->portmask = portmask; in ocelot_vlan_member_del()
723 if (vlan->portmask) in ocelot_vlan_member_del()
726 list_del(&vlan->list); in ocelot_vlan_member_del()
751 struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; in ocelot_port_vlan_filtering()
752 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_vlan_filtering()
757 list_for_each_entry(filter, &block->rules, list) { in ocelot_port_vlan_filtering()
758 if (filter->ingress_port_mask & BIT(port) && in ocelot_port_vlan_filtering()
759 filter->action.vid_replace_ena) { in ocelot_port_vlan_filtering()
762 return -EBUSY; in ocelot_port_vlan_filtering()
772 ocelot_port->bridge); in ocelot_port_vlan_filtering()
773 else if (ocelot_port->bridge) in ocelot_port_vlan_filtering()
775 ocelot_port->bridge); in ocelot_port_vlan_filtering()
779 ocelot_port->vlan_aware = vlan_aware; in ocelot_port_vlan_filtering()
791 err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); in ocelot_port_vlan_filtering()
805 /* We are adding an egress-tagged VLAN */ in ocelot_vlan_prepare()
808 "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); in ocelot_vlan_prepare()
809 return -EBUSY; in ocelot_vlan_prepare()
812 /* We are adding an egress-untagged VLAN */ in ocelot_vlan_prepare()
815 "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); in ocelot_vlan_prepare()
816 return -EBUSY; in ocelot_vlan_prepare()
822 "VLAN range 4000-4095 reserved for VLAN-unaware bridging"); in ocelot_vlan_prepare()
823 return -EBUSY; in ocelot_vlan_prepare()
835 /* Ignore VID 0 added to our RX filter by the 8021q module, since in ocelot_vlan_add()
837 * egress-untagged to egress-tagged. in ocelot_vlan_add()
863 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_vlan_del()
870 if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) in ocelot_vlan_del()
893 unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); in ocelot_vlan_init()
916 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_vlan_init()
947 * Worst-case delays for 10 kilobyte jumbo frames are: in ocelot_port_flush()
948 * 8 ms on a 10M port in ocelot_port_flush()
949 * 800 μs on a 100M port in ocelot_port_flush()
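
A rough wire-time computation behind the two numbers quoted above (illustration only): a 10,000-byte frame is 80,000 bits, which takes about 8 ms to drain at 10 Mbit/s and about 0.8 ms at 100 Mbit/s.

#include <stdio.h>

/* Time to transmit one frame of frame_bytes at link_mbps, in milliseconds */
static double drain_ms(unsigned int frame_bytes, double link_mbps)
{
	return (frame_bytes * 8.0) / (link_mbps * 1000.0);
}

int main(void)
{
	printf("10M:  %.1f ms\n", drain_ms(10000, 10.0));	/* ~8.0 ms */
	printf("100M: %.2f ms\n", drain_ms(10000, 100.0));	/* ~0.80 ms */
	return 0;
}
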
974 /* Re-enable flow control */ in ocelot_port_flush()
983 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_configure_serdes()
984 struct device *dev = ocelot->dev; in ocelot_port_configure_serdes()
988 if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_QSGMII) in ocelot_port_configure_serdes()
994 if (ocelot_port->phy_mode != PHY_INTERFACE_MODE_INTERNAL) { in ocelot_port_configure_serdes()
1006 ocelot_port->phy_mode); in ocelot_port_configure_serdes()
1023 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_config()
1034 /* Enable PCS */ in ocelot_phylink_mac_config()
1050 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_link_down()
1053 ocelot_port->speed = SPEED_UNKNOWN; in ocelot_phylink_mac_link_down()
1058 if (ocelot->ops->cut_through_fwd) { in ocelot_phylink_mac_link_down()
1059 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_down()
1060 ocelot->ops->cut_through_fwd(ocelot); in ocelot_phylink_mac_link_down()
1061 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_down()
1068 dev_err(ocelot->dev, "failed to flush port %d: %d\n", in ocelot_phylink_mac_link_down()
1091 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_phylink_mac_link_up()
1095 ocelot_port->speed = speed; in ocelot_phylink_mac_link_up()
1098 * and it's the PCS who is performing the rate adaptation, so we have in ocelot_phylink_mac_link_up()
1138 dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", in ocelot_phylink_mac_link_up()
1160 if (port != ocelot->npi) in ocelot_phylink_mac_link_up()
1170 /* If the port supports cut-through forwarding, update the masks before in ocelot_phylink_mac_link_up()
1173 if (ocelot->ops->cut_through_fwd) { in ocelot_phylink_mac_link_up()
1174 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_up()
1175 /* Workaround for hardware bug - FP doesn't work in ocelot_phylink_mac_link_up()
1177 * below also calls ocelot->ops->cut_through_fwd(), in ocelot_phylink_mac_link_up()
1181 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_phylink_mac_link_up()
1198 return -EIO; in ocelot_rx_frame_word()
1207 return -EIO; in ocelot_rx_frame_word()
1239 return (err < 0) ? err : -EIO; in ocelot_xtr_poll_xfh()
1252 ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); in ocelot_ptp_rx_timestamp()
1256 full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | in ocelot_ptp_rx_timestamp()
1264 shhwtstamps->hwtstamp = full_ts_in_ns; in ocelot_ptp_rx_timestamp()
1269 __acquires(&ocelot->inj_lock) in ocelot_lock_inj_grp()
1271 spin_lock(&ocelot->inj_lock); in ocelot_lock_inj_grp()
1276 __releases(&ocelot->inj_lock) in ocelot_unlock_inj_grp()
1278 spin_unlock(&ocelot->inj_lock); in ocelot_unlock_inj_grp()
1283 __acquires(&ocelot->xtr_lock) in ocelot_lock_xtr_grp()
1285 spin_lock(&ocelot->xtr_lock); in ocelot_lock_xtr_grp()
1290 __releases(&ocelot->xtr_lock) in ocelot_unlock_xtr_grp()
1292 spin_unlock(&ocelot->xtr_lock); in ocelot_unlock_xtr_grp()
1297 __acquires(&ocelot->xtr_lock) in ocelot_lock_xtr_grp_bh()
1299 spin_lock_bh(&ocelot->xtr_lock); in ocelot_lock_xtr_grp_bh()
1304 __releases(&ocelot->xtr_lock) in ocelot_unlock_xtr_grp_bh()
1306 spin_unlock_bh(&ocelot->xtr_lock); in ocelot_unlock_xtr_grp_bh()
1320 lockdep_assert_held(&ocelot->xtr_lock); in ocelot_xtr_poll_frame()
1330 if (WARN_ON(src_port >= ocelot->num_phys_ports)) in ocelot_xtr_poll_frame()
1331 return -EINVAL; in ocelot_xtr_poll_frame()
1333 dev = ocelot->ops->port_to_netdev(ocelot, src_port); in ocelot_xtr_poll_frame()
1335 return -EINVAL; in ocelot_xtr_poll_frame()
1340 return -ENOMEM; in ocelot_xtr_poll_frame()
1343 buf_len = len - ETH_FCS_LEN; in ocelot_xtr_poll_frame()
1365 len -= ETH_FCS_LEN - sz; in ocelot_xtr_poll_frame()
1367 if (unlikely(dev->features & NETIF_F_RXFCS)) { in ocelot_xtr_poll_frame()
1372 if (ocelot->ptp) in ocelot_xtr_poll_frame()
1378 if (ocelot->ports[src_port]->bridge) in ocelot_xtr_poll_frame()
1379 skb->offload_fwd_mark = 1; in ocelot_xtr_poll_frame()
1381 skb->protocol = eth_type_trans(skb, dev); in ocelot_xtr_poll_frame()
1397 lockdep_assert_held(&ocelot->inj_lock); in ocelot_can_inject()
1409 * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
1422 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_ifh_set_basic()
1423 struct net_device *dev = skb->dev; in ocelot_ifh_set_basic()
1427 ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci, in ocelot_ifh_set_basic()
1431 netdev_get_prio_tc_map(dev, skb->priority) : skb->priority; in ocelot_ifh_set_basic()
1435 ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports)); in ocelot_ifh_set_basic()
1451 lockdep_assert_held(&ocelot->inj_lock); in ocelot_port_inject_frame()
1461 count = DIV_ROUND_UP(skb->len, 4); in ocelot_port_inject_frame()
1462 last = skb->len % 4; in ocelot_port_inject_frame()
1464 ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); in ocelot_port_inject_frame()
1474 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | in ocelot_port_inject_frame()
1482 skb->dev->stats.tx_packets++; in ocelot_port_inject_frame()
1483 skb->dev->stats.tx_bytes += skb->len; in ocelot_port_inject_frame()
1489 lockdep_assert_held(&ocelot->xtr_lock); in ocelot_drain_cpu_queue()
1516 /* Caller must hold &ocelot->mact_lock */
1533 return -ETIMEDOUT; in ocelot_mact_read()
1538 return -EINVAL; in ocelot_mact_read()
1545 return -EINVAL; in ocelot_mact_read()
1558 entry->vid = (mach >> 16) & 0xfff; in ocelot_mact_read()
1559 ether_addr_copy(entry->mac, mac); in ocelot_mact_read()
1568 mutex_lock(&ocelot->mact_lock); in ocelot_mact_flush()
1581 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_flush()
1595 mutex_unlock(&ocelot->mact_lock); in ocelot_mact_flush()
1610 mutex_lock(&ocelot->mact_lock); in ocelot_fdb_dump()
1613 for (i = 0; i < ocelot->num_mact_rows; i++) { in ocelot_fdb_dump()
1622 if (err == -EINVAL) in ocelot_fdb_dump()
1630 * VLAN-unaware bridging. in ocelot_fdb_dump()
1641 mutex_unlock(&ocelot->mact_lock); in ocelot_fdb_dump()
1656 block_vcap_is2 = &ocelot->block[VCAP_IS2]; in ocelot_trap_add()
1663 return -ENOMEM; in ocelot_trap_add()
1666 trap->prio = 1; in ocelot_trap_add()
1667 trap->id.cookie = cookie; in ocelot_trap_add()
1668 trap->id.tc_offload = false; in ocelot_trap_add()
1669 trap->block_id = VCAP_IS2; in ocelot_trap_add()
1670 trap->type = OCELOT_VCAP_FILTER_OFFLOAD; in ocelot_trap_add()
1671 trap->lookup = 0; in ocelot_trap_add()
1672 trap->action.cpu_copy_ena = true; in ocelot_trap_add()
1673 trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; in ocelot_trap_add()
1674 trap->action.port_mask = 0; in ocelot_trap_add()
1675 trap->take_ts = take_ts; in ocelot_trap_add()
1676 trap->is_trap = true; in ocelot_trap_add()
1680 trap->ingress_port_mask |= BIT(port); in ocelot_trap_add()
1687 trap->ingress_port_mask &= ~BIT(port); in ocelot_trap_add()
1688 if (!trap->ingress_port_mask) in ocelot_trap_add()
1701 block_vcap_is2 = &ocelot->block[VCAP_IS2]; in ocelot_trap_del()
1708 trap->ingress_port_mask &= ~BIT(port); in ocelot_trap_del()
1709 if (!trap->ingress_port_mask) in ocelot_trap_del()
1720 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_get_bond_mask()
1722 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_get_bond_mask()
1723 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_get_bond_mask()
1728 if (ocelot_port->bond == bond) in ocelot_get_bond_mask()
1743 return -ENOENT; in ocelot_bond_get_id()
1765 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_dsa_8021q_cpu_assigned_ports()
1766 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_dsa_8021q_cpu_assigned_ports()
1771 if (ocelot_port->dsa_8021q_cpu == cpu) in ocelot_dsa_8021q_cpu_assigned_ports()
1775 if (cpu->bond) in ocelot_dsa_8021q_cpu_assigned_ports()
1776 mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond); in ocelot_dsa_8021q_cpu_assigned_ports()
1786 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_assigned_dsa_8021q_cpu_mask()
1787 struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu; in ocelot_port_assigned_dsa_8021q_cpu_mask()
1792 if (cpu_port->bond) in ocelot_port_assigned_dsa_8021q_cpu_mask()
1793 return ocelot_get_bond_mask(ocelot, cpu_port->bond); in ocelot_port_assigned_dsa_8021q_cpu_mask()
1795 return BIT(cpu_port->index); in ocelot_port_assigned_dsa_8021q_cpu_mask()
1801 struct ocelot_port *ocelot_port = ocelot->ports[src_port]; in ocelot_get_bridge_fwd_mask()
1806 if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) in ocelot_get_bridge_fwd_mask()
1809 bridge = ocelot_port->bridge; in ocelot_get_bridge_fwd_mask()
1813 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_get_bridge_fwd_mask()
1814 ocelot_port = ocelot->ports[port]; in ocelot_get_bridge_fwd_mask()
1819 if (ocelot_port->stp_state == BR_STATE_FORWARDING && in ocelot_get_bridge_fwd_mask()
1820 ocelot_port->bridge == bridge) in ocelot_get_bridge_fwd_mask()
1832 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_apply_bridge_fwd_mask()
1834 /* If cut-through forwarding is supported, update the masks before a in ocelot_apply_bridge_fwd_mask()
1838 if (joining && ocelot->ops->cut_through_fwd) in ocelot_apply_bridge_fwd_mask()
1839 ocelot->ops->cut_through_fwd(ocelot); in ocelot_apply_bridge_fwd_mask()
1844 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_apply_bridge_fwd_mask()
1845 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_apply_bridge_fwd_mask()
1851 } else if (ocelot_port->is_dsa_8021q_cpu) { in ocelot_apply_bridge_fwd_mask()
1857 } else if (ocelot_port->bridge) { in ocelot_apply_bridge_fwd_mask()
1858 struct net_device *bond = ocelot_port->bond; in ocelot_apply_bridge_fwd_mask()
1880 /* If cut-through forwarding is supported and a port is leaving, there in ocelot_apply_bridge_fwd_mask()
1881 * is a chance that cut-through was disabled on the other ports due to in ocelot_apply_bridge_fwd_mask()
1883 * update the cut-through masks of the remaining ports no earlier than in ocelot_apply_bridge_fwd_mask()
1885 * the cut-through update and the forwarding domain update. in ocelot_apply_bridge_fwd_mask()
1887 if (!joining && ocelot->ops->cut_through_fwd) in ocelot_apply_bridge_fwd_mask()
1888 ocelot->ops->cut_through_fwd(ocelot); in ocelot_apply_bridge_fwd_mask()
1903 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_update_pgid_cpu()
1904 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_update_pgid_cpu()
1906 if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu) in ocelot_update_pgid_cpu()
1913 pgid_cpu = BIT(ocelot->num_phys_ports); in ocelot_update_pgid_cpu()
1920 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_setup_dsa_8021q_cpu()
1923 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_setup_dsa_8021q_cpu()
1925 cpu_port->is_dsa_8021q_cpu = true; in ocelot_port_setup_dsa_8021q_cpu()
1932 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_setup_dsa_8021q_cpu()
1938 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_teardown_dsa_8021q_cpu()
1941 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_teardown_dsa_8021q_cpu()
1943 cpu_port->is_dsa_8021q_cpu = false; in ocelot_port_teardown_dsa_8021q_cpu()
1946 ocelot_vlan_member_del(ocelot, cpu_port->index, vid); in ocelot_port_teardown_dsa_8021q_cpu()
1950 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_teardown_dsa_8021q_cpu()
1957 struct ocelot_port *cpu_port = ocelot->ports[cpu]; in ocelot_port_assign_dsa_8021q_cpu()
1959 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_assign_dsa_8021q_cpu()
1961 ocelot->ports[port]->dsa_8021q_cpu = cpu_port; in ocelot_port_assign_dsa_8021q_cpu()
1964 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_assign_dsa_8021q_cpu()
1970 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_unassign_dsa_8021q_cpu()
1972 ocelot->ports[port]->dsa_8021q_cpu = NULL; in ocelot_port_unassign_dsa_8021q_cpu()
1975 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_unassign_dsa_8021q_cpu()
1981 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_bridge_stp_state_set()
1984 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_bridge_stp_state_set()
1986 ocelot_port->stp_state = state; in ocelot_bridge_stp_state_set()
1989 ocelot_port->learn_ena) in ocelot_bridge_stp_state_set()
1997 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_bridge_stp_state_set()
2021 list_for_each_entry(mc, &ocelot->multicast, list) { in ocelot_multicast_get()
2022 if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) in ocelot_multicast_get()
2045 return ERR_PTR(-ENOMEM); in ocelot_pgid_alloc()
2047 pgid->ports = ports; in ocelot_pgid_alloc()
2048 pgid->index = index; in ocelot_pgid_alloc()
2049 refcount_set(&pgid->refcount, 1); in ocelot_pgid_alloc()
2050 list_add_tail(&pgid->list, &ocelot->pgids); in ocelot_pgid_alloc()
2057 if (!refcount_dec_and_test(&pgid->refcount)) in ocelot_pgid_free()
2060 list_del(&pgid->list); in ocelot_pgid_free()
2075 if (mc->entry_type == ENTRYTYPE_MACv4 || in ocelot_mdb_get_pgid()
2076 mc->entry_type == ENTRYTYPE_MACv6) in ocelot_mdb_get_pgid()
2077 return ocelot_pgid_alloc(ocelot, 0, mc->ports); in ocelot_mdb_get_pgid()
2079 list_for_each_entry(pgid, &ocelot->pgids, list) { in ocelot_mdb_get_pgid()
2083 if (pgid->index && pgid->ports == mc->ports) { in ocelot_mdb_get_pgid()
2084 refcount_inc(&pgid->refcount); in ocelot_mdb_get_pgid()
2093 list_for_each_entry(pgid, &ocelot->pgids, list) { in ocelot_mdb_get_pgid()
2094 if (pgid->index == index) { in ocelot_mdb_get_pgid()
2101 return ocelot_pgid_alloc(ocelot, index, mc->ports); in ocelot_mdb_get_pgid()
2104 return ERR_PTR(-ENOSPC); in ocelot_mdb_get_pgid()
2110 ether_addr_copy(addr, mc->addr); in ocelot_encode_ports_to_mdb()
2112 if (mc->entry_type == ENTRYTYPE_MACv4) { in ocelot_encode_ports_to_mdb()
2114 addr[1] = mc->ports >> 8; in ocelot_encode_ports_to_mdb()
2115 addr[2] = mc->ports & 0xff; in ocelot_encode_ports_to_mdb()
2116 } else if (mc->entry_type == ENTRYTYPE_MACv6) { in ocelot_encode_ports_to_mdb()
2117 addr[0] = mc->ports >> 8; in ocelot_encode_ports_to_mdb()
2118 addr[1] = mc->ports & 0xff; in ocelot_encode_ports_to_mdb()
2129 u16 vid = mdb->vid; in ocelot_port_mdb_add()
2134 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); in ocelot_port_mdb_add()
2137 mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); in ocelot_port_mdb_add()
2139 return -ENOMEM; in ocelot_port_mdb_add()
2141 mc->entry_type = ocelot_classify_mdb(mdb->addr); in ocelot_port_mdb_add()
2142 ether_addr_copy(mc->addr, mdb->addr); in ocelot_port_mdb_add()
2143 mc->vid = vid; in ocelot_port_mdb_add()
2145 list_add_tail(&mc->list, &ocelot->multicast); in ocelot_port_mdb_add()
2150 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_port_mdb_add()
2155 mc->ports |= BIT(port); in ocelot_port_mdb_add()
2159 dev_err(ocelot->dev, in ocelot_port_mdb_add()
2161 mc->addr, mc->vid); in ocelot_port_mdb_add()
2162 devm_kfree(ocelot->dev, mc); in ocelot_port_mdb_add()
2165 mc->pgid = pgid; in ocelot_port_mdb_add()
2169 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_port_mdb_add()
2170 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_port_mdb_add()
2171 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_port_mdb_add()
2172 pgid->index); in ocelot_port_mdb_add()
2174 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_port_mdb_add()
2175 mc->entry_type); in ocelot_port_mdb_add()
2186 u16 vid = mdb->vid; in ocelot_port_mdb_del()
2191 mc = ocelot_multicast_get(ocelot, mdb->addr, vid); in ocelot_port_mdb_del()
2193 return -ENOENT; in ocelot_port_mdb_del()
2198 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_port_mdb_del()
2199 mc->ports &= ~BIT(port); in ocelot_port_mdb_del()
2200 if (!mc->ports) { in ocelot_port_mdb_del()
2201 list_del(&mc->list); in ocelot_port_mdb_del()
2202 devm_kfree(ocelot->dev, mc); in ocelot_port_mdb_del()
2210 mc->pgid = pgid; in ocelot_port_mdb_del()
2214 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_port_mdb_del()
2215 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_port_mdb_del()
2216 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_port_mdb_del()
2217 pgid->index); in ocelot_port_mdb_del()
2219 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_port_mdb_del()
2220 mc->entry_type); in ocelot_port_mdb_del()
2228 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_bridge_join()
2235 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_join()
2237 ocelot_port->bridge = bridge; in ocelot_port_bridge_join()
2238 ocelot_port->bridge_num = bridge_num; in ocelot_port_bridge_join()
2242 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_join()
2254 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_bridge_leave()
2256 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_leave()
2261 ocelot_port->bridge = NULL; in ocelot_port_bridge_leave()
2262 ocelot_port->bridge_num = -1; in ocelot_port_bridge_leave()
2268 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_bridge_leave()
2274 unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0); in ocelot_set_aggr_pgids()
2282 ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), in ocelot_set_aggr_pgids()
2293 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_set_aggr_pgids()
2294 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2296 if (!ocelot_port || !ocelot_port->bond) in ocelot_set_aggr_pgids()
2303 for (lag = 0; lag < ocelot->num_phys_ports; lag++) { in ocelot_set_aggr_pgids()
2304 struct net_device *bond = ocelot->ports[lag]->bond; in ocelot_set_aggr_pgids()
2314 for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { in ocelot_set_aggr_pgids()
2315 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2321 if (ocelot_port->lag_tx_active) in ocelot_set_aggr_pgids()
2341 for (port = lag; port < ocelot->num_phys_ports; port++) { in ocelot_set_aggr_pgids()
2342 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_set_aggr_pgids()
2347 if (ocelot_port->bond == bond) in ocelot_set_aggr_pgids()
2362 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_setup_logical_port_ids()
2363 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_setup_logical_port_ids()
2369 bond = ocelot_port->bond; in ocelot_setup_logical_port_ids()
2391 u16 vid = mc->vid; in ocelot_migrate_mc()
2393 dev_dbg(ocelot->dev, in ocelot_migrate_mc()
2395 mc->addr, mc->vid, from_mask, to_mask); in ocelot_migrate_mc()
2400 ocelot_pgid_free(ocelot, mc->pgid); in ocelot_migrate_mc()
2404 mc->ports &= ~from_mask; in ocelot_migrate_mc()
2405 mc->ports |= to_mask; in ocelot_migrate_mc()
2409 dev_err(ocelot->dev, in ocelot_migrate_mc()
2411 mc->addr, mc->vid); in ocelot_migrate_mc()
2412 devm_kfree(ocelot->dev, mc); in ocelot_migrate_mc()
2415 mc->pgid = pgid; in ocelot_migrate_mc()
2419 if (mc->entry_type != ENTRYTYPE_MACv4 && in ocelot_migrate_mc()
2420 mc->entry_type != ENTRYTYPE_MACv6) in ocelot_migrate_mc()
2421 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, in ocelot_migrate_mc()
2422 pgid->index); in ocelot_migrate_mc()
2424 return ocelot_mact_learn(ocelot, pgid->index, addr, vid, in ocelot_migrate_mc()
2425 mc->entry_type); in ocelot_migrate_mc()
2434 list_for_each_entry(mc, &ocelot->multicast, list) { in ocelot_migrate_mdbs()
2435 if (!(mc->ports & from_mask)) in ocelot_migrate_mdbs()
2464 lockdep_assert_held(&ocelot->fwd_domain_lock); in ocelot_migrate_lag_fdbs()
2466 list_for_each_entry(fdb, &ocelot->lag_fdbs, list) { in ocelot_migrate_lag_fdbs()
2467 if (fdb->bond != bond) in ocelot_migrate_lag_fdbs()
2470 err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid); in ocelot_migrate_lag_fdbs()
2472 dev_err(ocelot->dev, in ocelot_migrate_lag_fdbs()
2474 bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); in ocelot_migrate_lag_fdbs()
2477 err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid, in ocelot_migrate_lag_fdbs()
2480 dev_err(ocelot->dev, in ocelot_migrate_lag_fdbs()
2482 bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); in ocelot_migrate_lag_fdbs()
2492 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { in ocelot_port_lag_join()
2495 return -EOPNOTSUPP; in ocelot_port_lag_join()
2498 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_join()
2500 ocelot->ports[port]->bond = bond; in ocelot_port_lag_join()
2506 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_join()
2517 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_leave()
2521 ocelot->ports[port]->bond = NULL; in ocelot_port_lag_leave()
2532 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_leave()
2538 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_lag_change()
2540 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_port_lag_change()
2542 ocelot_port->lag_tx_active = lag_tx_active; in ocelot_port_lag_change()
2547 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_port_lag_change()
2560 return -ENOMEM; in ocelot_lag_fdb_add()
2562 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2567 ether_addr_copy(fdb->addr, addr); in ocelot_lag_fdb_add()
2568 fdb->vid = vid; in ocelot_lag_fdb_add()
2569 fdb->bond = bond; in ocelot_lag_fdb_add()
2575 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2580 list_add_tail(&fdb->list, &ocelot->lag_fdbs); in ocelot_lag_fdb_add()
2581 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_add()
2593 mutex_lock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2598 list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) { in ocelot_lag_fdb_del()
2599 if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid || in ocelot_lag_fdb_del()
2600 fdb->bond != bond) in ocelot_lag_fdb_del()
2604 list_del(&fdb->list); in ocelot_lag_fdb_del()
2605 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2611 mutex_unlock(&ocelot->fwd_domain_lock); in ocelot_lag_fdb_del()
2613 return -ENOENT; in ocelot_lag_fdb_del()
2617 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
2625 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_maxlen()
2630 if (port == ocelot->npi) { in ocelot_port_set_maxlen()
2633 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) in ocelot_port_set_maxlen()
2635 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) in ocelot_port_set_maxlen()
2650 atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / in ocelot_port_set_maxlen()
2653 ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); in ocelot_port_set_maxlen()
2654 ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG); in ocelot_port_set_maxlen()
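
A worked instance of the ATOP_TOT arithmetic above, with purely hypothetical numbers (a 240 KiB packet buffer, a 1526-byte maxlen and a 60-byte buffer cell; the real values are chip-specific):

#include <stdio.h>

int main(void)
{
	unsigned int packet_buffer_size = 240 * 1024;	/* hypothetical */
	unsigned int maxlen = 1526;			/* hypothetical */
	unsigned int cell_sz = 60;			/* hypothetical cell size */
	unsigned int atop_tot;

	/* Same shape as the expression above: buffer size minus 9 worst-case
	 * frames, expressed in buffer cells.
	 */
	atop_tot = (packet_buffer_size - 9 * maxlen) / cell_sz;
	printf("atop_tot = %u cells\n", atop_tot);	/* prints 3867 */
	return 0;
}
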
2660 int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN; in ocelot_get_max_mtu()
2662 if (port == ocelot->npi) { in ocelot_get_max_mtu()
2663 max_mtu -= OCELOT_TAG_LEN; in ocelot_get_max_mtu()
2665 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) in ocelot_get_max_mtu()
2666 max_mtu -= OCELOT_SHORT_PREFIX_LEN; in ocelot_get_max_mtu()
2667 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) in ocelot_get_max_mtu()
2668 max_mtu -= OCELOT_LONG_PREFIX_LEN; in ocelot_get_max_mtu()
2678 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_port_set_learning()
2687 ocelot_port->learn_ena = enabled; in ocelot_port_set_learning()
2730 return -EINVAL; in ocelot_port_pre_bridge_flags()
2768 return -ERANGE; in ocelot_port_set_default_prio()
2787 return -EOPNOTSUPP; in ocelot_port_get_dscp_prio()
2791 /* Re-read ANA_DSCP_CFG for the translated DSCP */ in ocelot_port_get_dscp_prio()
2796 * to VLAN PCP or port-based default. in ocelot_port_get_dscp_prio()
2799 return -EOPNOTSUPP; in ocelot_port_get_dscp_prio()
2810 return -ERANGE; in ocelot_port_add_dscp_prio()
2876 struct ocelot_mirror *m = ocelot->mirror; in ocelot_mirror_get() local
2878 if (m) { in ocelot_mirror_get()
2879 if (m->to != to) { in ocelot_mirror_get()
2882 return ERR_PTR(-EBUSY); in ocelot_mirror_get()
2885 refcount_inc(&m->refcount); in ocelot_mirror_get()
2886 return m; in ocelot_mirror_get()
2889 m = kzalloc(sizeof(*m), GFP_KERNEL); in ocelot_mirror_get()
2890 if (!m) in ocelot_mirror_get()
2891 return ERR_PTR(-ENOMEM); in ocelot_mirror_get()
2893 m->to = to; in ocelot_mirror_get()
2894 refcount_set(&m->refcount, 1); in ocelot_mirror_get()
2895 ocelot->mirror = m; in ocelot_mirror_get()
2900 return m; in ocelot_mirror_get()
2905 struct ocelot_mirror *m = ocelot->mirror; in ocelot_mirror_put() local
2907 if (!refcount_dec_and_test(&m->refcount)) in ocelot_mirror_put()
2911 ocelot->mirror = NULL; in ocelot_mirror_put()
2912 kfree(m); in ocelot_mirror_put()
2918 struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack); in ocelot_port_mirror_add() local
2920 if (IS_ERR(m)) in ocelot_port_mirror_add()
2921 return PTR_ERR(m); in ocelot_port_mirror_add()
2951 struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); in ocelot_port_reset_mqprio()
2960 struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); in ocelot_port_mqprio()
2961 struct netlink_ext_ack *extack = mqprio->extack; in ocelot_port_mqprio()
2962 struct tc_mqprio_qopt *qopt = &mqprio->qopt; in ocelot_port_mqprio()
2963 int num_tc = qopt->num_tc; in ocelot_port_mqprio()
2976 if (qopt->count[tc] != 1) { in ocelot_port_mqprio()
2979 return -EINVAL; in ocelot_port_mqprio()
2982 err = netdev_set_tc_queue(dev, tc, 1, qopt->offset[tc]); in ocelot_port_mqprio()
2991 ocelot_port_change_fp(ocelot, port, mqprio->preemptible_tcs); in ocelot_port_mqprio()
3003 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_init_port()
3005 skb_queue_head_init(&ocelot_port->tx_skbs); in ocelot_init_port()
3072 int cpu = ocelot->num_phys_ports; in ocelot_cpu_port_init()
3106 * the number of 240-byte free memory words (aka 4-cell chunks) and not in ocelot_detect_features()
3110 ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt); in ocelot_detect_features()
3113 ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl); in ocelot_detect_features()
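
A small arithmetic illustration of the buffer-size line above (the FREECNT value here is hypothetical): with FREECNT counting 240-byte words, a reading of 1024 corresponds to a 245,760-byte (240 KiB) packet buffer.

#include <stdio.h>

int main(void)
{
	unsigned int freecnt = 1024;		/* hypothetical register value */
	unsigned int buf = 240 * freecnt;	/* bytes, as computed above */

	printf("packet buffer: %u bytes (%u KiB)\n", buf, buf / 1024);
	return 0;
}
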
3121 err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], in ocelot_mem_init_status()
3132 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); in ocelot_reset()
3136 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); in ocelot_reset()
3140 /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be in ocelot_reset()
3148 err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); in ocelot_reset()
3152 return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); in ocelot_reset()
3161 if (ocelot->ops->reset) { in ocelot_init()
3162 ret = ocelot->ops->reset(ocelot); in ocelot_init()
3164 dev_err(ocelot->dev, "Switch reset failed\n"); in ocelot_init()
3169 mutex_init(&ocelot->mact_lock); in ocelot_init()
3170 mutex_init(&ocelot->fwd_domain_lock); in ocelot_init()
3171 spin_lock_init(&ocelot->ptp_clock_lock); in ocelot_init()
3172 spin_lock_init(&ocelot->ts_id_lock); in ocelot_init()
3173 spin_lock_init(&ocelot->inj_lock); in ocelot_init()
3174 spin_lock_init(&ocelot->xtr_lock); in ocelot_init()
3176 ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); in ocelot_init()
3177 if (!ocelot->owq) in ocelot_init()
3178 return -ENOMEM; in ocelot_init()
3184 INIT_LIST_HEAD(&ocelot->multicast); in ocelot_init()
3185 INIT_LIST_HEAD(&ocelot->pgids); in ocelot_init()
3186 INIT_LIST_HEAD(&ocelot->vlans); in ocelot_init()
3187 INIT_LIST_HEAD(&ocelot->lag_fdbs); in ocelot_init()
3194 if (ocelot->ops->psfp_init) in ocelot_init()
3195 ocelot->ops->psfp_init(ocelot); in ocelot_init()
3197 if (ocelot->mm_supported) { in ocelot_init()
3203 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_init()
3210 /* Only use S-Tag */ in ocelot_init()
3230 regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1); in ocelot_init()
3232 /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */ in ocelot_init()
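
As a quick unit conversion for the comment above (arithmetic only, not the register value the driver actually programs): 2 seconds expressed in 6.5 µs ticks is roughly 307,692.

#include <stdio.h>

int main(void)
{
	double age_us = 2.0 * 1000 * 1000;	/* 2 seconds in microseconds */
	double tick_us = 6.5;			/* hardware ageing tick */

	printf("age period ~ %.0f ticks\n", age_us / tick_us);	/* ~307692 */
	return 0;
}
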
3237 for (i = 0; i < ocelot->num_flooding_pgids; i++) in ocelot_init()
3248 for (port = 0; port < ocelot->num_phys_ports; port++) { in ocelot_init()
3261 u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); in ocelot_init()
3269 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3270 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3272 ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3273 ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), in ocelot_init()
3304 destroy_workqueue(ocelot->owq); in ocelot_init()
3312 destroy_workqueue(ocelot->owq); in ocelot_deinit()
3318 struct ocelot_port *ocelot_port = ocelot->ports[port]; in ocelot_deinit_port()
3320 skb_queue_purge(&ocelot_port->tx_skbs); in ocelot_deinit_port()