Lines Matching +full:dp +full:- +full:bridge
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2008-2009 Marvell Semiconductor
53 * dsa_lag_map() - Map LAG structure to a linear LAG array
59 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
61 * no-ops.
67 for (id = 1; id <= dst->lags_len; id++) { in dsa_lag_map()
69 dst->lags[id - 1] = lag; in dsa_lag_map()
70 lag->id = id; in dsa_lag_map()
78 * driver can then return -EOPNOTSUPP back to DSA, which will in dsa_lag_map()
84 * dsa_lag_unmap() - Remove a LAG ID mapping
97 dst->lags[id - 1] = NULL; in dsa_lag_unmap()
98 lag->id = 0; in dsa_lag_unmap()
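The kernel-doc above notes that the number of offloadable LAGs is declared by the driver through ds->num_lag_ids, and that a driver may bounce a join with -EOPNOTSUPP so the LAG stays in software. A minimal, hedged sketch of the driver side; every foo_* name is a placeholder, not a real driver:

	#include <net/dsa.h>

	static int foo_setup(struct dsa_switch *ds)
	{
		/* Four hardware trunks: dsa_lag_map() will hand out IDs 1..4 */
		ds->num_lag_ids = 4;
		return 0;
	}

	static int foo_port_lag_join(struct dsa_switch *ds, int port,
				     struct dsa_lag lag,
				     struct netdev_lag_upper_info *info,
				     struct netlink_ext_ack *extack)
	{
		/* Hash policies the hardware cannot honour are refused; DSA
		 * then falls back to software bridging of the LAG.
		 */
		if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
			return -EOPNOTSUPP;

		/* lag.id is the 1-based ID assigned by dsa_lag_map() above */
		return foo_trunk_add_port(ds->priv, lag.id, port); /* assumed helper */
	}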
107 struct dsa_port *dp; in dsa_tree_lag_find() local
109 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_lag_find()
110 if (dsa_port_lag_dev_get(dp) == lag_dev) in dsa_tree_lag_find()
111 return dp->lag; in dsa_tree_lag_find()
119 struct dsa_port *dp; in dsa_tree_bridge_find() local
121 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_bridge_find()
122 if (dsa_port_bridge_dev_get(dp) == br) in dsa_tree_bridge_find()
123 return dp->bridge; in dsa_tree_bridge_find()
133 struct dsa_bridge *bridge; in dsa_bridge_num_find() local
135 bridge = dsa_tree_bridge_find(dst, bridge_dev); in dsa_bridge_num_find()
136 if (bridge) in dsa_bridge_num_find()
137 return bridge->num; in dsa_bridge_num_find()
148 * bridge numbering in dsa_bridge_num_get()
155 * offload for this bridge in dsa_bridge_num_get()
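dsa_bridge_num_get() only assigns numbers when the driver has declared, via ds->max_num_bridges in its setup(), how many bridges the hardware can tell apart. The number then comes back to the driver in its bridge ops; a hedged sketch (foo_* names are assumptions):

	static int foo_port_bridge_join(struct dsa_switch *ds, int port,
					struct dsa_bridge bridge,
					bool *tx_fwd_offload,
					struct netlink_ext_ack *extack)
	{
		/* foo_setup() is assumed to have set ds->max_num_bridges = 8 */
		if (!bridge.num)	/* all numbers in use: plain bridging only */
			return 0;

		*tx_fwd_offload = true;
		return foo_assign_port_to_bridge(ds->priv, port, bridge.num); /* assumed */
	}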
182 struct dsa_port *dp; in dsa_switch_find() local
185 if (dst->index != tree_index) in dsa_switch_find()
188 list_for_each_entry(dp, &dst->ports, list) { in dsa_switch_find()
189 if (dp->ds->index != sw_index) in dsa_switch_find()
192 return dp->ds; in dsa_switch_find()
205 if (dst->index == index) in dsa_tree_find()
219 dst->index = index; in dsa_tree_alloc()
221 INIT_LIST_HEAD(&dst->rtable); in dsa_tree_alloc()
223 INIT_LIST_HEAD(&dst->ports); in dsa_tree_alloc()
225 INIT_LIST_HEAD(&dst->list); in dsa_tree_alloc()
226 list_add_tail(&dst->list, &dsa_tree_list); in dsa_tree_alloc()
228 kref_init(&dst->refcount); in dsa_tree_alloc()
235 if (dst->tag_ops) in dsa_tree_free()
236 dsa_tag_driver_put(dst->tag_ops); in dsa_tree_free()
237 list_del(&dst->list); in dsa_tree_free()
244 kref_get(&dst->refcount); in dsa_tree_get()
272 kref_put(&dst->refcount, dsa_tree_release); in dsa_tree_put()
278 struct dsa_port *dp; in dsa_tree_find_port_by_node() local
280 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_find_port_by_node()
281 if (dp->dn == dn) in dsa_tree_find_port_by_node()
282 return dp; in dsa_tree_find_port_by_node()
287 static struct dsa_link *dsa_link_touch(struct dsa_port *dp, in dsa_link_touch() argument
290 struct dsa_switch *ds = dp->ds; in dsa_link_touch()
294 dst = ds->dst; in dsa_link_touch()
296 list_for_each_entry(dl, &dst->rtable, list) in dsa_link_touch()
297 if (dl->dp == dp && dl->link_dp == link_dp) in dsa_link_touch()
304 dl->dp = dp; in dsa_link_touch()
305 dl->link_dp = link_dp; in dsa_link_touch()
307 INIT_LIST_HEAD(&dl->list); in dsa_link_touch()
308 list_add_tail(&dl->list, &dst->rtable); in dsa_link_touch()
313 static bool dsa_port_setup_routing_table(struct dsa_port *dp) in dsa_port_setup_routing_table() argument
315 struct dsa_switch *ds = dp->ds; in dsa_port_setup_routing_table()
316 struct dsa_switch_tree *dst = ds->dst; in dsa_port_setup_routing_table()
317 struct device_node *dn = dp->dn; in dsa_port_setup_routing_table()
330 dl = dsa_link_touch(dp, link_dp); in dsa_port_setup_routing_table()
343 struct dsa_port *dp; in dsa_tree_setup_routing_table() local
345 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_routing_table()
346 if (dsa_port_is_dsa(dp)) { in dsa_tree_setup_routing_table()
347 complete = dsa_port_setup_routing_table(dp); in dsa_tree_setup_routing_table()
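The routing table built here from the "link" phandles of DSA ports is what the dsa_routing_port() helper in include/net/dsa.h walks. A sketch, under those assumptions, of how a driver might consume it when programming cross-chip forwarding (foo_set_device_mapping() is hypothetical):

	static void foo_program_crossbar(struct dsa_switch *ds)
	{
		unsigned int port;
		int device;

		for (device = 0; device <= ds->dst->last_switch; device++) {
			if (device == ds->index)
				continue;

			port = dsa_routing_port(ds, device);
			if (port == ds->num_ports)
				continue;	/* no link recorded towards this switch */

			foo_set_device_mapping(ds->priv, device, port);
		}
	}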
358 struct dsa_port *dp; in dsa_tree_find_first_cpu() local
360 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_find_first_cpu()
361 if (dsa_port_is_cpu(dp)) in dsa_tree_find_first_cpu()
362 return dp; in dsa_tree_find_first_cpu()
374 ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0); in dsa_tree_find_first_conduit()
386 struct dsa_port *cpu_dp, *dp; in dsa_tree_setup_default_cpu() local
390 pr_err("DSA: tree %d has no CPU port\n", dst->index); in dsa_tree_setup_default_cpu()
391 return -EINVAL; in dsa_tree_setup_default_cpu()
394 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_default_cpu()
395 if (dp->cpu_dp) in dsa_tree_setup_default_cpu()
398 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp)) in dsa_tree_setup_default_cpu()
399 dp->cpu_dp = cpu_dp; in dsa_tree_setup_default_cpu()
410 if (!ds->ops->preferred_default_local_cpu_port) in dsa_switch_preferred_default_local_cpu_port()
413 cpu_dp = ds->ops->preferred_default_local_cpu_port(ds); in dsa_switch_preferred_default_local_cpu_port()
417 if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds)) in dsa_switch_preferred_default_local_cpu_port()
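A driver with more than one CPU port can steer the default assignment done below by implementing the callback checked here; it must return one of its own CPU ports or the WARN_ON above fires. Minimal sketch with an assumed port index:

	static struct dsa_port *
	foo_preferred_default_local_cpu_port(struct dsa_switch *ds)
	{
		/* Prefer the 10G internal port over the 1G one (index assumed) */
		return dsa_to_port(ds, 9);
	}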
430 struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp; in dsa_tree_setup_cpu_ports() local
432 list_for_each_entry(cpu_dp, &dst->ports, list) { in dsa_tree_setup_cpu_ports()
436 preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds); in dsa_tree_setup_cpu_ports()
441 dsa_switch_for_each_port(dp, cpu_dp->ds) { in dsa_tree_setup_cpu_ports()
443 if (dp->cpu_dp) in dsa_tree_setup_cpu_ports()
446 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp)) in dsa_tree_setup_cpu_ports()
447 dp->cpu_dp = cpu_dp; in dsa_tree_setup_cpu_ports()
456 struct dsa_port *dp; in dsa_tree_teardown_cpu_ports() local
458 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_teardown_cpu_ports()
459 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp)) in dsa_tree_teardown_cpu_ports()
460 dp->cpu_dp = NULL; in dsa_tree_teardown_cpu_ports()
463 static int dsa_port_setup(struct dsa_port *dp) in dsa_port_setup() argument
466 struct dsa_switch *ds = dp->ds; in dsa_port_setup()
470 if (dp->setup) in dsa_port_setup()
473 err = dsa_port_devlink_setup(dp); in dsa_port_setup()
477 switch (dp->type) { in dsa_port_setup()
479 dsa_port_disable(dp); in dsa_port_setup()
482 if (dp->dn) { in dsa_port_setup()
483 err = dsa_shared_port_link_register_of(dp); in dsa_port_setup()
488 dev_warn(ds->dev, in dsa_port_setup()
490 dp->index); in dsa_port_setup()
493 err = dsa_port_enable(dp, NULL); in dsa_port_setup()
500 if (dp->dn) { in dsa_port_setup()
501 err = dsa_shared_port_link_register_of(dp); in dsa_port_setup()
506 dev_warn(ds->dev, in dsa_port_setup()
508 dp->index); in dsa_port_setup()
511 err = dsa_port_enable(dp, NULL); in dsa_port_setup()
518 of_get_mac_address(dp->dn, dp->mac); in dsa_port_setup()
519 err = dsa_user_create(dp); in dsa_port_setup()
524 dsa_port_disable(dp); in dsa_port_setup()
526 dsa_shared_port_link_unregister_of(dp); in dsa_port_setup()
528 dsa_port_devlink_teardown(dp); in dsa_port_setup()
532 dp->setup = true; in dsa_port_setup()
537 static void dsa_port_teardown(struct dsa_port *dp) in dsa_port_teardown() argument
539 if (!dp->setup) in dsa_port_teardown()
542 switch (dp->type) { in dsa_port_teardown()
546 dsa_port_disable(dp); in dsa_port_teardown()
547 if (dp->dn) in dsa_port_teardown()
548 dsa_shared_port_link_unregister_of(dp); in dsa_port_teardown()
551 dsa_port_disable(dp); in dsa_port_teardown()
552 if (dp->dn) in dsa_port_teardown()
553 dsa_shared_port_link_unregister_of(dp); in dsa_port_teardown()
556 if (dp->user) { in dsa_port_teardown()
557 dsa_user_destroy(dp->user); in dsa_port_teardown()
558 dp->user = NULL; in dsa_port_teardown()
563 dsa_port_devlink_teardown(dp); in dsa_port_teardown()
565 dp->setup = false; in dsa_port_teardown()
568 static int dsa_port_setup_as_unused(struct dsa_port *dp) in dsa_port_setup_as_unused() argument
570 dp->type = DSA_PORT_TYPE_UNUSED; in dsa_port_setup_as_unused()
571 return dsa_port_setup(dp); in dsa_port_setup_as_unused()
576 const struct dsa_device_ops *tag_ops = ds->dst->tag_ops; in dsa_switch_setup_tag_protocol()
577 struct dsa_switch_tree *dst = ds->dst; in dsa_switch_setup_tag_protocol()
580 if (tag_ops->proto == dst->default_proto) in dsa_switch_setup_tag_protocol()
584 err = ds->ops->change_tag_protocol(ds, tag_ops->proto); in dsa_switch_setup_tag_protocol()
587 dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n", in dsa_switch_setup_tag_protocol()
588 tag_ops->name, ERR_PTR(err)); in dsa_switch_setup_tag_protocol()
593 if (tag_ops->connect) { in dsa_switch_setup_tag_protocol()
594 err = tag_ops->connect(ds); in dsa_switch_setup_tag_protocol()
599 if (ds->ops->connect_tag_protocol) { in dsa_switch_setup_tag_protocol()
600 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); in dsa_switch_setup_tag_protocol()
602 dev_err(ds->dev, in dsa_switch_setup_tag_protocol()
604 tag_ops->name, ERR_PTR(err)); in dsa_switch_setup_tag_protocol()
612 if (tag_ops->disconnect) in dsa_switch_setup_tag_protocol()
613 tag_ops->disconnect(ds); in dsa_switch_setup_tag_protocol()
620 const struct dsa_device_ops *tag_ops = ds->dst->tag_ops; in dsa_switch_teardown_tag_protocol()
622 if (tag_ops->disconnect) in dsa_switch_teardown_tag_protocol()
623 tag_ops->disconnect(ds); in dsa_switch_teardown_tag_protocol()
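The protocol handed to ds->ops->change_tag_protocol() above is one of the DSA_TAG_PROTO_* constants. A hedged sketch of a driver that can toggle between DSA and EDSA framing (foo_set_frame_mode() is assumed):

	static int foo_change_tag_protocol(struct dsa_switch *ds,
					   enum dsa_tag_protocol proto)
	{
		switch (proto) {
		case DSA_TAG_PROTO_DSA:
			return foo_set_frame_mode(ds->priv, false);
		case DSA_TAG_PROTO_EDSA:
			return foo_set_frame_mode(ds->priv, true);
		default:
			return -EPROTONOSUPPORT;
		}
	}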
630 if (ds->setup) in dsa_switch_setup()
633 /* Initialize ds->phys_mii_mask before registering the user MDIO bus in dsa_switch_setup()
634 * driver and before ops->setup() has run, since the switch drivers and in dsa_switch_setup()
638 ds->phys_mii_mask |= dsa_user_ports(ds); in dsa_switch_setup()
648 ds->configure_vlan_while_not_filtering = true; in dsa_switch_setup()
650 err = ds->ops->setup(ds); in dsa_switch_setup()
658 if (!ds->user_mii_bus && ds->ops->phy_read) { in dsa_switch_setup()
659 ds->user_mii_bus = mdiobus_alloc(); in dsa_switch_setup()
660 if (!ds->user_mii_bus) { in dsa_switch_setup()
661 err = -ENOMEM; in dsa_switch_setup()
667 err = mdiobus_register(ds->user_mii_bus); in dsa_switch_setup()
674 ds->setup = true; in dsa_switch_setup()
678 if (ds->user_mii_bus && ds->ops->phy_read) in dsa_switch_setup()
679 mdiobus_free(ds->user_mii_bus); in dsa_switch_setup()
681 if (ds->ops->teardown) in dsa_switch_setup()
682 ds->ops->teardown(ds); in dsa_switch_setup()
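The user MDIO bus registered above is only allocated when the driver supplies phy_read()/phy_write() but no bus of its own. Sketch of such a pair, with foo_smi_* standing in for the hardware accessors:

	static int foo_phy_read(struct dsa_switch *ds, int port, int regnum)
	{
		return foo_smi_read(ds->priv, port, regnum);
	}

	static int foo_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
	{
		return foo_smi_write(ds->priv, port, regnum, val);
	}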
692 if (!ds->setup) in dsa_switch_teardown()
697 if (ds->user_mii_bus && ds->ops->phy_read) { in dsa_switch_teardown()
698 mdiobus_unregister(ds->user_mii_bus); in dsa_switch_teardown()
699 mdiobus_free(ds->user_mii_bus); in dsa_switch_teardown()
700 ds->user_mii_bus = NULL; in dsa_switch_teardown()
705 if (ds->ops->teardown) in dsa_switch_teardown()
706 ds->ops->teardown(ds); in dsa_switch_teardown()
712 ds->setup = false; in dsa_switch_teardown()
715 /* First tear down the non-shared, then the shared ports. This ensures that
721 struct dsa_port *dp; in dsa_tree_teardown_ports() local
723 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_teardown_ports()
724 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) in dsa_tree_teardown_ports()
725 dsa_port_teardown(dp); in dsa_tree_teardown_ports()
729 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_teardown_ports()
730 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) in dsa_tree_teardown_ports()
731 dsa_port_teardown(dp); in dsa_tree_teardown_ports()
736 struct dsa_port *dp; in dsa_tree_teardown_switches() local
738 list_for_each_entry(dp, &dst->ports, list) in dsa_tree_teardown_switches()
739 dsa_switch_teardown(dp->ds); in dsa_tree_teardown_switches()
742 /* Bring shared ports up first, then non-shared ports */
745 struct dsa_port *dp; in dsa_tree_setup_ports() local
748 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_ports()
749 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) { in dsa_tree_setup_ports()
750 err = dsa_port_setup(dp); in dsa_tree_setup_ports()
756 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_ports()
757 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) { in dsa_tree_setup_ports()
758 err = dsa_port_setup(dp); in dsa_tree_setup_ports()
760 err = dsa_port_setup_as_unused(dp); in dsa_tree_setup_ports()
777 struct dsa_port *dp; in dsa_tree_setup_switches() local
780 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_switches()
781 err = dsa_switch_setup(dp->ds); in dsa_tree_setup_switches()
799 struct net_device *conduit = cpu_dp->conduit; in dsa_tree_setup_conduit()
800 bool admin_up = (conduit->flags & IFF_UP) && in dsa_tree_setup_conduit()
825 struct net_device *conduit = cpu_dp->conduit; in dsa_tree_teardown_conduit()
842 struct dsa_port *dp; in dsa_tree_setup_lags() local
844 list_for_each_entry(dp, &dst->ports, list) { in dsa_tree_setup_lags()
845 if (dp->ds->num_lag_ids > len) in dsa_tree_setup_lags()
846 len = dp->ds->num_lag_ids; in dsa_tree_setup_lags()
852 dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL); in dsa_tree_setup_lags()
853 if (!dst->lags) in dsa_tree_setup_lags()
854 return -ENOMEM; in dsa_tree_setup_lags()
856 dst->lags_len = len; in dsa_tree_setup_lags()
862 kfree(dst->lags); in dsa_tree_teardown_lags()
870 if (dst->setup) { in dsa_tree_setup()
872 dst->index); in dsa_tree_setup()
873 return -EEXIST; in dsa_tree_setup()
900 dst->setup = true; in dsa_tree_setup()
902 pr_info("DSA: tree %d setup\n", dst->index); in dsa_tree_setup()
922 if (!dst->setup) in dsa_tree_teardown()
935 list_for_each_entry_safe(dl, next, &dst->rtable, list) { in dsa_tree_teardown()
936 list_del(&dl->list); in dsa_tree_teardown()
940 pr_info("DSA: tree %d torn down\n", dst->index); in dsa_tree_teardown()
942 dst->setup = false; in dsa_tree_teardown()
948 const struct dsa_device_ops *old_tag_ops = dst->tag_ops; in dsa_tree_bind_tag_proto()
952 dst->tag_ops = tag_ops; in dsa_tree_bind_tag_proto()
959 if (err && err != -EOPNOTSUPP) in dsa_tree_bind_tag_proto()
971 dst->tag_ops = old_tag_ops; in dsa_tree_bind_tag_proto()
985 struct dsa_port *dp; in dsa_tree_change_tag_proto() local
986 int err = -EBUSY; in dsa_tree_change_tag_proto()
996 dsa_tree_for_each_user_port(dp, dst) { in dsa_tree_change_tag_proto()
997 if (dsa_port_to_conduit(dp)->flags & IFF_UP) in dsa_tree_change_tag_proto()
1000 if (dp->user->flags & IFF_UP) in dsa_tree_change_tag_proto()
1030 struct dsa_port *cpu_dp = conduit->dsa_ptr; in dsa_tree_conduit_state_change()
1042 struct dsa_port *cpu_dp = conduit->dsa_ptr; in dsa_tree_conduit_admin_state_change()
1052 (up && cpu_dp->conduit_oper_up)) in dsa_tree_conduit_admin_state_change()
1055 cpu_dp->conduit_admin_up = up; in dsa_tree_conduit_admin_state_change()
1065 struct dsa_port *cpu_dp = conduit->dsa_ptr; in dsa_tree_conduit_oper_state_change()
1075 (cpu_dp->conduit_admin_up && up)) in dsa_tree_conduit_oper_state_change()
1078 cpu_dp->conduit_oper_up = up; in dsa_tree_conduit_oper_state_change()
1086 struct dsa_switch_tree *dst = ds->dst; in dsa_port_touch()
1087 struct dsa_port *dp; in dsa_port_touch() local
1089 dsa_switch_for_each_port(dp, ds) in dsa_port_touch()
1090 if (dp->index == index) in dsa_port_touch()
1091 return dp; in dsa_port_touch()
1093 dp = kzalloc(sizeof(*dp), GFP_KERNEL); in dsa_port_touch()
1094 if (!dp) in dsa_port_touch()
1097 dp->ds = ds; in dsa_port_touch()
1098 dp->index = index; in dsa_port_touch()
1100 mutex_init(&dp->addr_lists_lock); in dsa_port_touch()
1101 mutex_init(&dp->vlans_lock); in dsa_port_touch()
1102 INIT_LIST_HEAD(&dp->fdbs); in dsa_port_touch()
1103 INIT_LIST_HEAD(&dp->mdbs); in dsa_port_touch()
1104 INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */ in dsa_port_touch()
1105 INIT_LIST_HEAD(&dp->list); in dsa_port_touch()
1106 list_add_tail(&dp->list, &dst->ports); in dsa_port_touch()
1108 return dp; in dsa_port_touch()
1111 static int dsa_port_parse_user(struct dsa_port *dp, const char *name) in dsa_port_parse_user() argument
1113 dp->type = DSA_PORT_TYPE_USER; in dsa_port_parse_user()
1114 dp->name = name; in dsa_port_parse_user()
1119 static int dsa_port_parse_dsa(struct dsa_port *dp) in dsa_port_parse_dsa() argument
1121 dp->type = DSA_PORT_TYPE_DSA; in dsa_port_parse_dsa()
1126 static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp, in dsa_get_tag_protocol() argument
1130 struct dsa_switch *mds, *ds = dp->ds; in dsa_get_tag_protocol()
1140 mds = mdp->ds; in dsa_get_tag_protocol()
1141 mdp_upstream = dsa_upstream_port(mds, mdp->index); in dsa_get_tag_protocol()
1142 tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream, in dsa_get_tag_protocol()
1149 return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol); in dsa_get_tag_protocol()
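get_tag_protocol() is the one mandatory tagging op consulted here; the third argument is the protocol already chosen by an upstream switch in a cascaded setup, which this function looks up first. Minimal sketch:

	static enum dsa_tag_protocol
	foo_get_tag_protocol(struct dsa_switch *ds, int port,
			     enum dsa_tag_protocol mprot)
	{
		/* This hardware always prepends an EDSA header on the CPU port */
		return DSA_TAG_PROTO_EDSA;
	}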
1152 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit, in dsa_port_parse_cpu() argument
1156 struct dsa_switch *ds = dp->ds; in dsa_port_parse_cpu()
1157 struct dsa_switch_tree *dst = ds->dst; in dsa_port_parse_cpu()
1161 default_proto = dsa_get_tag_protocol(dp, conduit); in dsa_port_parse_cpu()
1162 if (dst->default_proto) { in dsa_port_parse_cpu()
1163 if (dst->default_proto != default_proto) { in dsa_port_parse_cpu()
1164 dev_err(ds->dev, in dsa_port_parse_cpu()
1166 return -EINVAL; in dsa_port_parse_cpu()
1169 dst->default_proto = default_proto; in dsa_port_parse_cpu()
1174 if (!ds->ops->change_tag_protocol) { in dsa_port_parse_cpu()
1175 dev_err(ds->dev, "Tag protocol cannot be modified\n"); in dsa_port_parse_cpu()
1176 return -EINVAL; in dsa_port_parse_cpu()
1181 dev_warn(ds->dev, in dsa_port_parse_cpu()
1192 if (PTR_ERR(tag_ops) == -ENOPROTOOPT) in dsa_port_parse_cpu()
1193 return -EPROBE_DEFER; in dsa_port_parse_cpu()
1195 dev_warn(ds->dev, "No tagger for this switch\n"); in dsa_port_parse_cpu()
1199 if (dst->tag_ops) { in dsa_port_parse_cpu()
1200 if (dst->tag_ops != tag_ops) { in dsa_port_parse_cpu()
1201 dev_err(ds->dev, in dsa_port_parse_cpu()
1205 return -EINVAL; in dsa_port_parse_cpu()
1209 * protocol is still reference-counted only per switch tree. in dsa_port_parse_cpu()
1213 dst->tag_ops = tag_ops; in dsa_port_parse_cpu()
1216 dp->conduit = conduit; in dsa_port_parse_cpu()
1217 dp->type = DSA_PORT_TYPE_CPU; in dsa_port_parse_cpu()
1218 dsa_port_set_tag_protocol(dp, dst->tag_ops); in dsa_port_parse_cpu()
1219 dp->dst = dst; in dsa_port_parse_cpu()
1237 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn) in dsa_port_parse_of() argument
1243 dp->dn = dn; in dsa_port_parse_of()
1252 return -EPROBE_DEFER; in dsa_port_parse_of()
1254 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL); in dsa_port_parse_of()
1255 return dsa_port_parse_cpu(dp, conduit, user_protocol); in dsa_port_parse_of()
1259 return dsa_port_parse_dsa(dp); in dsa_port_parse_of()
1261 return dsa_port_parse_user(dp, name); in dsa_port_parse_of()
1268 struct dsa_port *dp; in dsa_switch_parse_ports_of() local
1274 /* The second possibility is "ethernet-ports" */ in dsa_switch_parse_ports_of()
1275 ports = of_get_child_by_name(dn, "ethernet-ports"); in dsa_switch_parse_ports_of()
1277 dev_err(ds->dev, "no ports child node found\n"); in dsa_switch_parse_ports_of()
1278 return -EINVAL; in dsa_switch_parse_ports_of()
1289 if (reg >= ds->num_ports) { in dsa_switch_parse_ports_of()
1290 dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n", in dsa_switch_parse_ports_of()
1291 port, reg, ds->num_ports); in dsa_switch_parse_ports_of()
1293 err = -EINVAL; in dsa_switch_parse_ports_of()
1297 dp = dsa_to_port(ds, reg); in dsa_switch_parse_ports_of()
1299 err = dsa_port_parse_of(dp, port); in dsa_switch_parse_ports_of()
1319 if (sz < 0 && sz != -EINVAL) in dsa_switch_parse_member_of()
1322 ds->index = m[1]; in dsa_switch_parse_member_of()
1324 ds->dst = dsa_tree_touch(m[0]); in dsa_switch_parse_member_of()
1325 if (!ds->dst) in dsa_switch_parse_member_of()
1326 return -ENOMEM; in dsa_switch_parse_member_of()
1328 if (dsa_switch_find(ds->dst->index, ds->index)) { in dsa_switch_parse_member_of()
1329 dev_err(ds->dev, in dsa_switch_parse_member_of()
1331 ds->index, ds->dst->index); in dsa_switch_parse_member_of()
1332 return -EEXIST; in dsa_switch_parse_member_of()
1335 if (ds->dst->last_switch < ds->index) in dsa_switch_parse_member_of()
1336 ds->dst->last_switch = ds->index; in dsa_switch_parse_member_of()
1343 struct dsa_port *dp; in dsa_switch_touch_ports() local
1346 for (port = 0; port < ds->num_ports; port++) { in dsa_switch_touch_ports()
1347 dp = dsa_port_touch(ds, port); in dsa_switch_touch_ports()
1348 if (!dp) in dsa_switch_touch_ports()
1349 return -ENOMEM; in dsa_switch_touch_ports()
1372 if (dev->class != NULL && !strcmp(dev->class->name, class)) in dev_is_class()
1406 static int dsa_port_parse(struct dsa_port *dp, const char *name, in dsa_port_parse() argument
1414 return -EPROBE_DEFER; in dsa_port_parse()
1418 return dsa_port_parse_cpu(dp, conduit, NULL); in dsa_port_parse()
1422 return dsa_port_parse_dsa(dp); in dsa_port_parse()
1424 return dsa_port_parse_user(dp, name); in dsa_port_parse()
1431 struct dsa_port *dp; in dsa_switch_parse_ports() local
1438 name = cd->port_names[i]; in dsa_switch_parse_ports()
1439 dev = cd->netdev[i]; in dsa_switch_parse_ports()
1440 dp = dsa_to_port(ds, i); in dsa_switch_parse_ports()
1445 err = dsa_port_parse(dp, name, dev); in dsa_switch_parse_ports()
1453 return -EINVAL; in dsa_switch_parse_ports()
1462 ds->cd = cd; in dsa_switch_parse()
1467 ds->index = 0; in dsa_switch_parse()
1468 ds->dst = dsa_tree_touch(0); in dsa_switch_parse()
1469 if (!ds->dst) in dsa_switch_parse()
1470 return -ENOMEM; in dsa_switch_parse()
1481 struct dsa_port *dp, *next; in dsa_switch_release_ports() local
1483 dsa_switch_for_each_port_safe(dp, next, ds) { in dsa_switch_release_ports()
1484 WARN_ON(!list_empty(&dp->fdbs)); in dsa_switch_release_ports()
1485 WARN_ON(!list_empty(&dp->mdbs)); in dsa_switch_release_ports()
1486 WARN_ON(!list_empty(&dp->vlans)); in dsa_switch_release_ports()
1487 list_del(&dp->list); in dsa_switch_release_ports()
1488 kfree(dp); in dsa_switch_release_ports()
1499 if (!ds->dev) in dsa_switch_probe()
1500 return -ENODEV; in dsa_switch_probe()
1502 pdata = ds->dev->platform_data; in dsa_switch_probe()
1503 np = ds->dev->of_node; in dsa_switch_probe()
1505 if (!ds->num_ports) in dsa_switch_probe()
1506 return -EINVAL; in dsa_switch_probe()
1508 if (ds->phylink_mac_ops) { in dsa_switch_probe()
1509 if (ds->ops->phylink_mac_select_pcs || in dsa_switch_probe()
1510 ds->ops->phylink_mac_config || in dsa_switch_probe()
1511 ds->ops->phylink_mac_link_down || in dsa_switch_probe()
1512 ds->ops->phylink_mac_link_up) in dsa_switch_probe()
1513 return -EINVAL; in dsa_switch_probe()
1525 err = -ENODEV; in dsa_switch_probe()
1531 dst = ds->dst; in dsa_switch_probe()
1548 dsa_tree_put(ds->dst); in dsa_register_switch()
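For context, the probe path above is normally reached through dsa_register_switch() from a bus driver. A bare-bones sketch of that caller, reusing the assumed foo_* ops from the earlier examples:

	#include <linux/platform_device.h>
	#include <net/dsa.h>

	static const struct dsa_switch_ops foo_switch_ops = {
		.get_tag_protocol	= foo_get_tag_protocol,
		.setup			= foo_setup,
		.change_tag_protocol	= foo_change_tag_protocol,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct dsa_switch *ds;

		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
		if (!ds)
			return -ENOMEM;

		ds->dev = &pdev->dev;	/* mandatory, checked by dsa_switch_probe() */
		ds->num_ports = 5;	/* mandatory, checked by dsa_switch_probe() */
		ds->ops = &foo_switch_ops;

		return dsa_register_switch(ds);
	}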
1557 struct dsa_switch_tree *dst = ds->dst; in dsa_switch_remove()
1581 struct dsa_port *dp; in dsa_switch_shutdown() local
1585 if (!ds->setup) in dsa_switch_shutdown()
1590 dsa_switch_for_each_cpu_port(dp, ds) in dsa_switch_shutdown()
1591 list_add(&dp->conduit->close_list, &close_list); in dsa_switch_shutdown()
1595 dsa_switch_for_each_user_port(dp, ds) { in dsa_switch_shutdown()
1596 conduit = dsa_port_to_conduit(dp); in dsa_switch_shutdown()
1597 user_dev = dp->user; in dsa_switch_shutdown()
1606 dsa_switch_for_each_cpu_port(dp, ds) in dsa_switch_shutdown()
1607 dp->conduit->dsa_ptr = NULL; in dsa_switch_shutdown()
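Unhooking conduit->dsa_ptr here is what lets a driver quiesce the hardware from its bus .shutdown() callback. The pattern on the driver side looks roughly like this (struct foo_priv is assumed):

	static void foo_shutdown(struct platform_device *pdev)
	{
		struct foo_priv *priv = platform_get_drvdata(pdev);

		if (!priv)
			return;

		dsa_switch_shutdown(priv->ds);

		platform_set_drvdata(pdev, NULL);
	}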
1616 static bool dsa_port_is_initialized(const struct dsa_port *dp) in dsa_port_is_initialized() argument
1618 return dp->type == DSA_PORT_TYPE_USER && dp->user; in dsa_port_is_initialized()
1623 struct dsa_port *dp; in dsa_switch_suspend() local
1627 dsa_switch_for_each_port(dp, ds) { in dsa_switch_suspend()
1628 if (!dsa_port_is_initialized(dp)) in dsa_switch_suspend()
1631 ret = dsa_user_suspend(dp->user); in dsa_switch_suspend()
1636 if (ds->ops->suspend) in dsa_switch_suspend()
1637 ret = ds->ops->suspend(ds); in dsa_switch_suspend()
1645 struct dsa_port *dp; in dsa_switch_resume() local
1648 if (ds->ops->resume) in dsa_switch_resume()
1649 ret = ds->ops->resume(ds); in dsa_switch_resume()
1655 dsa_switch_for_each_port(dp, ds) { in dsa_switch_resume()
1656 if (!dsa_port_is_initialized(dp)) in dsa_switch_resume()
1659 ret = dsa_user_resume(dp->user); in dsa_switch_resume()
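dsa_switch_suspend()/dsa_switch_resume() are meant to be called from the driver's own PM callbacks, roughly as sketched below (foo_priv again assumed):

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		return dsa_switch_suspend(priv->ds);
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		return dsa_switch_resume(priv->ds);
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);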
1672 return ERR_PTR(-ENODEV); in dsa_port_from_netdev()
1680 if (a->type != b->type) in dsa_db_equal()
1683 switch (a->type) { in dsa_db_equal()
1685 return a->dp == b->dp; in dsa_db_equal()
1687 return a->lag.dev == b->lag.dev; in dsa_db_equal()
1689 return a->bridge.num == b->bridge.num; in dsa_db_equal()
1700 struct dsa_port *dp = dsa_to_port(ds, port); in dsa_fdb_present_in_other_db() local
1703 lockdep_assert_held(&dp->addr_lists_lock); in dsa_fdb_present_in_other_db()
1705 list_for_each_entry(a, &dp->fdbs, list) { in dsa_fdb_present_in_other_db()
1706 if (!ether_addr_equal(a->addr, addr) || a->vid != vid) in dsa_fdb_present_in_other_db()
1709 if (a->db.type == db.type && !dsa_db_equal(&a->db, &db)) in dsa_fdb_present_in_other_db()
1721 struct dsa_port *dp = dsa_to_port(ds, port); in dsa_mdb_present_in_other_db() local
1724 lockdep_assert_held(&dp->addr_lists_lock); in dsa_mdb_present_in_other_db()
1726 list_for_each_entry(a, &dp->mdbs, list) { in dsa_mdb_present_in_other_db()
1727 if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid) in dsa_mdb_present_in_other_db()
1730 if (a->db.type == db.type && !dsa_db_equal(&a->db, &db)) in dsa_mdb_present_in_other_db()
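The struct dsa_db compared here is the same one passed down to the driver's FDB/MDB ops, letting it keep addresses from different bridges apart. A hedged sketch of the receiving end (foo_fdb_write() is assumed):

	static int foo_port_fdb_add(struct dsa_switch *ds, int port,
				    const unsigned char *addr, u16 vid,
				    struct dsa_db db)
	{
		/* Only bridge-scoped databases map onto this hardware's FID table */
		if (db.type != DSA_DB_BRIDGE)
			return -EOPNOTSUPP;

		return foo_fdb_write(ds->priv, db.bridge.num, addr, vid, port);
	}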
1759 return -ENOMEM; in dsa_init_module()