1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * DPAA2 Ethernet Switch driver
4   *
5   * Copyright 2014-2016 Freescale Semiconductor Inc.
6   * Copyright 2017-2021 NXP
7   *
8   */
9  
10  #include <linux/module.h>
11  
12  #include <linux/interrupt.h>
13  #include <linux/kthread.h>
14  #include <linux/workqueue.h>
15  #include <linux/iommu.h>
16  #include <net/pkt_cls.h>
17  
18  #include <linux/fsl/mc.h>
19  
20  #include "dpaa2-switch.h"
21  
22  /* Minimal supported DPSW version */
23  #define DPSW_MIN_VER_MAJOR		8
24  #define DPSW_MIN_VER_MINOR		9
25  
26  #define DEFAULT_VLAN_ID			1
27  
dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv * port_priv)28  static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
29  {
30  	return port_priv->fdb->fdb_id;
31  }
32  
dpaa2_switch_fdb_get_unused(struct ethsw_core * ethsw)33  static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
34  {
35  	int i;
36  
37  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
38  		if (!ethsw->fdbs[i].in_use)
39  			return &ethsw->fdbs[i];
40  	return NULL;
41  }
42  
43  static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core * ethsw)44  dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
45  {
46  	int i;
47  
48  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
49  		if (!ethsw->filter_blocks[i].in_use)
50  			return &ethsw->filter_blocks[i];
51  	return NULL;
52  }
53  
dpaa2_switch_port_set_fdb(struct ethsw_port_priv * port_priv,struct net_device * bridge_dev)54  static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
55  				     struct net_device *bridge_dev)
56  {
57  	struct ethsw_port_priv *other_port_priv = NULL;
58  	struct dpaa2_switch_fdb *fdb;
59  	struct net_device *other_dev;
60  	struct list_head *iter;
61  
62  	/* If we leave a bridge (bridge_dev is NULL), find an unused
63  	 * FDB and use that.
64  	 */
65  	if (!bridge_dev) {
66  		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
67  
68  		/* If there is no unused FDB, we must be the last port that
69  		 * leaves the last bridge, all the others are standalone. We
70  		 * can just keep the FDB that we already have.
71  		 */
72  
73  		if (!fdb) {
74  			port_priv->fdb->bridge_dev = NULL;
75  			return 0;
76  		}
77  
78  		port_priv->fdb = fdb;
79  		port_priv->fdb->in_use = true;
80  		port_priv->fdb->bridge_dev = NULL;
81  		return 0;
82  	}
83  
84  	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
85  	 * being held. Assert on it so that it's easier to catch new code
86  	 * paths that reach this point without the RTNL lock.
87  	 */
88  	ASSERT_RTNL();
89  
90  	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
91  	 * to be present in that bridge
92  	 */
93  	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
94  		if (!dpaa2_switch_port_dev_check(other_dev))
95  			continue;
96  
97  		if (other_dev == port_priv->netdev)
98  			continue;
99  
100  		other_port_priv = netdev_priv(other_dev);
101  		break;
102  	}
103  
104  	/* The current port is about to change its FDB to the one used by the
105  	 * first port that joined the bridge.
106  	 */
107  	if (other_port_priv) {
108  		/* The previous FDB is about to become unused, since the
109  		 * interface is no longer standalone.
110  		 */
111  		port_priv->fdb->in_use = false;
112  		port_priv->fdb->bridge_dev = NULL;
113  
114  		/* Get a reference to the new FDB */
115  		port_priv->fdb = other_port_priv->fdb;
116  	}
117  
118  	/* Keep track of the new upper bridge device */
119  	port_priv->fdb->bridge_dev = bridge_dev;
120  
121  	return 0;
122  }
123  
dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core * ethsw,u16 fdb_id,enum dpsw_flood_type type,struct dpsw_egress_flood_cfg * cfg)124  static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
125  					   enum dpsw_flood_type type,
126  					   struct dpsw_egress_flood_cfg *cfg)
127  {
128  	int i = 0, j;
129  
130  	memset(cfg, 0, sizeof(*cfg));
131  
132  	/* Add all the DPAA2 switch ports found in the same bridging domain to
133  	 * the egress flooding domain
134  	 */
135  	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
136  		if (!ethsw->ports[j])
137  			continue;
138  		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
139  			continue;
140  
141  		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
142  			cfg->if_id[i++] = ethsw->ports[j]->idx;
143  		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
144  			cfg->if_id[i++] = ethsw->ports[j]->idx;
145  	}
146  
147  	/* Add the CTRL interface to the egress flooding domain */
148  	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
149  
150  	cfg->fdb_id = fdb_id;
151  	cfg->flood_type = type;
152  	cfg->num_ifs = i;
153  }
154  
dpaa2_switch_fdb_set_egress_flood(struct ethsw_core * ethsw,u16 fdb_id)155  static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
156  {
157  	struct dpsw_egress_flood_cfg flood_cfg;
158  	int err;
159  
160  	/* Setup broadcast flooding domain */
161  	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
162  	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
163  				    &flood_cfg);
164  	if (err) {
165  		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
166  		return err;
167  	}
168  
169  	/* Setup unknown flooding domain */
170  	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
171  	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
172  				    &flood_cfg);
173  	if (err) {
174  		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
175  		return err;
176  	}
177  
178  	return 0;
179  }
180  
dpaa2_iova_to_virt(struct iommu_domain * domain,dma_addr_t iova_addr)181  static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
182  				dma_addr_t iova_addr)
183  {
184  	phys_addr_t phys_addr;
185  
186  	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
187  
188  	return phys_to_virt(phys_addr);
189  }
190  
dpaa2_switch_add_vlan(struct ethsw_port_priv * port_priv,u16 vid)191  static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
192  {
193  	struct ethsw_core *ethsw = port_priv->ethsw_data;
194  	struct dpsw_vlan_cfg vcfg = {0};
195  	int err;
196  
197  	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
198  	err = dpsw_vlan_add(ethsw->mc_io, 0,
199  			    ethsw->dpsw_handle, vid, &vcfg);
200  	if (err) {
201  		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
202  		return err;
203  	}
204  	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
205  
206  	return 0;
207  }
208  
dpaa2_switch_port_is_up(struct ethsw_port_priv * port_priv)209  static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
210  {
211  	struct net_device *netdev = port_priv->netdev;
212  	struct dpsw_link_state state;
213  	int err;
214  
215  	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
216  				     port_priv->ethsw_data->dpsw_handle,
217  				     port_priv->idx, &state);
218  	if (err) {
219  		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
220  		return true;
221  	}
222  
223  	WARN_ONCE(state.up > 1, "Garbage read into link_state");
224  
225  	return state.up ? true : false;
226  }
227  
dpaa2_switch_port_set_pvid(struct ethsw_port_priv * port_priv,u16 pvid)228  static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
229  {
230  	struct ethsw_core *ethsw = port_priv->ethsw_data;
231  	struct net_device *netdev = port_priv->netdev;
232  	struct dpsw_tci_cfg tci_cfg = { 0 };
233  	bool up;
234  	int err, ret;
235  
236  	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
237  			      port_priv->idx, &tci_cfg);
238  	if (err) {
239  		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
240  		return err;
241  	}
242  
243  	tci_cfg.vlan_id = pvid;
244  
245  	/* Interface needs to be down to change PVID */
246  	up = dpaa2_switch_port_is_up(port_priv);
247  	if (up) {
248  		err = dpsw_if_disable(ethsw->mc_io, 0,
249  				      ethsw->dpsw_handle,
250  				      port_priv->idx);
251  		if (err) {
252  			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
253  			return err;
254  		}
255  	}
256  
257  	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
258  			      port_priv->idx, &tci_cfg);
259  	if (err) {
260  		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
261  		goto set_tci_error;
262  	}
263  
264  	/* Delete previous PVID info and mark the new one */
265  	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
266  	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
267  	port_priv->pvid = pvid;
268  
269  set_tci_error:
270  	if (up) {
271  		ret = dpsw_if_enable(ethsw->mc_io, 0,
272  				     ethsw->dpsw_handle,
273  				     port_priv->idx);
274  		if (ret) {
275  			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
276  			return ret;
277  		}
278  	}
279  
280  	return err;
281  }
282  
dpaa2_switch_port_add_vlan(struct ethsw_port_priv * port_priv,u16 vid,u16 flags)283  static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
284  				      u16 vid, u16 flags)
285  {
286  	struct ethsw_core *ethsw = port_priv->ethsw_data;
287  	struct net_device *netdev = port_priv->netdev;
288  	struct dpsw_vlan_if_cfg vcfg = {0};
289  	int err;
290  
291  	if (port_priv->vlans[vid]) {
292  		netdev_err(netdev, "VLAN %d already configured\n", vid);
293  		return -EEXIST;
294  	}
295  
296  	/* If hit, this VLAN rule will lead the packet into the FDB table
297  	 * specified in the vlan configuration below
298  	 */
299  	vcfg.num_ifs = 1;
300  	vcfg.if_id[0] = port_priv->idx;
301  	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
302  	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
303  	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
304  	if (err) {
305  		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
306  		return err;
307  	}
308  
309  	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
310  
311  	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
312  		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
313  						ethsw->dpsw_handle,
314  						vid, &vcfg);
315  		if (err) {
316  			netdev_err(netdev,
317  				   "dpsw_vlan_add_if_untagged err %d\n", err);
318  			return err;
319  		}
320  		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
321  	}
322  
323  	if (flags & BRIDGE_VLAN_INFO_PVID) {
324  		err = dpaa2_switch_port_set_pvid(port_priv, vid);
325  		if (err)
326  			return err;
327  	}
328  
329  	return 0;
330  }
331  
br_stp_state_to_dpsw(u8 state)332  static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
333  {
334  	switch (state) {
335  	case BR_STATE_DISABLED:
336  		return DPSW_STP_STATE_DISABLED;
337  	case BR_STATE_LISTENING:
338  		return DPSW_STP_STATE_LISTENING;
339  	case BR_STATE_LEARNING:
340  		return DPSW_STP_STATE_LEARNING;
341  	case BR_STATE_FORWARDING:
342  		return DPSW_STP_STATE_FORWARDING;
343  	case BR_STATE_BLOCKING:
344  		return DPSW_STP_STATE_BLOCKING;
345  	default:
346  		return DPSW_STP_STATE_DISABLED;
347  	}
348  }
349  
dpaa2_switch_port_set_stp_state(struct ethsw_port_priv * port_priv,u8 state)350  static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
351  {
352  	struct dpsw_stp_cfg stp_cfg = {0};
353  	int err;
354  	u16 vid;
355  
356  	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
357  		return 0;	/* Nothing to do */
358  
359  	stp_cfg.state = br_stp_state_to_dpsw(state);
360  	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
361  		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
362  			stp_cfg.vlan_id = vid;
363  			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
364  					      port_priv->ethsw_data->dpsw_handle,
365  					      port_priv->idx, &stp_cfg);
366  			if (err) {
367  				netdev_err(port_priv->netdev,
368  					   "dpsw_if_set_stp err %d\n", err);
369  				return err;
370  			}
371  		}
372  	}
373  
374  	port_priv->stp_state = state;
375  
376  	return 0;
377  }
378  
dpaa2_switch_dellink(struct ethsw_core * ethsw,u16 vid)379  static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
380  {
381  	struct ethsw_port_priv *ppriv_local = NULL;
382  	int i, err;
383  
384  	if (!ethsw->vlans[vid])
385  		return -ENOENT;
386  
387  	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
388  	if (err) {
389  		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
390  		return err;
391  	}
392  	ethsw->vlans[vid] = 0;
393  
394  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
395  		ppriv_local = ethsw->ports[i];
396  		if (ppriv_local)
397  			ppriv_local->vlans[vid] = 0;
398  	}
399  
400  	return 0;
401  }
402  
dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv * port_priv,const unsigned char * addr)403  static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
404  					const unsigned char *addr)
405  {
406  	struct dpsw_fdb_unicast_cfg entry = {0};
407  	u16 fdb_id;
408  	int err;
409  
410  	entry.if_egress = port_priv->idx;
411  	entry.type = DPSW_FDB_ENTRY_STATIC;
412  	ether_addr_copy(entry.mac_addr, addr);
413  
414  	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
415  	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
416  				   port_priv->ethsw_data->dpsw_handle,
417  				   fdb_id, &entry);
418  	if (err)
419  		netdev_err(port_priv->netdev,
420  			   "dpsw_fdb_add_unicast err %d\n", err);
421  	return err;
422  }
423  
dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv * port_priv,const unsigned char * addr)424  static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
425  					const unsigned char *addr)
426  {
427  	struct dpsw_fdb_unicast_cfg entry = {0};
428  	u16 fdb_id;
429  	int err;
430  
431  	entry.if_egress = port_priv->idx;
432  	entry.type = DPSW_FDB_ENTRY_STATIC;
433  	ether_addr_copy(entry.mac_addr, addr);
434  
435  	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
436  	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
437  				      port_priv->ethsw_data->dpsw_handle,
438  				      fdb_id, &entry);
439  	/* Silently discard error for calling multiple times the del command */
440  	if (err && err != -ENXIO)
441  		netdev_err(port_priv->netdev,
442  			   "dpsw_fdb_remove_unicast err %d\n", err);
443  	return err;
444  }
445  
dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv * port_priv,const unsigned char * addr)446  static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
447  					const unsigned char *addr)
448  {
449  	struct dpsw_fdb_multicast_cfg entry = {0};
450  	u16 fdb_id;
451  	int err;
452  
453  	ether_addr_copy(entry.mac_addr, addr);
454  	entry.type = DPSW_FDB_ENTRY_STATIC;
455  	entry.num_ifs = 1;
456  	entry.if_id[0] = port_priv->idx;
457  
458  	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
459  	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
460  				     port_priv->ethsw_data->dpsw_handle,
461  				     fdb_id, &entry);
462  	/* Silently discard error for calling multiple times the add command */
463  	if (err && err != -ENXIO)
464  		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
465  			   err);
466  	return err;
467  }
468  
dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv * port_priv,const unsigned char * addr)469  static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
470  					const unsigned char *addr)
471  {
472  	struct dpsw_fdb_multicast_cfg entry = {0};
473  	u16 fdb_id;
474  	int err;
475  
476  	ether_addr_copy(entry.mac_addr, addr);
477  	entry.type = DPSW_FDB_ENTRY_STATIC;
478  	entry.num_ifs = 1;
479  	entry.if_id[0] = port_priv->idx;
480  
481  	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
482  	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
483  					port_priv->ethsw_data->dpsw_handle,
484  					fdb_id, &entry);
485  	/* Silently discard error for calling multiple times the del command */
486  	if (err && err != -ENAVAIL)
487  		netdev_err(port_priv->netdev,
488  			   "dpsw_fdb_remove_multicast err %d\n", err);
489  	return err;
490  }
491  
dpaa2_switch_port_get_stats(struct net_device * netdev,struct rtnl_link_stats64 * stats)492  static void dpaa2_switch_port_get_stats(struct net_device *netdev,
493  					struct rtnl_link_stats64 *stats)
494  {
495  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
496  	u64 tmp;
497  	int err;
498  
499  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
500  				  port_priv->ethsw_data->dpsw_handle,
501  				  port_priv->idx,
502  				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
503  	if (err)
504  		goto error;
505  
506  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
507  				  port_priv->ethsw_data->dpsw_handle,
508  				  port_priv->idx,
509  				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
510  	if (err)
511  		goto error;
512  
513  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
514  				  port_priv->ethsw_data->dpsw_handle,
515  				  port_priv->idx,
516  				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
517  	if (err)
518  		goto error;
519  
520  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
521  				  port_priv->ethsw_data->dpsw_handle,
522  				  port_priv->idx,
523  				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
524  	if (err)
525  		goto error;
526  
527  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
528  				  port_priv->ethsw_data->dpsw_handle,
529  				  port_priv->idx,
530  				  DPSW_CNT_ING_FRAME_DISCARD,
531  				  &stats->rx_dropped);
532  	if (err)
533  		goto error;
534  
535  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
536  				  port_priv->ethsw_data->dpsw_handle,
537  				  port_priv->idx,
538  				  DPSW_CNT_ING_FLTR_FRAME,
539  				  &tmp);
540  	if (err)
541  		goto error;
542  	stats->rx_dropped += tmp;
543  
544  	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
545  				  port_priv->ethsw_data->dpsw_handle,
546  				  port_priv->idx,
547  				  DPSW_CNT_EGR_FRAME_DISCARD,
548  				  &stats->tx_dropped);
549  	if (err)
550  		goto error;
551  
552  	return;
553  
554  error:
555  	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
556  }
557  
dpaa2_switch_port_has_offload_stats(const struct net_device * netdev,int attr_id)558  static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
559  						int attr_id)
560  {
561  	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
562  }
563  
dpaa2_switch_port_get_offload_stats(int attr_id,const struct net_device * netdev,void * sp)564  static int dpaa2_switch_port_get_offload_stats(int attr_id,
565  					       const struct net_device *netdev,
566  					       void *sp)
567  {
568  	switch (attr_id) {
569  	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
570  		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
571  		return 0;
572  	}
573  
574  	return -EINVAL;
575  }
576  
dpaa2_switch_port_change_mtu(struct net_device * netdev,int mtu)577  static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
578  {
579  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
580  	int err;
581  
582  	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
583  					   0,
584  					   port_priv->ethsw_data->dpsw_handle,
585  					   port_priv->idx,
586  					   (u16)ETHSW_L2_MAX_FRM(mtu));
587  	if (err) {
588  		netdev_err(netdev,
589  			   "dpsw_if_set_max_frame_length() err %d\n", err);
590  		return err;
591  	}
592  
593  	WRITE_ONCE(netdev->mtu, mtu);
594  	return 0;
595  }
596  
dpaa2_switch_port_link_state_update(struct net_device * netdev)597  static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
598  {
599  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
600  	struct dpsw_link_state state;
601  	int err;
602  
603  	/* When we manage the MAC/PHY using phylink there is no need
604  	 * to manually update the netif_carrier.
605  	 * We can avoid locking because we are called from the "link changed"
606  	 * IRQ handler, which is the same as the "endpoint changed" IRQ handler
607  	 * (the writer to port_priv->mac), so we cannot race with it.
608  	 */
609  	if (dpaa2_mac_is_type_phy(port_priv->mac))
610  		return 0;
611  
612  	/* Interrupts are received even though no one issued an 'ifconfig up'
613  	 * on the switch interface. Ignore these link state update interrupts
614  	 */
615  	if (!netif_running(netdev))
616  		return 0;
617  
618  	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
619  				     port_priv->ethsw_data->dpsw_handle,
620  				     port_priv->idx, &state);
621  	if (err) {
622  		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
623  		return err;
624  	}
625  
626  	WARN_ONCE(state.up > 1, "Garbage read into link_state");
627  
628  	if (state.up != port_priv->link_state) {
629  		if (state.up) {
630  			netif_carrier_on(netdev);
631  			netif_tx_start_all_queues(netdev);
632  		} else {
633  			netif_carrier_off(netdev);
634  			netif_tx_stop_all_queues(netdev);
635  		}
636  		port_priv->link_state = state.up;
637  	}
638  
639  	return 0;
640  }
641  
642  /* Manage all NAPI instances for the control interface.
643   *
644   * We only have one RX queue and one Tx Conf queue for all
645   * switch ports. Therefore, we only need to enable the NAPI instance once, the
646   * first time one of the switch ports runs .dev_open().
647   */
648  
dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core * ethsw)649  static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
650  {
651  	int i;
652  
653  	/* Access to the ethsw->napi_users relies on the RTNL lock */
654  	ASSERT_RTNL();
655  
656  	/* a new interface is using the NAPI instance */
657  	ethsw->napi_users++;
658  
659  	/* if there is already a user of the instance, return */
660  	if (ethsw->napi_users > 1)
661  		return;
662  
663  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
664  		napi_enable(&ethsw->fq[i].napi);
665  }
666  
dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core * ethsw)667  static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
668  {
669  	int i;
670  
671  	/* Access to the ethsw->napi_users relies on the RTNL lock */
672  	ASSERT_RTNL();
673  
674  	/* If we are not the last interface using the NAPI, return */
675  	ethsw->napi_users--;
676  	if (ethsw->napi_users)
677  		return;
678  
679  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
680  		napi_disable(&ethsw->fq[i].napi);
681  }
682  
dpaa2_switch_port_open(struct net_device * netdev)683  static int dpaa2_switch_port_open(struct net_device *netdev)
684  {
685  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
686  	struct ethsw_core *ethsw = port_priv->ethsw_data;
687  	int err;
688  
689  	mutex_lock(&port_priv->mac_lock);
690  
691  	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
692  		/* Explicitly set carrier off, otherwise
693  		 * netif_carrier_ok() will return true and cause 'ip link show'
694  		 * to report the LOWER_UP flag, even though the link
695  		 * notification wasn't even received.
696  		 */
697  		netif_carrier_off(netdev);
698  	}
699  
700  	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
701  			     port_priv->ethsw_data->dpsw_handle,
702  			     port_priv->idx);
703  	if (err) {
704  		mutex_unlock(&port_priv->mac_lock);
705  		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
706  		return err;
707  	}
708  
709  	dpaa2_switch_enable_ctrl_if_napi(ethsw);
710  
711  	if (dpaa2_switch_port_is_type_phy(port_priv))
712  		dpaa2_mac_start(port_priv->mac);
713  
714  	mutex_unlock(&port_priv->mac_lock);
715  
716  	return 0;
717  }
718  
dpaa2_switch_port_stop(struct net_device * netdev)719  static int dpaa2_switch_port_stop(struct net_device *netdev)
720  {
721  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
722  	struct ethsw_core *ethsw = port_priv->ethsw_data;
723  	int err;
724  
725  	mutex_lock(&port_priv->mac_lock);
726  
727  	if (dpaa2_switch_port_is_type_phy(port_priv)) {
728  		dpaa2_mac_stop(port_priv->mac);
729  	} else {
730  		netif_tx_stop_all_queues(netdev);
731  		netif_carrier_off(netdev);
732  	}
733  
734  	mutex_unlock(&port_priv->mac_lock);
735  
736  	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
737  			      port_priv->ethsw_data->dpsw_handle,
738  			      port_priv->idx);
739  	if (err) {
740  		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
741  		return err;
742  	}
743  
744  	dpaa2_switch_disable_ctrl_if_napi(ethsw);
745  
746  	return 0;
747  }
748  
dpaa2_switch_port_parent_id(struct net_device * dev,struct netdev_phys_item_id * ppid)749  static int dpaa2_switch_port_parent_id(struct net_device *dev,
750  				       struct netdev_phys_item_id *ppid)
751  {
752  	struct ethsw_port_priv *port_priv = netdev_priv(dev);
753  
754  	ppid->id_len = 1;
755  	ppid->id[0] = port_priv->ethsw_data->dev_id;
756  
757  	return 0;
758  }
759  
dpaa2_switch_port_get_phys_name(struct net_device * netdev,char * name,size_t len)760  static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
761  					   size_t len)
762  {
763  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
764  	int err;
765  
766  	err = snprintf(name, len, "p%d", port_priv->idx);
767  	if (err >= len)
768  		return -EINVAL;
769  
770  	return 0;
771  }
772  
773  struct ethsw_dump_ctx {
774  	struct net_device *dev;
775  	struct sk_buff *skb;
776  	struct netlink_callback *cb;
777  	int idx;
778  };
779  
dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry * entry,struct ethsw_dump_ctx * dump)780  static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
781  				    struct ethsw_dump_ctx *dump)
782  {
783  	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
784  	u32 portid = NETLINK_CB(dump->cb->skb).portid;
785  	u32 seq = dump->cb->nlh->nlmsg_seq;
786  	struct nlmsghdr *nlh;
787  	struct ndmsg *ndm;
788  
789  	if (dump->idx < dump->cb->args[2])
790  		goto skip;
791  
792  	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
793  			sizeof(*ndm), NLM_F_MULTI);
794  	if (!nlh)
795  		return -EMSGSIZE;
796  
797  	ndm = nlmsg_data(nlh);
798  	ndm->ndm_family  = AF_BRIDGE;
799  	ndm->ndm_pad1    = 0;
800  	ndm->ndm_pad2    = 0;
801  	ndm->ndm_flags   = NTF_SELF;
802  	ndm->ndm_type    = 0;
803  	ndm->ndm_ifindex = dump->dev->ifindex;
804  	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
805  
806  	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
807  		goto nla_put_failure;
808  
809  	nlmsg_end(dump->skb, nlh);
810  
811  skip:
812  	dump->idx++;
813  	return 0;
814  
815  nla_put_failure:
816  	nlmsg_cancel(dump->skb, nlh);
817  	return -EMSGSIZE;
818  }
819  
dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry * entry,struct ethsw_port_priv * port_priv)820  static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
821  					     struct ethsw_port_priv *port_priv)
822  {
823  	int idx = port_priv->idx;
824  	int valid;
825  
826  	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
827  		valid = entry->if_info == port_priv->idx;
828  	else
829  		valid = entry->if_mask[idx / 8] & BIT(idx % 8);
830  
831  	return valid;
832  }
833  
dpaa2_switch_fdb_iterate(struct ethsw_port_priv * port_priv,dpaa2_switch_fdb_cb_t cb,void * data)834  static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
835  				    dpaa2_switch_fdb_cb_t cb, void *data)
836  {
837  	struct net_device *net_dev = port_priv->netdev;
838  	struct ethsw_core *ethsw = port_priv->ethsw_data;
839  	struct device *dev = net_dev->dev.parent;
840  	struct fdb_dump_entry *fdb_entries;
841  	struct fdb_dump_entry fdb_entry;
842  	dma_addr_t fdb_dump_iova;
843  	u16 num_fdb_entries;
844  	u32 fdb_dump_size;
845  	int err = 0, i;
846  	u8 *dma_mem;
847  	u16 fdb_id;
848  
849  	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
850  	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
851  	if (!dma_mem)
852  		return -ENOMEM;
853  
854  	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
855  				       DMA_FROM_DEVICE);
856  	if (dma_mapping_error(dev, fdb_dump_iova)) {
857  		netdev_err(net_dev, "dma_map_single() failed\n");
858  		err = -ENOMEM;
859  		goto err_map;
860  	}
861  
862  	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
863  	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
864  			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
865  	if (err) {
866  		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
867  		goto err_dump;
868  	}
869  
870  	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
871  
872  	fdb_entries = (struct fdb_dump_entry *)dma_mem;
873  	for (i = 0; i < num_fdb_entries; i++) {
874  		fdb_entry = fdb_entries[i];
875  
876  		err = cb(port_priv, &fdb_entry, data);
877  		if (err)
878  			goto end;
879  	}
880  
881  end:
882  	kfree(dma_mem);
883  
884  	return 0;
885  
886  err_dump:
887  	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
888  err_map:
889  	kfree(dma_mem);
890  	return err;
891  }
892  
dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv * port_priv,struct fdb_dump_entry * fdb_entry,void * data)893  static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
894  				       struct fdb_dump_entry *fdb_entry,
895  				       void *data)
896  {
897  	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
898  		return 0;
899  
900  	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
901  }
902  
dpaa2_switch_port_fdb_dump(struct sk_buff * skb,struct netlink_callback * cb,struct net_device * net_dev,struct net_device * filter_dev,int * idx)903  static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
904  				      struct net_device *net_dev,
905  				      struct net_device *filter_dev, int *idx)
906  {
907  	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
908  	struct ethsw_dump_ctx dump = {
909  		.dev = net_dev,
910  		.skb = skb,
911  		.cb = cb,
912  		.idx = *idx,
913  	};
914  	int err;
915  
916  	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
917  	*idx = dump.idx;
918  
919  	return err;
920  }
921  
dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv * port_priv,struct fdb_dump_entry * fdb_entry,void * data __always_unused)922  static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
923  					   struct fdb_dump_entry *fdb_entry,
924  					   void *data __always_unused)
925  {
926  	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
927  		return 0;
928  
929  	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
930  		return 0;
931  
932  	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
933  		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
934  	else
935  		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
936  
937  	return 0;
938  }
939  
dpaa2_switch_port_fast_age(struct ethsw_port_priv * port_priv)940  static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
941  {
942  	dpaa2_switch_fdb_iterate(port_priv,
943  				 dpaa2_switch_fdb_entry_fast_age, NULL);
944  }
945  
dpaa2_switch_port_vlan_add(struct net_device * netdev,__be16 proto,u16 vid)946  static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
947  				      u16 vid)
948  {
949  	struct switchdev_obj_port_vlan vlan = {
950  		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
951  		.vid = vid,
952  		.obj.orig_dev = netdev,
953  		/* This API only allows programming tagged, non-PVID VIDs */
954  		.flags = 0,
955  	};
956  
957  	return dpaa2_switch_port_vlans_add(netdev, &vlan);
958  }
959  
dpaa2_switch_port_vlan_kill(struct net_device * netdev,__be16 proto,u16 vid)960  static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
961  				       u16 vid)
962  {
963  	struct switchdev_obj_port_vlan vlan = {
964  		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
965  		.vid = vid,
966  		.obj.orig_dev = netdev,
967  		/* This API only allows programming tagged, non-PVID VIDs */
968  		.flags = 0,
969  	};
970  
971  	return dpaa2_switch_port_vlans_del(netdev, &vlan);
972  }
973  
dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv * port_priv)974  static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
975  {
976  	struct ethsw_core *ethsw = port_priv->ethsw_data;
977  	struct net_device *net_dev = port_priv->netdev;
978  	struct device *dev = net_dev->dev.parent;
979  	u8 mac_addr[ETH_ALEN];
980  	int err;
981  
982  	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
983  		return 0;
984  
985  	/* Get firmware address, if any */
986  	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
987  					port_priv->idx, mac_addr);
988  	if (err) {
989  		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
990  		return err;
991  	}
992  
993  	/* First check if firmware has any address configured by bootloader */
994  	if (!is_zero_ether_addr(mac_addr)) {
995  		eth_hw_addr_set(net_dev, mac_addr);
996  	} else {
997  		/* No MAC address configured, fill in net_dev->dev_addr
998  		 * with a random one
999  		 */
1000  		eth_hw_addr_random(net_dev);
1001  		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
1002  
1003  		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
1004  		 * practical purposes, this will be our "permanent" mac address,
1005  		 * at least until the next reboot. This move will also permit
1006  		 * register_netdevice() to properly fill up net_dev->perm_addr.
1007  		 */
1008  		net_dev->addr_assign_type = NET_ADDR_PERM;
1009  	}
1010  
1011  	return 0;
1012  }
1013  
dpaa2_switch_free_fd(const struct ethsw_core * ethsw,const struct dpaa2_fd * fd)1014  static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
1015  				 const struct dpaa2_fd *fd)
1016  {
1017  	struct device *dev = ethsw->dev;
1018  	unsigned char *buffer_start;
1019  	struct sk_buff **skbh, *skb;
1020  	dma_addr_t fd_addr;
1021  
1022  	fd_addr = dpaa2_fd_get_addr(fd);
1023  	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
1024  
1025  	skb = *skbh;
1026  	buffer_start = (unsigned char *)skbh;
1027  
1028  	dma_unmap_single(dev, fd_addr,
1029  			 skb_tail_pointer(skb) - buffer_start,
1030  			 DMA_TO_DEVICE);
1031  
1032  	/* Move on with skb release */
1033  	dev_kfree_skb(skb);
1034  }
1035  
dpaa2_switch_build_single_fd(struct ethsw_core * ethsw,struct sk_buff * skb,struct dpaa2_fd * fd)1036  static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
1037  					struct sk_buff *skb,
1038  					struct dpaa2_fd *fd)
1039  {
1040  	struct device *dev = ethsw->dev;
1041  	struct sk_buff **skbh;
1042  	dma_addr_t addr;
1043  	u8 *buff_start;
1044  	void *hwa;
1045  
1046  	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
1047  			       DPAA2_SWITCH_TX_BUF_ALIGN,
1048  			       DPAA2_SWITCH_TX_BUF_ALIGN);
1049  
1050  	/* Clear FAS to have consistent values for TX confirmation. It is
1051  	 * located in the first 8 bytes of the buffer's hardware annotation
1052  	 * area
1053  	 */
1054  	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
1055  	memset(hwa, 0, 8);
1056  
1057  	/* Store a backpointer to the skb at the beginning of the buffer
1058  	 * (in the private data area) such that we can release it
1059  	 * on Tx confirm
1060  	 */
1061  	skbh = (struct sk_buff **)buff_start;
1062  	*skbh = skb;
1063  
1064  	addr = dma_map_single(dev, buff_start,
1065  			      skb_tail_pointer(skb) - buff_start,
1066  			      DMA_TO_DEVICE);
1067  	if (unlikely(dma_mapping_error(dev, addr)))
1068  		return -ENOMEM;
1069  
1070  	/* Setup the FD fields */
1071  	memset(fd, 0, sizeof(*fd));
1072  
1073  	dpaa2_fd_set_addr(fd, addr);
1074  	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
1075  	dpaa2_fd_set_len(fd, skb->len);
1076  	dpaa2_fd_set_format(fd, dpaa2_fd_single);
1077  
1078  	return 0;
1079  }
1080  
dpaa2_switch_port_tx(struct sk_buff * skb,struct net_device * net_dev)1081  static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
1082  					struct net_device *net_dev)
1083  {
1084  	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
1085  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1086  	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
1087  	struct dpaa2_fd fd;
1088  	int err;
1089  
1090  	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
1091  		struct sk_buff *ns;
1092  
1093  		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
1094  		if (unlikely(!ns)) {
1095  			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
1096  			goto err_free_skb;
1097  		}
1098  		dev_consume_skb_any(skb);
1099  		skb = ns;
1100  	}
1101  
1102  	/* We'll be holding a back-reference to the skb until Tx confirmation */
1103  	skb = skb_unshare(skb, GFP_ATOMIC);
1104  	if (unlikely(!skb)) {
1105  		/* skb_unshare() has already freed the skb */
1106  		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
1107  		goto err_exit;
1108  	}
1109  
1110  	/* At this stage, we do not support non-linear skbs so just try to
1111  	 * linearize the skb and if that's not working, just drop the packet.
1112  	 */
1113  	err = skb_linearize(skb);
1114  	if (err) {
1115  		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
1116  		goto err_free_skb;
1117  	}
1118  
1119  	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
1120  	if (unlikely(err)) {
1121  		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
1122  		goto err_free_skb;
1123  	}
1124  
1125  	do {
1126  		err = dpaa2_io_service_enqueue_qd(NULL,
1127  						  port_priv->tx_qdid,
1128  						  8, 0, &fd);
1129  		retries--;
1130  	} while (err == -EBUSY && retries);
1131  
1132  	if (unlikely(err < 0)) {
1133  		dpaa2_switch_free_fd(ethsw, &fd);
1134  		goto err_exit;
1135  	}
1136  
1137  	return NETDEV_TX_OK;
1138  
1139  err_free_skb:
1140  	dev_kfree_skb(skb);
1141  err_exit:
1142  	return NETDEV_TX_OK;
1143  }
1144  
1145  static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block * filter_block,struct flow_cls_offload * f)1146  dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
1147  				 struct flow_cls_offload *f)
1148  {
1149  	switch (f->command) {
1150  	case FLOW_CLS_REPLACE:
1151  		return dpaa2_switch_cls_flower_replace(filter_block, f);
1152  	case FLOW_CLS_DESTROY:
1153  		return dpaa2_switch_cls_flower_destroy(filter_block, f);
1154  	default:
1155  		return -EOPNOTSUPP;
1156  	}
1157  }
1158  
1159  static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block * block,struct tc_cls_matchall_offload * f)1160  dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
1161  				   struct tc_cls_matchall_offload *f)
1162  {
1163  	switch (f->command) {
1164  	case TC_CLSMATCHALL_REPLACE:
1165  		return dpaa2_switch_cls_matchall_replace(block, f);
1166  	case TC_CLSMATCHALL_DESTROY:
1167  		return dpaa2_switch_cls_matchall_destroy(block, f);
1168  	default:
1169  		return -EOPNOTSUPP;
1170  	}
1171  }
1172  
dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,void * type_data,void * cb_priv)1173  static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
1174  						  void *type_data,
1175  						  void *cb_priv)
1176  {
1177  	switch (type) {
1178  	case TC_SETUP_CLSFLOWER:
1179  		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
1180  	case TC_SETUP_CLSMATCHALL:
1181  		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
1182  	default:
1183  		return -EOPNOTSUPP;
1184  	}
1185  }
1186  
1187  static LIST_HEAD(dpaa2_switch_block_cb_list);
1188  
1189  static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv * port_priv,struct dpaa2_switch_filter_block * block)1190  dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
1191  			       struct dpaa2_switch_filter_block *block)
1192  {
1193  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1194  	struct net_device *netdev = port_priv->netdev;
1195  	struct dpsw_acl_if_cfg acl_if_cfg;
1196  	int err;
1197  
1198  	if (port_priv->filter_block)
1199  		return -EINVAL;
1200  
1201  	acl_if_cfg.if_id[0] = port_priv->idx;
1202  	acl_if_cfg.num_ifs = 1;
1203  	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1204  			      block->acl_id, &acl_if_cfg);
1205  	if (err) {
1206  		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1207  		return err;
1208  	}
1209  
1210  	block->ports |= BIT(port_priv->idx);
1211  	port_priv->filter_block = block;
1212  
1213  	return 0;
1214  }
1215  
1216  static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv * port_priv,struct dpaa2_switch_filter_block * block)1217  dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
1218  				 struct dpaa2_switch_filter_block *block)
1219  {
1220  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1221  	struct net_device *netdev = port_priv->netdev;
1222  	struct dpsw_acl_if_cfg acl_if_cfg;
1223  	int err;
1224  
1225  	if (port_priv->filter_block != block)
1226  		return -EINVAL;
1227  
1228  	acl_if_cfg.if_id[0] = port_priv->idx;
1229  	acl_if_cfg.num_ifs = 1;
1230  	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1231  				 block->acl_id, &acl_if_cfg);
1232  	if (err) {
1233  		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1234  		return err;
1235  	}
1236  
1237  	block->ports &= ~BIT(port_priv->idx);
1238  	port_priv->filter_block = NULL;
1239  	return 0;
1240  }
1241  
dpaa2_switch_port_block_bind(struct ethsw_port_priv * port_priv,struct dpaa2_switch_filter_block * block)1242  static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
1243  					struct dpaa2_switch_filter_block *block)
1244  {
1245  	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
1246  	int err;
1247  
1248  	/* Offload all the mirror entries found in the block on this new port
1249  	 * joining it.
1250  	 */
1251  	err = dpaa2_switch_block_offload_mirror(block, port_priv);
1252  	if (err)
1253  		return err;
1254  
1255  	/* If the port is already bound to this ACL table then do nothing. This
1256  	 * can happen when this port is the first one to join a tc block
1257  	 */
1258  	if (port_priv->filter_block == block)
1259  		return 0;
1260  
1261  	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
1262  	if (err)
1263  		return err;
1264  
1265  	/* Mark the previous ACL table as being unused if this was the last
1266  	 * port that was using it.
1267  	 */
1268  	if (old_block->ports == 0)
1269  		old_block->in_use = false;
1270  
1271  	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
1272  }
1273  
1274  static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv * port_priv,struct dpaa2_switch_filter_block * block)1275  dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
1276  			       struct dpaa2_switch_filter_block *block)
1277  {
1278  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1279  	struct dpaa2_switch_filter_block *new_block;
1280  	int err;
1281  
1282  	/* Unoffload all the mirror entries found in the block from the
1283  	 * port leaving it.
1284  	 */
1285  	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
1286  	if (err)
1287  		return err;
1288  
1289  	/* We are the last port that leaves a block (an ACL table).
1290  	 * We'll continue to use this table.
1291  	 */
1292  	if (block->ports == BIT(port_priv->idx))
1293  		return 0;
1294  
1295  	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
1296  	if (err)
1297  		return err;
1298  
1299  	if (block->ports == 0)
1300  		block->in_use = false;
1301  
1302  	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
1303  	new_block->in_use = true;
1304  	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
1305  }
1306  
dpaa2_switch_setup_tc_block_bind(struct net_device * netdev,struct flow_block_offload * f)1307  static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
1308  					    struct flow_block_offload *f)
1309  {
1310  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1311  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1312  	struct dpaa2_switch_filter_block *filter_block;
1313  	struct flow_block_cb *block_cb;
1314  	bool register_block = false;
1315  	int err;
1316  
1317  	block_cb = flow_block_cb_lookup(f->block,
1318  					dpaa2_switch_port_setup_tc_block_cb_ig,
1319  					ethsw);
1320  
1321  	if (!block_cb) {
1322  		/* If the filter block is not already known, then this port
1323  		 * must be the first to join it. In this case, we can just
1324  		 * continue to use our private table
1325  		 */
1326  		filter_block = port_priv->filter_block;
1327  
1328  		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
1329  					       ethsw, filter_block, NULL);
1330  		if (IS_ERR(block_cb))
1331  			return PTR_ERR(block_cb);
1332  
1333  		register_block = true;
1334  	} else {
1335  		filter_block = flow_block_cb_priv(block_cb);
1336  	}
1337  
1338  	flow_block_cb_incref(block_cb);
1339  	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
1340  	if (err)
1341  		goto err_block_bind;
1342  
1343  	if (register_block) {
1344  		flow_block_cb_add(block_cb, f);
1345  		list_add_tail(&block_cb->driver_list,
1346  			      &dpaa2_switch_block_cb_list);
1347  	}
1348  
1349  	return 0;
1350  
1351  err_block_bind:
1352  	if (!flow_block_cb_decref(block_cb))
1353  		flow_block_cb_free(block_cb);
1354  	return err;
1355  }
1356  
dpaa2_switch_setup_tc_block_unbind(struct net_device * netdev,struct flow_block_offload * f)1357  static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
1358  					       struct flow_block_offload *f)
1359  {
1360  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1361  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1362  	struct dpaa2_switch_filter_block *filter_block;
1363  	struct flow_block_cb *block_cb;
1364  	int err;
1365  
1366  	block_cb = flow_block_cb_lookup(f->block,
1367  					dpaa2_switch_port_setup_tc_block_cb_ig,
1368  					ethsw);
1369  	if (!block_cb)
1370  		return;
1371  
1372  	filter_block = flow_block_cb_priv(block_cb);
1373  	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
1374  	if (!err && !flow_block_cb_decref(block_cb)) {
1375  		flow_block_cb_remove(block_cb, f);
1376  		list_del(&block_cb->driver_list);
1377  	}
1378  }
1379  
dpaa2_switch_setup_tc_block(struct net_device * netdev,struct flow_block_offload * f)1380  static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
1381  				       struct flow_block_offload *f)
1382  {
1383  	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1384  		return -EOPNOTSUPP;
1385  
1386  	f->driver_block_list = &dpaa2_switch_block_cb_list;
1387  
1388  	switch (f->command) {
1389  	case FLOW_BLOCK_BIND:
1390  		return dpaa2_switch_setup_tc_block_bind(netdev, f);
1391  	case FLOW_BLOCK_UNBIND:
1392  		dpaa2_switch_setup_tc_block_unbind(netdev, f);
1393  		return 0;
1394  	default:
1395  		return -EOPNOTSUPP;
1396  	}
1397  }
1398  
dpaa2_switch_port_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)1399  static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
1400  				      enum tc_setup_type type,
1401  				      void *type_data)
1402  {
1403  	switch (type) {
1404  	case TC_SETUP_BLOCK: {
1405  		return dpaa2_switch_setup_tc_block(netdev, type_data);
1406  	}
1407  	default:
1408  		return -EOPNOTSUPP;
1409  	}
1410  
1411  	return 0;
1412  }
1413  
1414  static const struct net_device_ops dpaa2_switch_port_ops = {
1415  	.ndo_open		= dpaa2_switch_port_open,
1416  	.ndo_stop		= dpaa2_switch_port_stop,
1417  
1418  	.ndo_set_mac_address	= eth_mac_addr,
1419  	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
1420  	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
1421  	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
1422  	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
1423  	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
1424  	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
1425  	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,
1426  
1427  	.ndo_start_xmit		= dpaa2_switch_port_tx,
1428  	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
1429  	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
1430  	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
1431  };
1432  
dpaa2_switch_port_dev_check(const struct net_device * netdev)1433  bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
1434  {
1435  	return netdev->netdev_ops == &dpaa2_switch_port_ops;
1436  }
1437  
dpaa2_switch_port_connect_mac(struct ethsw_port_priv * port_priv)1438  static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
1439  {
1440  	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
1441  	struct dpaa2_mac *mac;
1442  	int err;
1443  
1444  	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
1445  	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
1446  
1447  	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
1448  		return PTR_ERR(dpmac_dev);
1449  
1450  	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
1451  		return 0;
1452  
1453  	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
1454  	if (!mac)
1455  		return -ENOMEM;
1456  
1457  	mac->mc_dev = dpmac_dev;
1458  	mac->mc_io = port_priv->ethsw_data->mc_io;
1459  	mac->net_dev = port_priv->netdev;
1460  
1461  	err = dpaa2_mac_open(mac);
1462  	if (err)
1463  		goto err_free_mac;
1464  
1465  	if (dpaa2_mac_is_type_phy(mac)) {
1466  		err = dpaa2_mac_connect(mac);
1467  		if (err) {
1468  			netdev_err(port_priv->netdev,
1469  				   "Error connecting to the MAC endpoint %pe\n",
1470  				   ERR_PTR(err));
1471  			goto err_close_mac;
1472  		}
1473  	}
1474  
1475  	mutex_lock(&port_priv->mac_lock);
1476  	port_priv->mac = mac;
1477  	mutex_unlock(&port_priv->mac_lock);
1478  
1479  	return 0;
1480  
1481  err_close_mac:
1482  	dpaa2_mac_close(mac);
1483  err_free_mac:
1484  	kfree(mac);
1485  	return err;
1486  }
1487  
dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv * port_priv)1488  static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
1489  {
1490  	struct dpaa2_mac *mac;
1491  
1492  	mutex_lock(&port_priv->mac_lock);
1493  	mac = port_priv->mac;
1494  	port_priv->mac = NULL;
1495  	mutex_unlock(&port_priv->mac_lock);
1496  
1497  	if (!mac)
1498  		return;
1499  
1500  	if (dpaa2_mac_is_type_phy(mac))
1501  		dpaa2_mac_disconnect(mac);
1502  
1503  	dpaa2_mac_close(mac);
1504  	kfree(mac);
1505  }
1506  
dpaa2_switch_irq0_handler_thread(int irq_num,void * arg)1507  static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
1508  {
1509  	struct device *dev = (struct device *)arg;
1510  	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1511  	struct ethsw_port_priv *port_priv;
1512  	int err, if_id;
1513  	bool had_mac;
1514  	u32 status;
1515  
1516  	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1517  				  DPSW_IRQ_INDEX_IF, &status);
1518  	if (err) {
1519  		dev_err(dev, "Can't get irq status (err %d)\n", err);
1520  		goto out;
1521  	}
1522  
1523  	if_id = (status & 0xFFFF0000) >> 16;
1524  	port_priv = ethsw->ports[if_id];
1525  
1526  	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
1527  		dpaa2_switch_port_link_state_update(port_priv->netdev);
1528  
1529  	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
1530  		dpaa2_switch_port_set_mac_addr(port_priv);
1531  		/* We can avoid locking because the "endpoint changed" IRQ
1532  		 * handler is the only one who changes priv->mac at runtime,
1533  		 * so we are not racing with anyone.
1534  		 */
1535  		had_mac = !!port_priv->mac;
1536  		if (had_mac)
1537  			dpaa2_switch_port_disconnect_mac(port_priv);
1538  		else
1539  			dpaa2_switch_port_connect_mac(port_priv);
1540  	}
1541  
1542  	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1543  				    DPSW_IRQ_INDEX_IF, status);
1544  	if (err)
1545  		dev_err(dev, "Can't clear irq status (err %d)\n", err);
1546  
1547  out:
1548  	return IRQ_HANDLED;
1549  }
1550  
dpaa2_switch_setup_irqs(struct fsl_mc_device * sw_dev)1551  static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
1552  {
1553  	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
1554  	struct device *dev = &sw_dev->dev;
1555  	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1556  	struct fsl_mc_device_irq *irq;
1557  	int err;
1558  
1559  	err = fsl_mc_allocate_irqs(sw_dev);
1560  	if (err) {
1561  		dev_err(dev, "MC irqs allocation failed\n");
1562  		return err;
1563  	}
1564  
1565  	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
1566  		err = -EINVAL;
1567  		goto free_irq;
1568  	}
1569  
1570  	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1571  				  DPSW_IRQ_INDEX_IF, 0);
1572  	if (err) {
1573  		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1574  		goto free_irq;
1575  	}
1576  
1577  	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
1578  
1579  	err = devm_request_threaded_irq(dev, irq->virq, NULL,
1580  					dpaa2_switch_irq0_handler_thread,
1581  					IRQF_NO_SUSPEND | IRQF_ONESHOT,
1582  					dev_name(dev), dev);
1583  	if (err) {
1584  		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
1585  		goto free_irq;
1586  	}
1587  
1588  	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
1589  				DPSW_IRQ_INDEX_IF, mask);
1590  	if (err) {
1591  		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
1592  		goto free_devm_irq;
1593  	}
1594  
1595  	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1596  				  DPSW_IRQ_INDEX_IF, 1);
1597  	if (err) {
1598  		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
1599  		goto free_devm_irq;
1600  	}
1601  
1602  	return 0;
1603  
1604  free_devm_irq:
1605  	devm_free_irq(dev, irq->virq, dev);
1606  free_irq:
1607  	fsl_mc_free_irqs(sw_dev);
1608  	return err;
1609  }
1610  
dpaa2_switch_teardown_irqs(struct fsl_mc_device * sw_dev)1611  static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
1612  {
1613  	struct device *dev = &sw_dev->dev;
1614  	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1615  	int err;
1616  
1617  	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1618  				  DPSW_IRQ_INDEX_IF, 0);
1619  	if (err)
1620  		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1621  
1622  	fsl_mc_free_irqs(sw_dev);
1623  }
1624  
dpaa2_switch_port_set_learning(struct ethsw_port_priv * port_priv,bool enable)1625  static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
1626  {
1627  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1628  	enum dpsw_learning_mode learn_mode;
1629  	int err;
1630  
1631  	if (enable)
1632  		learn_mode = DPSW_LEARNING_MODE_HW;
1633  	else
1634  		learn_mode = DPSW_LEARNING_MODE_DIS;
1635  
1636  	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
1637  					port_priv->idx, learn_mode);
1638  	if (err)
1639  		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
1640  
1641  	if (!enable)
1642  		dpaa2_switch_port_fast_age(port_priv);
1643  
1644  	return err;
1645  }
1646  
dpaa2_switch_port_attr_stp_state_set(struct net_device * netdev,u8 state)1647  static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
1648  						u8 state)
1649  {
1650  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1651  	int err;
1652  
1653  	err = dpaa2_switch_port_set_stp_state(port_priv, state);
1654  	if (err)
1655  		return err;
1656  
1657  	switch (state) {
1658  	case BR_STATE_DISABLED:
1659  	case BR_STATE_BLOCKING:
1660  	case BR_STATE_LISTENING:
1661  		err = dpaa2_switch_port_set_learning(port_priv, false);
1662  		break;
1663  	case BR_STATE_LEARNING:
1664  	case BR_STATE_FORWARDING:
1665  		err = dpaa2_switch_port_set_learning(port_priv,
1666  						     port_priv->learn_ena);
1667  		break;
1668  	}
1669  
1670  	return err;
1671  }
1672  
dpaa2_switch_port_flood(struct ethsw_port_priv * port_priv,struct switchdev_brport_flags flags)1673  static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
1674  				   struct switchdev_brport_flags flags)
1675  {
1676  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1677  
1678  	if (flags.mask & BR_BCAST_FLOOD)
1679  		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
1680  
1681  	if (flags.mask & BR_FLOOD)
1682  		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
1683  
1684  	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
1685  }
1686  
dpaa2_switch_port_pre_bridge_flags(struct net_device * netdev,struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)1687  static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
1688  					      struct switchdev_brport_flags flags,
1689  					      struct netlink_ext_ack *extack)
1690  {
1691  	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
1692  			   BR_MCAST_FLOOD))
1693  		return -EINVAL;
1694  
1695  	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
1696  		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
1697  		bool unicast = !!(flags.val & BR_FLOOD);
1698  
1699  		if (unicast != multicast) {
1700  			NL_SET_ERR_MSG_MOD(extack,
1701  					   "Cannot configure multicast flooding independently of unicast");
1702  			return -EINVAL;
1703  		}
1704  	}
1705  
1706  	return 0;
1707  }
1708  
dpaa2_switch_port_bridge_flags(struct net_device * netdev,struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)1709  static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
1710  					  struct switchdev_brport_flags flags,
1711  					  struct netlink_ext_ack *extack)
1712  {
1713  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1714  	int err;
1715  
1716  	if (flags.mask & BR_LEARNING) {
1717  		bool learn_ena = !!(flags.val & BR_LEARNING);
1718  
1719  		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
1720  		if (err)
1721  			return err;
1722  		port_priv->learn_ena = learn_ena;
1723  	}
1724  
1725  	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
1726  		err = dpaa2_switch_port_flood(port_priv, flags);
1727  		if (err)
1728  			return err;
1729  	}
1730  
1731  	return 0;
1732  }
1733  
dpaa2_switch_port_attr_set(struct net_device * netdev,const void * ctx,const struct switchdev_attr * attr,struct netlink_ext_ack * extack)1734  static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
1735  				      const struct switchdev_attr *attr,
1736  				      struct netlink_ext_ack *extack)
1737  {
1738  	int err = 0;
1739  
1740  	switch (attr->id) {
1741  	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
1742  		err = dpaa2_switch_port_attr_stp_state_set(netdev,
1743  							   attr->u.stp_state);
1744  		break;
1745  	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
1746  		if (!attr->u.vlan_filtering) {
1747  			NL_SET_ERR_MSG_MOD(extack,
1748  					   "The DPAA2 switch does not support VLAN-unaware operation");
1749  			return -EOPNOTSUPP;
1750  		}
1751  		break;
1752  	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
1753  		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
1754  		break;
1755  	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
1756  		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
1757  		break;
1758  	default:
1759  		err = -EOPNOTSUPP;
1760  		break;
1761  	}
1762  
1763  	return err;
1764  }
1765  
dpaa2_switch_port_vlans_add(struct net_device * netdev,const struct switchdev_obj_port_vlan * vlan)1766  int dpaa2_switch_port_vlans_add(struct net_device *netdev,
1767  				const struct switchdev_obj_port_vlan *vlan)
1768  {
1769  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1770  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1771  	struct dpsw_attr *attr = &ethsw->sw_attr;
1772  	int err = 0;
1773  
1774  	/* Make sure that the VLAN is not already configured
1775  	 * on the switch port
1776  	 */
1777  	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) {
1778  		netdev_err(netdev, "VLAN %d already configured\n", vlan->vid);
1779  		return -EEXIST;
1780  	}
1781  
1782  	/* Check if there is space for a new VLAN */
1783  	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1784  				  &ethsw->sw_attr);
1785  	if (err) {
1786  		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
1787  		return err;
1788  	}
1789  	if (attr->max_vlans - attr->num_vlans < 1)
1790  		return -ENOSPC;
1791  
1792  	/* Check if there is space for a new VLAN */
1793  	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1794  				  &ethsw->sw_attr);
1795  	if (err) {
1796  		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
1797  		return err;
1798  	}
1799  	if (attr->max_vlans - attr->num_vlans < 1)
1800  		return -ENOSPC;
1801  
1802  	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
1803  		/* this is a new VLAN */
1804  		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
1805  		if (err)
1806  			return err;
1807  
1808  		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
1809  	}
1810  
1811  	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
1812  }
1813  
dpaa2_switch_port_lookup_address(struct net_device * netdev,int is_uc,const unsigned char * addr)1814  static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
1815  					    const unsigned char *addr)
1816  {
1817  	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
1818  	struct netdev_hw_addr *ha;
1819  
1820  	netif_addr_lock_bh(netdev);
1821  	list_for_each_entry(ha, &list->list, list) {
1822  		if (ether_addr_equal(ha->addr, addr)) {
1823  			netif_addr_unlock_bh(netdev);
1824  			return 1;
1825  		}
1826  	}
1827  	netif_addr_unlock_bh(netdev);
1828  	return 0;
1829  }
1830  
dpaa2_switch_port_mdb_add(struct net_device * netdev,const struct switchdev_obj_port_mdb * mdb)1831  static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
1832  				     const struct switchdev_obj_port_mdb *mdb)
1833  {
1834  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1835  	int err;
1836  
1837  	/* Check if address is already set on this port */
1838  	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1839  		return -EEXIST;
1840  
1841  	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
1842  	if (err)
1843  		return err;
1844  
1845  	err = dev_mc_add(netdev, mdb->addr);
1846  	if (err) {
1847  		netdev_err(netdev, "dev_mc_add err %d\n", err);
1848  		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1849  	}
1850  
1851  	return err;
1852  }
1853  
dpaa2_switch_port_obj_add(struct net_device * netdev,const struct switchdev_obj * obj)1854  static int dpaa2_switch_port_obj_add(struct net_device *netdev,
1855  				     const struct switchdev_obj *obj)
1856  {
1857  	int err;
1858  
1859  	switch (obj->id) {
1860  	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1861  		err = dpaa2_switch_port_vlans_add(netdev,
1862  						  SWITCHDEV_OBJ_PORT_VLAN(obj));
1863  		break;
1864  	case SWITCHDEV_OBJ_ID_PORT_MDB:
1865  		err = dpaa2_switch_port_mdb_add(netdev,
1866  						SWITCHDEV_OBJ_PORT_MDB(obj));
1867  		break;
1868  	default:
1869  		err = -EOPNOTSUPP;
1870  		break;
1871  	}
1872  
1873  	return err;
1874  }
1875  
dpaa2_switch_port_del_vlan(struct ethsw_port_priv * port_priv,u16 vid)1876  static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
1877  {
1878  	struct ethsw_core *ethsw = port_priv->ethsw_data;
1879  	struct net_device *netdev = port_priv->netdev;
1880  	struct dpsw_vlan_if_cfg vcfg;
1881  	int i, err;
1882  
1883  	if (!port_priv->vlans[vid])
1884  		return -ENOENT;
1885  
1886  	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
1887  		/* If we are deleting the PVID of a port, use VLAN 4095 instead
1888  		 * as we are sure that neither the bridge nor the 8021q module
1889  		 * will use it
1890  		 */
1891  		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
1892  		if (err)
1893  			return err;
1894  	}
1895  
1896  	vcfg.num_ifs = 1;
1897  	vcfg.if_id[0] = port_priv->idx;
1898  	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
1899  		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
1900  						   ethsw->dpsw_handle,
1901  						   vid, &vcfg);
1902  		if (err) {
1903  			netdev_err(netdev,
1904  				   "dpsw_vlan_remove_if_untagged err %d\n",
1905  				   err);
1906  		}
1907  		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
1908  	}
1909  
1910  	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
1911  		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1912  					  vid, &vcfg);
1913  		if (err) {
1914  			netdev_err(netdev,
1915  				   "dpsw_vlan_remove_if err %d\n", err);
1916  			return err;
1917  		}
1918  		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
1919  
1920  		/* Delete VLAN from switch if it is no longer configured on
1921  		 * any port
1922  		 */
1923  		for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1924  			if (ethsw->ports[i] &&
1925  			    ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
1926  				return 0; /* Found a port member in VID */
1927  		}
1928  
1929  		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
1930  
1931  		err = dpaa2_switch_dellink(ethsw, vid);
1932  		if (err)
1933  			return err;
1934  	}
1935  
1936  	return 0;
1937  }
1938  
dpaa2_switch_port_vlans_del(struct net_device * netdev,const struct switchdev_obj_port_vlan * vlan)1939  int dpaa2_switch_port_vlans_del(struct net_device *netdev,
1940  				const struct switchdev_obj_port_vlan *vlan)
1941  {
1942  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1943  
1944  	if (netif_is_bridge_master(vlan->obj.orig_dev))
1945  		return -EOPNOTSUPP;
1946  
1947  	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
1948  }
1949  
dpaa2_switch_port_mdb_del(struct net_device * netdev,const struct switchdev_obj_port_mdb * mdb)1950  static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
1951  				     const struct switchdev_obj_port_mdb *mdb)
1952  {
1953  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1954  	int err;
1955  
1956  	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1957  		return -ENOENT;
1958  
1959  	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1960  	if (err)
1961  		return err;
1962  
1963  	err = dev_mc_del(netdev, mdb->addr);
1964  	if (err) {
1965  		netdev_err(netdev, "dev_mc_del err %d\n", err);
1966  		return err;
1967  	}
1968  
1969  	return err;
1970  }
1971  
dpaa2_switch_port_obj_del(struct net_device * netdev,const struct switchdev_obj * obj)1972  static int dpaa2_switch_port_obj_del(struct net_device *netdev,
1973  				     const struct switchdev_obj *obj)
1974  {
1975  	int err;
1976  
1977  	switch (obj->id) {
1978  	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1979  		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
1980  		break;
1981  	case SWITCHDEV_OBJ_ID_PORT_MDB:
1982  		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
1983  		break;
1984  	default:
1985  		err = -EOPNOTSUPP;
1986  		break;
1987  	}
1988  	return err;
1989  }
1990  
dpaa2_switch_port_attr_set_event(struct net_device * netdev,struct switchdev_notifier_port_attr_info * ptr)1991  static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
1992  					    struct switchdev_notifier_port_attr_info *ptr)
1993  {
1994  	int err;
1995  
1996  	err = switchdev_handle_port_attr_set(netdev, ptr,
1997  					     dpaa2_switch_port_dev_check,
1998  					     dpaa2_switch_port_attr_set);
1999  	return notifier_from_errno(err);
2000  }
2001  
dpaa2_switch_port_bridge_join(struct net_device * netdev,struct net_device * upper_dev,struct netlink_ext_ack * extack)2002  static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
2003  					 struct net_device *upper_dev,
2004  					 struct netlink_ext_ack *extack)
2005  {
2006  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
2007  	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
2008  	struct ethsw_core *ethsw = port_priv->ethsw_data;
2009  	bool learn_ena;
2010  	int err;
2011  
2012  	/* Delete the previously manually installed VLAN 1 */
2013  	err = dpaa2_switch_port_del_vlan(port_priv, 1);
2014  	if (err)
2015  		return err;
2016  
2017  	dpaa2_switch_port_set_fdb(port_priv, upper_dev);
2018  
2019  	/* Inherit the initial bridge port learning state */
2020  	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
2021  	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
2022  	port_priv->learn_ena = learn_ena;
2023  
2024  	/* Setup the egress flood policy (broadcast, unknown unicast) */
2025  	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2026  	if (err)
2027  		goto err_egress_flood;
2028  
2029  	/* Recreate the egress flood domain of the FDB that we just left. */
2030  	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
2031  	if (err)
2032  		goto err_egress_flood;
2033  
2034  	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
2035  					    NULL, NULL, false, extack);
2036  	if (err)
2037  		goto err_switchdev_offload;
2038  
2039  	return 0;
2040  
2041  err_switchdev_offload:
2042  err_egress_flood:
2043  	dpaa2_switch_port_set_fdb(port_priv, NULL);
2044  	return err;
2045  }
2046  
dpaa2_switch_port_clear_rxvlan(struct net_device * vdev,int vid,void * arg)2047  static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
2048  {
2049  	__be16 vlan_proto = htons(ETH_P_8021Q);
2050  
2051  	if (vdev)
2052  		vlan_proto = vlan_dev_vlan_proto(vdev);
2053  
2054  	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
2055  }
2056  
dpaa2_switch_port_restore_rxvlan(struct net_device * vdev,int vid,void * arg)2057  static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
2058  {
2059  	__be16 vlan_proto = htons(ETH_P_8021Q);
2060  
2061  	if (vdev)
2062  		vlan_proto = vlan_dev_vlan_proto(vdev);
2063  
2064  	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
2065  }
2066  
dpaa2_switch_port_pre_bridge_leave(struct net_device * netdev)2067  static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
2068  {
2069  	switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
2070  }
2071  
dpaa2_switch_port_bridge_leave(struct net_device * netdev)2072  static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
2073  {
2074  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
2075  	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
2076  	struct ethsw_core *ethsw = port_priv->ethsw_data;
2077  	int err;
2078  
2079  	/* First of all, fast age any learn FDB addresses on this switch port */
2080  	dpaa2_switch_port_fast_age(port_priv);
2081  
2082  	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
2083  	 * upper devices or otherwise from the FDB table that we are about to
2084  	 * leave
2085  	 */
2086  	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
2087  	if (err)
2088  		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
2089  
2090  	dpaa2_switch_port_set_fdb(port_priv, NULL);
2091  
2092  	/* Restore all RX VLANs into the new FDB table that we just joined */
2093  	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
2094  	if (err)
2095  		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
2096  
2097  	/* Reset the flooding state to denote that this port can send any
2098  	 * packet in standalone mode. With this, we are also ensuring that any
2099  	 * later bridge join will have the flooding flag on.
2100  	 */
2101  	port_priv->bcast_flood = true;
2102  	port_priv->ucast_flood = true;
2103  
2104  	/* Setup the egress flood policy (broadcast, unknown unicast).
2105  	 * When the port is not under a bridge, only the CTRL interface is part
2106  	 * of the flooding domain besides the actual port
2107  	 */
2108  	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2109  	if (err)
2110  		return err;
2111  
2112  	/* Recreate the egress flood domain of the FDB that we just left */
2113  	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
2114  	if (err)
2115  		return err;
2116  
2117  	/* No HW learning when not under a bridge */
2118  	err = dpaa2_switch_port_set_learning(port_priv, false);
2119  	if (err)
2120  		return err;
2121  	port_priv->learn_ena = false;
2122  
2123  	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
2124  	 * the dpaa2 switch interfaces are not capable to be VLAN unaware
2125  	 */
2126  	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
2127  					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
2128  }
2129  
dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device * netdev)2130  static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
2131  {
2132  	struct net_device *upper_dev;
2133  	struct list_head *iter;
2134  
2135  	/* RCU read lock not necessary because we have write-side protection
2136  	 * (rtnl_mutex), however a non-rcu iterator does not exist.
2137  	 */
2138  	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
2139  		if (is_vlan_dev(upper_dev))
2140  			return -EOPNOTSUPP;
2141  
2142  	return 0;
2143  }
2144  
2145  static int
dpaa2_switch_prechangeupper_sanity_checks(struct net_device * netdev,struct net_device * upper_dev,struct netlink_ext_ack * extack)2146  dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
2147  					  struct net_device *upper_dev,
2148  					  struct netlink_ext_ack *extack)
2149  {
2150  	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
2151  	struct ethsw_port_priv *other_port_priv;
2152  	struct net_device *other_dev;
2153  	struct list_head *iter;
2154  	int err;
2155  
2156  	if (!br_vlan_enabled(upper_dev)) {
2157  		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
2158  		return -EOPNOTSUPP;
2159  	}
2160  
2161  	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
2162  	if (err) {
2163  		NL_SET_ERR_MSG_MOD(extack,
2164  				   "Cannot join a bridge while VLAN uppers are present");
2165  		return 0;
2166  	}
2167  
2168  	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
2169  		if (!dpaa2_switch_port_dev_check(other_dev))
2170  			continue;
2171  
2172  		other_port_priv = netdev_priv(other_dev);
2173  		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
2174  			NL_SET_ERR_MSG_MOD(extack,
2175  					   "Interface from a different DPSW is in the bridge already");
2176  			return -EINVAL;
2177  		}
2178  	}
2179  
2180  	return 0;
2181  }
2182  
dpaa2_switch_port_prechangeupper(struct net_device * netdev,struct netdev_notifier_changeupper_info * info)2183  static int dpaa2_switch_port_prechangeupper(struct net_device *netdev,
2184  					    struct netdev_notifier_changeupper_info *info)
2185  {
2186  	struct netlink_ext_ack *extack;
2187  	struct net_device *upper_dev;
2188  	int err;
2189  
2190  	if (!dpaa2_switch_port_dev_check(netdev))
2191  		return 0;
2192  
2193  	extack = netdev_notifier_info_to_extack(&info->info);
2194  	upper_dev = info->upper_dev;
2195  	if (netif_is_bridge_master(upper_dev)) {
2196  		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
2197  								upper_dev,
2198  								extack);
2199  		if (err)
2200  			return err;
2201  
2202  		if (!info->linking)
2203  			dpaa2_switch_port_pre_bridge_leave(netdev);
2204  	}
2205  
2206  	return 0;
2207  }
2208  
dpaa2_switch_port_changeupper(struct net_device * netdev,struct netdev_notifier_changeupper_info * info)2209  static int dpaa2_switch_port_changeupper(struct net_device *netdev,
2210  					 struct netdev_notifier_changeupper_info *info)
2211  {
2212  	struct netlink_ext_ack *extack;
2213  	struct net_device *upper_dev;
2214  
2215  	if (!dpaa2_switch_port_dev_check(netdev))
2216  		return 0;
2217  
2218  	extack = netdev_notifier_info_to_extack(&info->info);
2219  
2220  	upper_dev = info->upper_dev;
2221  	if (netif_is_bridge_master(upper_dev)) {
2222  		if (info->linking)
2223  			return dpaa2_switch_port_bridge_join(netdev,
2224  							     upper_dev,
2225  							     extack);
2226  		else
2227  			return dpaa2_switch_port_bridge_leave(netdev);
2228  	}
2229  
2230  	return 0;
2231  }
2232  
dpaa2_switch_port_netdevice_event(struct notifier_block * nb,unsigned long event,void * ptr)2233  static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
2234  					     unsigned long event, void *ptr)
2235  {
2236  	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
2237  	int err = 0;
2238  
2239  	switch (event) {
2240  	case NETDEV_PRECHANGEUPPER:
2241  		err = dpaa2_switch_port_prechangeupper(netdev, ptr);
2242  		if (err)
2243  			return notifier_from_errno(err);
2244  
2245  		break;
2246  	case NETDEV_CHANGEUPPER:
2247  		err = dpaa2_switch_port_changeupper(netdev, ptr);
2248  		if (err)
2249  			return notifier_from_errno(err);
2250  
2251  		break;
2252  	}
2253  
2254  	return NOTIFY_DONE;
2255  }
2256  
2257  struct ethsw_switchdev_event_work {
2258  	struct work_struct work;
2259  	struct switchdev_notifier_fdb_info fdb_info;
2260  	struct net_device *dev;
2261  	unsigned long event;
2262  };
2263  
dpaa2_switch_event_work(struct work_struct * work)2264  static void dpaa2_switch_event_work(struct work_struct *work)
2265  {
2266  	struct ethsw_switchdev_event_work *switchdev_work =
2267  		container_of(work, struct ethsw_switchdev_event_work, work);
2268  	struct net_device *dev = switchdev_work->dev;
2269  	struct switchdev_notifier_fdb_info *fdb_info;
2270  	int err;
2271  
2272  	rtnl_lock();
2273  	fdb_info = &switchdev_work->fdb_info;
2274  
2275  	switch (switchdev_work->event) {
2276  	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2277  		if (!fdb_info->added_by_user || fdb_info->is_local)
2278  			break;
2279  		if (is_unicast_ether_addr(fdb_info->addr))
2280  			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
2281  							   fdb_info->addr);
2282  		else
2283  			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
2284  							   fdb_info->addr);
2285  		if (err)
2286  			break;
2287  		fdb_info->offloaded = true;
2288  		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2289  					 &fdb_info->info, NULL);
2290  		break;
2291  	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2292  		if (!fdb_info->added_by_user || fdb_info->is_local)
2293  			break;
2294  		if (is_unicast_ether_addr(fdb_info->addr))
2295  			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
2296  		else
2297  			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
2298  		break;
2299  	}
2300  
2301  	rtnl_unlock();
2302  	kfree(switchdev_work->fdb_info.addr);
2303  	kfree(switchdev_work);
2304  	dev_put(dev);
2305  }
2306  
2307  /* Called under rcu_read_lock() */
dpaa2_switch_port_event(struct notifier_block * nb,unsigned long event,void * ptr)2308  static int dpaa2_switch_port_event(struct notifier_block *nb,
2309  				   unsigned long event, void *ptr)
2310  {
2311  	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2312  	struct ethsw_port_priv *port_priv = netdev_priv(dev);
2313  	struct ethsw_switchdev_event_work *switchdev_work;
2314  	struct switchdev_notifier_fdb_info *fdb_info = ptr;
2315  	struct ethsw_core *ethsw = port_priv->ethsw_data;
2316  
2317  	if (event == SWITCHDEV_PORT_ATTR_SET)
2318  		return dpaa2_switch_port_attr_set_event(dev, ptr);
2319  
2320  	if (!dpaa2_switch_port_dev_check(dev))
2321  		return NOTIFY_DONE;
2322  
2323  	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2324  	if (!switchdev_work)
2325  		return NOTIFY_BAD;
2326  
2327  	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
2328  	switchdev_work->dev = dev;
2329  	switchdev_work->event = event;
2330  
2331  	switch (event) {
2332  	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2333  	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2334  		memcpy(&switchdev_work->fdb_info, ptr,
2335  		       sizeof(switchdev_work->fdb_info));
2336  		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2337  		if (!switchdev_work->fdb_info.addr)
2338  			goto err_addr_alloc;
2339  
2340  		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2341  				fdb_info->addr);
2342  
2343  		/* Take a reference on the device to avoid being freed. */
2344  		dev_hold(dev);
2345  		break;
2346  	default:
2347  		kfree(switchdev_work);
2348  		return NOTIFY_DONE;
2349  	}
2350  
2351  	queue_work(ethsw->workqueue, &switchdev_work->work);
2352  
2353  	return NOTIFY_DONE;
2354  
2355  err_addr_alloc:
2356  	kfree(switchdev_work);
2357  	return NOTIFY_BAD;
2358  }
2359  
dpaa2_switch_port_obj_event(unsigned long event,struct net_device * netdev,struct switchdev_notifier_port_obj_info * port_obj_info)2360  static int dpaa2_switch_port_obj_event(unsigned long event,
2361  				       struct net_device *netdev,
2362  				       struct switchdev_notifier_port_obj_info *port_obj_info)
2363  {
2364  	int err = -EOPNOTSUPP;
2365  
2366  	if (!dpaa2_switch_port_dev_check(netdev))
2367  		return NOTIFY_DONE;
2368  
2369  	switch (event) {
2370  	case SWITCHDEV_PORT_OBJ_ADD:
2371  		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
2372  		break;
2373  	case SWITCHDEV_PORT_OBJ_DEL:
2374  		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
2375  		break;
2376  	}
2377  
2378  	port_obj_info->handled = true;
2379  	return notifier_from_errno(err);
2380  }
2381  
dpaa2_switch_port_blocking_event(struct notifier_block * nb,unsigned long event,void * ptr)2382  static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
2383  					    unsigned long event, void *ptr)
2384  {
2385  	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2386  
2387  	switch (event) {
2388  	case SWITCHDEV_PORT_OBJ_ADD:
2389  	case SWITCHDEV_PORT_OBJ_DEL:
2390  		return dpaa2_switch_port_obj_event(event, dev, ptr);
2391  	case SWITCHDEV_PORT_ATTR_SET:
2392  		return dpaa2_switch_port_attr_set_event(dev, ptr);
2393  	}
2394  
2395  	return NOTIFY_DONE;
2396  }
2397  
2398  /* Build a linear skb based on a single-buffer frame descriptor */
dpaa2_switch_build_linear_skb(struct ethsw_core * ethsw,const struct dpaa2_fd * fd)2399  static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
2400  						     const struct dpaa2_fd *fd)
2401  {
2402  	u16 fd_offset = dpaa2_fd_get_offset(fd);
2403  	dma_addr_t addr = dpaa2_fd_get_addr(fd);
2404  	u32 fd_length = dpaa2_fd_get_len(fd);
2405  	struct device *dev = ethsw->dev;
2406  	struct sk_buff *skb = NULL;
2407  	void *fd_vaddr;
2408  
2409  	fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
2410  	dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
2411  		       DMA_FROM_DEVICE);
2412  
2413  	skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
2414  			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2415  	if (unlikely(!skb)) {
2416  		dev_err(dev, "build_skb() failed\n");
2417  		return NULL;
2418  	}
2419  
2420  	skb_reserve(skb, fd_offset);
2421  	skb_put(skb, fd_length);
2422  
2423  	ethsw->buf_count--;
2424  
2425  	return skb;
2426  }
2427  
dpaa2_switch_tx_conf(struct dpaa2_switch_fq * fq,const struct dpaa2_fd * fd)2428  static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
2429  				 const struct dpaa2_fd *fd)
2430  {
2431  	dpaa2_switch_free_fd(fq->ethsw, fd);
2432  }
2433  
dpaa2_switch_rx(struct dpaa2_switch_fq * fq,const struct dpaa2_fd * fd)2434  static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
2435  			    const struct dpaa2_fd *fd)
2436  {
2437  	struct ethsw_core *ethsw = fq->ethsw;
2438  	struct ethsw_port_priv *port_priv;
2439  	struct net_device *netdev;
2440  	struct vlan_ethhdr *hdr;
2441  	struct sk_buff *skb;
2442  	u16 vlan_tci, vid;
2443  	int if_id, err;
2444  
2445  	/* get switch ingress interface ID */
2446  	if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
2447  
2448  	if (if_id >= ethsw->sw_attr.num_ifs) {
2449  		dev_err(ethsw->dev, "Frame received from unknown interface!\n");
2450  		goto err_free_fd;
2451  	}
2452  	port_priv = ethsw->ports[if_id];
2453  	netdev = port_priv->netdev;
2454  
2455  	/* build the SKB based on the FD received */
2456  	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
2457  		if (net_ratelimit()) {
2458  			netdev_err(netdev, "Received invalid frame format\n");
2459  			goto err_free_fd;
2460  		}
2461  	}
2462  
2463  	skb = dpaa2_switch_build_linear_skb(ethsw, fd);
2464  	if (unlikely(!skb))
2465  		goto err_free_fd;
2466  
2467  	skb_reset_mac_header(skb);
2468  
2469  	/* Remove the VLAN header if the packet that we just received has a vid
2470  	 * equal to the port PVIDs. Since the dpaa2-switch can operate only in
2471  	 * VLAN-aware mode and no alterations are made on the packet when it's
2472  	 * redirected/mirrored to the control interface, we are sure that there
2473  	 * will always be a VLAN header present.
2474  	 */
2475  	hdr = vlan_eth_hdr(skb);
2476  	vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
2477  	if (vid == port_priv->pvid) {
2478  		err = __skb_vlan_pop(skb, &vlan_tci);
2479  		if (err) {
2480  			dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
2481  			goto err_free_fd;
2482  		}
2483  	}
2484  
2485  	skb->dev = netdev;
2486  	skb->protocol = eth_type_trans(skb, skb->dev);
2487  
2488  	/* Setup the offload_fwd_mark only if the port is under a bridge */
2489  	skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
2490  
2491  	netif_receive_skb(skb);
2492  
2493  	return;
2494  
2495  err_free_fd:
2496  	dpaa2_switch_free_fd(ethsw, fd);
2497  }
2498  
dpaa2_switch_detect_features(struct ethsw_core * ethsw)2499  static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
2500  {
2501  	ethsw->features = 0;
2502  
2503  	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
2504  		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
2505  }
2506  
dpaa2_switch_setup_fqs(struct ethsw_core * ethsw)2507  static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
2508  {
2509  	struct dpsw_ctrl_if_attr ctrl_if_attr;
2510  	struct device *dev = ethsw->dev;
2511  	int i = 0;
2512  	int err;
2513  
2514  	err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2515  					  &ctrl_if_attr);
2516  	if (err) {
2517  		dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
2518  		return err;
2519  	}
2520  
2521  	ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
2522  	ethsw->fq[i].ethsw = ethsw;
2523  	ethsw->fq[i++].type = DPSW_QUEUE_RX;
2524  
2525  	ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
2526  	ethsw->fq[i].ethsw = ethsw;
2527  	ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
2528  
2529  	return 0;
2530  }
2531  
2532  /* Free buffers acquired from the buffer pool or which were meant to
2533   * be released in the pool
2534   */
dpaa2_switch_free_bufs(struct ethsw_core * ethsw,u64 * buf_array,int count)2535  static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
2536  {
2537  	struct device *dev = ethsw->dev;
2538  	void *vaddr;
2539  	int i;
2540  
2541  	for (i = 0; i < count; i++) {
2542  		vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
2543  		dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
2544  			       DMA_FROM_DEVICE);
2545  		free_pages((unsigned long)vaddr, 0);
2546  	}
2547  }
2548  
2549  /* Perform a single release command to add buffers
2550   * to the specified buffer pool
2551   */
dpaa2_switch_add_bufs(struct ethsw_core * ethsw,u16 bpid)2552  static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
2553  {
2554  	struct device *dev = ethsw->dev;
2555  	u64 buf_array[BUFS_PER_CMD];
2556  	struct page *page;
2557  	int retries = 0;
2558  	dma_addr_t addr;
2559  	int err;
2560  	int i;
2561  
2562  	for (i = 0; i < BUFS_PER_CMD; i++) {
2563  		/* Allocate one page for each Rx buffer. WRIOP sees
2564  		 * the entire page except for a tailroom reserved for
2565  		 * skb shared info
2566  		 */
2567  		page = dev_alloc_pages(0);
2568  		if (!page) {
2569  			dev_err(dev, "buffer allocation failed\n");
2570  			goto err_alloc;
2571  		}
2572  
2573  		addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
2574  				    DMA_FROM_DEVICE);
2575  		if (dma_mapping_error(dev, addr)) {
2576  			dev_err(dev, "dma_map_single() failed\n");
2577  			goto err_map;
2578  		}
2579  		buf_array[i] = addr;
2580  	}
2581  
2582  release_bufs:
2583  	/* In case the portal is busy, retry until successful or
2584  	 * max retries hit.
2585  	 */
2586  	while ((err = dpaa2_io_service_release(NULL, bpid,
2587  					       buf_array, i)) == -EBUSY) {
2588  		if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
2589  			break;
2590  
2591  		cpu_relax();
2592  	}
2593  
2594  	/* If release command failed, clean up and bail out. */
2595  	if (err) {
2596  		dpaa2_switch_free_bufs(ethsw, buf_array, i);
2597  		return 0;
2598  	}
2599  
2600  	return i;
2601  
2602  err_map:
2603  	__free_pages(page, 0);
2604  err_alloc:
2605  	/* If we managed to allocate at least some buffers,
2606  	 * release them to hardware
2607  	 */
2608  	if (i)
2609  		goto release_bufs;
2610  
2611  	return 0;
2612  }
2613  
dpaa2_switch_refill_bp(struct ethsw_core * ethsw)2614  static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
2615  {
2616  	int *count = &ethsw->buf_count;
2617  	int new_count;
2618  	int err = 0;
2619  
2620  	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
2621  		do {
2622  			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2623  			if (unlikely(!new_count)) {
2624  				/* Out of memory; abort for now, we'll
2625  				 * try later on
2626  				 */
2627  				break;
2628  			}
2629  			*count += new_count;
2630  		} while (*count < DPAA2_ETHSW_NUM_BUFS);
2631  
2632  		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
2633  			err = -ENOMEM;
2634  	}
2635  
2636  	return err;
2637  }
2638  
dpaa2_switch_seed_bp(struct ethsw_core * ethsw)2639  static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
2640  {
2641  	int *count, ret, i;
2642  
2643  	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
2644  		ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2645  		count = &ethsw->buf_count;
2646  		*count += ret;
2647  
2648  		if (unlikely(ret < BUFS_PER_CMD))
2649  			return -ENOMEM;
2650  	}
2651  
2652  	return 0;
2653  }
2654  
dpaa2_switch_drain_bp(struct ethsw_core * ethsw)2655  static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
2656  {
2657  	u64 buf_array[BUFS_PER_CMD];
2658  	int ret;
2659  
2660  	do {
2661  		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
2662  					       buf_array, BUFS_PER_CMD);
2663  		if (ret < 0) {
2664  			dev_err(ethsw->dev,
2665  				"dpaa2_io_service_acquire() = %d\n", ret);
2666  			return;
2667  		}
2668  		dpaa2_switch_free_bufs(ethsw, buf_array, ret);
2669  
2670  	} while (ret);
2671  }
2672  
dpaa2_switch_setup_dpbp(struct ethsw_core * ethsw)2673  static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
2674  {
2675  	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
2676  	struct device *dev = ethsw->dev;
2677  	struct fsl_mc_device *dpbp_dev;
2678  	struct dpbp_attr dpbp_attrs;
2679  	int err;
2680  
2681  	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2682  				     &dpbp_dev);
2683  	if (err) {
2684  		if (err == -ENXIO)
2685  			err = -EPROBE_DEFER;
2686  		else
2687  			dev_err(dev, "DPBP device allocation failed\n");
2688  		return err;
2689  	}
2690  	ethsw->dpbp_dev = dpbp_dev;
2691  
2692  	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
2693  			&dpbp_dev->mc_handle);
2694  	if (err) {
2695  		dev_err(dev, "dpbp_open() failed\n");
2696  		goto err_open;
2697  	}
2698  
2699  	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2700  	if (err) {
2701  		dev_err(dev, "dpbp_reset() failed\n");
2702  		goto err_reset;
2703  	}
2704  
2705  	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2706  	if (err) {
2707  		dev_err(dev, "dpbp_enable() failed\n");
2708  		goto err_enable;
2709  	}
2710  
2711  	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
2712  				  &dpbp_attrs);
2713  	if (err) {
2714  		dev_err(dev, "dpbp_get_attributes() failed\n");
2715  		goto err_get_attr;
2716  	}
2717  
2718  	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
2719  	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
2720  	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
2721  	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
2722  
2723  	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
2724  				     &dpsw_ctrl_if_pools_cfg);
2725  	if (err) {
2726  		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
2727  		goto err_get_attr;
2728  	}
2729  	ethsw->bpid = dpbp_attrs.id;
2730  
2731  	return 0;
2732  
2733  err_get_attr:
2734  	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2735  err_enable:
2736  err_reset:
2737  	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2738  err_open:
2739  	fsl_mc_object_free(dpbp_dev);
2740  	return err;
2741  }
2742  
dpaa2_switch_free_dpbp(struct ethsw_core * ethsw)2743  static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
2744  {
2745  	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2746  	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2747  	fsl_mc_object_free(ethsw->dpbp_dev);
2748  }
2749  
dpaa2_switch_alloc_rings(struct ethsw_core * ethsw)2750  static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
2751  {
2752  	int i;
2753  
2754  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2755  		ethsw->fq[i].store =
2756  			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
2757  					      ethsw->dev);
2758  		if (!ethsw->fq[i].store) {
2759  			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
2760  			while (--i >= 0)
2761  				dpaa2_io_store_destroy(ethsw->fq[i].store);
2762  			return -ENOMEM;
2763  		}
2764  	}
2765  
2766  	return 0;
2767  }
2768  
dpaa2_switch_destroy_rings(struct ethsw_core * ethsw)2769  static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
2770  {
2771  	int i;
2772  
2773  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2774  		dpaa2_io_store_destroy(ethsw->fq[i].store);
2775  }
2776  
dpaa2_switch_pull_fq(struct dpaa2_switch_fq * fq)2777  static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
2778  {
2779  	int err, retries = 0;
2780  
2781  	/* Try to pull from the FQ while the portal is busy and we didn't hit
2782  	 * the maximum number fo retries
2783  	 */
2784  	do {
2785  		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
2786  		cpu_relax();
2787  	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2788  
2789  	if (unlikely(err))
2790  		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
2791  
2792  	return err;
2793  }
2794  
2795  /* Consume all frames pull-dequeued into the store */
dpaa2_switch_store_consume(struct dpaa2_switch_fq * fq)2796  static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
2797  {
2798  	struct ethsw_core *ethsw = fq->ethsw;
2799  	int cleaned = 0, is_last;
2800  	struct dpaa2_dq *dq;
2801  	int retries = 0;
2802  
2803  	do {
2804  		/* Get the next available FD from the store */
2805  		dq = dpaa2_io_store_next(fq->store, &is_last);
2806  		if (unlikely(!dq)) {
2807  			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
2808  				dev_err_once(ethsw->dev,
2809  					     "No valid dequeue response\n");
2810  				return -ETIMEDOUT;
2811  			}
2812  			continue;
2813  		}
2814  
2815  		if (fq->type == DPSW_QUEUE_RX)
2816  			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
2817  		else
2818  			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
2819  		cleaned++;
2820  
2821  	} while (!is_last);
2822  
2823  	return cleaned;
2824  }
2825  
2826  /* NAPI poll routine */
dpaa2_switch_poll(struct napi_struct * napi,int budget)2827  static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
2828  {
2829  	int err, cleaned = 0, store_cleaned, work_done;
2830  	struct dpaa2_switch_fq *fq;
2831  	int retries = 0;
2832  
2833  	fq = container_of(napi, struct dpaa2_switch_fq, napi);
2834  
2835  	do {
2836  		err = dpaa2_switch_pull_fq(fq);
2837  		if (unlikely(err))
2838  			break;
2839  
2840  		/* Refill pool if appropriate */
2841  		dpaa2_switch_refill_bp(fq->ethsw);
2842  
2843  		store_cleaned = dpaa2_switch_store_consume(fq);
2844  		cleaned += store_cleaned;
2845  
2846  		if (cleaned >= budget) {
2847  			work_done = budget;
2848  			goto out;
2849  		}
2850  
2851  	} while (store_cleaned);
2852  
2853  	/* We didn't consume the entire budget, so finish napi and re-enable
2854  	 * data availability notifications
2855  	 */
2856  	napi_complete_done(napi, cleaned);
2857  	do {
2858  		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
2859  		cpu_relax();
2860  	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2861  
2862  	work_done = max(cleaned, 1);
2863  out:
2864  
2865  	return work_done;
2866  }
2867  
dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx * nctx)2868  static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
2869  {
2870  	struct dpaa2_switch_fq *fq;
2871  
2872  	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
2873  
2874  	napi_schedule(&fq->napi);
2875  }
2876  
dpaa2_switch_setup_dpio(struct ethsw_core * ethsw)2877  static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
2878  {
2879  	struct dpsw_ctrl_if_queue_cfg queue_cfg;
2880  	struct dpaa2_io_notification_ctx *nctx;
2881  	int err, i, j;
2882  
2883  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2884  		nctx = &ethsw->fq[i].nctx;
2885  
2886  		/* Register a new software context for the FQID.
2887  		 * By using NULL as the first parameter, we specify that we do
2888  		 * not care on which cpu are interrupts received for this queue
2889  		 */
2890  		nctx->is_cdan = 0;
2891  		nctx->id = ethsw->fq[i].fqid;
2892  		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
2893  		nctx->cb = dpaa2_switch_fqdan_cb;
2894  		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
2895  		if (err) {
2896  			err = -EPROBE_DEFER;
2897  			goto err_register;
2898  		}
2899  
2900  		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
2901  				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
2902  		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
2903  		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
2904  		queue_cfg.dest_cfg.priority = 0;
2905  		queue_cfg.user_ctx = nctx->qman64;
2906  
2907  		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
2908  					     ethsw->dpsw_handle,
2909  					     ethsw->fq[i].type,
2910  					     &queue_cfg);
2911  		if (err)
2912  			goto err_set_queue;
2913  	}
2914  
2915  	return 0;
2916  
2917  err_set_queue:
2918  	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
2919  err_register:
2920  	for (j = 0; j < i; j++)
2921  		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
2922  					    ethsw->dev);
2923  
2924  	return err;
2925  }
2926  
dpaa2_switch_free_dpio(struct ethsw_core * ethsw)2927  static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
2928  {
2929  	int i;
2930  
2931  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2932  		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
2933  					    ethsw->dev);
2934  }
2935  
dpaa2_switch_ctrl_if_setup(struct ethsw_core * ethsw)2936  static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
2937  {
2938  	int err;
2939  
2940  	/* setup FQs for Rx and Tx Conf */
2941  	err = dpaa2_switch_setup_fqs(ethsw);
2942  	if (err)
2943  		return err;
2944  
2945  	/* setup the buffer pool needed on the Rx path */
2946  	err = dpaa2_switch_setup_dpbp(ethsw);
2947  	if (err)
2948  		return err;
2949  
2950  	err = dpaa2_switch_alloc_rings(ethsw);
2951  	if (err)
2952  		goto err_free_dpbp;
2953  
2954  	err = dpaa2_switch_setup_dpio(ethsw);
2955  	if (err)
2956  		goto err_destroy_rings;
2957  
2958  	err = dpaa2_switch_seed_bp(ethsw);
2959  	if (err)
2960  		goto err_deregister_dpio;
2961  
2962  	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
2963  	if (err) {
2964  		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
2965  		goto err_drain_dpbp;
2966  	}
2967  
2968  	return 0;
2969  
2970  err_drain_dpbp:
2971  	dpaa2_switch_drain_bp(ethsw);
2972  err_deregister_dpio:
2973  	dpaa2_switch_free_dpio(ethsw);
2974  err_destroy_rings:
2975  	dpaa2_switch_destroy_rings(ethsw);
2976  err_free_dpbp:
2977  	dpaa2_switch_free_dpbp(ethsw);
2978  
2979  	return err;
2980  }
2981  
dpaa2_switch_remove_port(struct ethsw_core * ethsw,u16 port_idx)2982  static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
2983  				     u16 port_idx)
2984  {
2985  	struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
2986  
2987  	dpaa2_switch_port_disconnect_mac(port_priv);
2988  	free_netdev(port_priv->netdev);
2989  	ethsw->ports[port_idx] = NULL;
2990  }
2991  
dpaa2_switch_init(struct fsl_mc_device * sw_dev)2992  static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
2993  {
2994  	struct device *dev = &sw_dev->dev;
2995  	struct ethsw_core *ethsw = dev_get_drvdata(dev);
2996  	struct dpsw_vlan_if_cfg vcfg = {0};
2997  	struct dpsw_tci_cfg tci_cfg = {0};
2998  	struct dpsw_stp_cfg stp_cfg;
2999  	int err;
3000  	u16 i;
3001  
3002  	ethsw->dev_id = sw_dev->obj_desc.id;
3003  
3004  	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
3005  	if (err) {
3006  		dev_err(dev, "dpsw_open err %d\n", err);
3007  		return err;
3008  	}
3009  
3010  	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
3011  				  &ethsw->sw_attr);
3012  	if (err) {
3013  		dev_err(dev, "dpsw_get_attributes err %d\n", err);
3014  		goto err_close;
3015  	}
3016  
3017  	err = dpsw_get_api_version(ethsw->mc_io, 0,
3018  				   &ethsw->major,
3019  				   &ethsw->minor);
3020  	if (err) {
3021  		dev_err(dev, "dpsw_get_api_version err %d\n", err);
3022  		goto err_close;
3023  	}
3024  
3025  	/* Minimum supported DPSW version check */
3026  	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
3027  	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
3028  	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
3029  		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
3030  			ethsw->major, ethsw->minor);
3031  		err = -EOPNOTSUPP;
3032  		goto err_close;
3033  	}
3034  
3035  	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
3036  		err = -EOPNOTSUPP;
3037  		goto err_close;
3038  	}
3039  
3040  	dpaa2_switch_detect_features(ethsw);
3041  
3042  	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
3043  	if (err) {
3044  		dev_err(dev, "dpsw_reset err %d\n", err);
3045  		goto err_close;
3046  	}
3047  
3048  	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
3049  	stp_cfg.state = DPSW_STP_STATE_FORWARDING;
3050  
3051  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3052  		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
3053  		if (err) {
3054  			dev_err(dev, "dpsw_if_disable err %d\n", err);
3055  			goto err_close;
3056  		}
3057  
3058  		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
3059  				      &stp_cfg);
3060  		if (err) {
3061  			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
3062  				err, i);
3063  			goto err_close;
3064  		}
3065  
3066  		/* Switch starts with all ports configured to VLAN 1. Need to
3067  		 * remove this setting to allow configuration at bridge join
3068  		 */
3069  		vcfg.num_ifs = 1;
3070  		vcfg.if_id[0] = i;
3071  		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
3072  						   DEFAULT_VLAN_ID, &vcfg);
3073  		if (err) {
3074  			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
3075  				err);
3076  			goto err_close;
3077  		}
3078  
3079  		tci_cfg.vlan_id = 4095;
3080  		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
3081  		if (err) {
3082  			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
3083  			goto err_close;
3084  		}
3085  
3086  		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
3087  					  DEFAULT_VLAN_ID, &vcfg);
3088  		if (err) {
3089  			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
3090  			goto err_close;
3091  		}
3092  	}
3093  
3094  	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
3095  	if (err) {
3096  		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
3097  		goto err_close;
3098  	}
3099  
3100  	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
3101  						   WQ_MEM_RECLAIM, "ethsw",
3102  						   ethsw->sw_attr.id);
3103  	if (!ethsw->workqueue) {
3104  		err = -ENOMEM;
3105  		goto err_close;
3106  	}
3107  
3108  	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
3109  	if (err)
3110  		goto err_destroy_ordered_workqueue;
3111  
3112  	err = dpaa2_switch_ctrl_if_setup(ethsw);
3113  	if (err)
3114  		goto err_destroy_ordered_workqueue;
3115  
3116  	return 0;
3117  
3118  err_destroy_ordered_workqueue:
3119  	destroy_workqueue(ethsw->workqueue);
3120  
3121  err_close:
3122  	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3123  	return err;
3124  }
3125  
3126  /* Add an ACL to redirect frames with specific destination MAC address to
3127   * control interface
3128   */
dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv * port_priv,const char * mac)3129  static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
3130  					   const char *mac)
3131  {
3132  	struct dpaa2_switch_acl_entry acl_entry = {0};
3133  
3134  	/* Match on the destination MAC address */
3135  	ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
3136  	eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
3137  
3138  	/* Trap to CPU */
3139  	acl_entry.cfg.precedence = 0;
3140  	acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
3141  
3142  	return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
3143  }
3144  
dpaa2_switch_port_init(struct ethsw_port_priv * port_priv,u16 port)3145  static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
3146  {
3147  	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
3148  	struct switchdev_obj_port_vlan vlan = {
3149  		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
3150  		.vid = DEFAULT_VLAN_ID,
3151  		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
3152  	};
3153  	struct net_device *netdev = port_priv->netdev;
3154  	struct ethsw_core *ethsw = port_priv->ethsw_data;
3155  	struct dpaa2_switch_filter_block *filter_block;
3156  	struct dpsw_fdb_cfg fdb_cfg = {0};
3157  	struct dpsw_if_attr dpsw_if_attr;
3158  	struct dpaa2_switch_fdb *fdb;
3159  	struct dpsw_acl_cfg acl_cfg;
3160  	u16 fdb_id, acl_tbl_id;
3161  	int err;
3162  
3163  	/* Get the Tx queue for this specific port */
3164  	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
3165  				     port_priv->idx, &dpsw_if_attr);
3166  	if (err) {
3167  		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
3168  		return err;
3169  	}
3170  	port_priv->tx_qdid = dpsw_if_attr.qdid;
3171  
3172  	/* Create a FDB table for this particular switch port */
3173  	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
3174  	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3175  			   &fdb_id, &fdb_cfg);
3176  	if (err) {
3177  		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
3178  		return err;
3179  	}
3180  
3181  	/* Find an unused dpaa2_switch_fdb structure and use it */
3182  	fdb = dpaa2_switch_fdb_get_unused(ethsw);
3183  	fdb->fdb_id = fdb_id;
3184  	fdb->in_use = true;
3185  	fdb->bridge_dev = NULL;
3186  	port_priv->fdb = fdb;
3187  
3188  	/* We need to add VLAN 1 as the PVID on this port until it is under a
3189  	 * bridge since the DPAA2 switch is not able to handle the traffic in a
3190  	 * VLAN unaware fashion
3191  	 */
3192  	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
3193  	if (err)
3194  		return err;
3195  
3196  	/* Setup the egress flooding domains (broadcast, unknown unicast */
3197  	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
3198  	if (err)
3199  		return err;
3200  
3201  	/* Create an ACL table to be used by this switch port */
3202  	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
3203  	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3204  			   &acl_tbl_id, &acl_cfg);
3205  	if (err) {
3206  		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
3207  		return err;
3208  	}
3209  
3210  	filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
3211  	filter_block->ethsw = ethsw;
3212  	filter_block->acl_id = acl_tbl_id;
3213  	filter_block->in_use = true;
3214  	filter_block->num_acl_rules = 0;
3215  	INIT_LIST_HEAD(&filter_block->acl_entries);
3216  	INIT_LIST_HEAD(&filter_block->mirror_entries);
3217  
3218  	err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
3219  	if (err)
3220  		return err;
3221  
3222  	err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
3223  	if (err)
3224  		return err;
3225  
3226  	return err;
3227  }
3228  
dpaa2_switch_ctrl_if_teardown(struct ethsw_core * ethsw)3229  static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
3230  {
3231  	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3232  	dpaa2_switch_free_dpio(ethsw);
3233  	dpaa2_switch_destroy_rings(ethsw);
3234  	dpaa2_switch_drain_bp(ethsw);
3235  	dpaa2_switch_free_dpbp(ethsw);
3236  }
3237  
dpaa2_switch_teardown(struct fsl_mc_device * sw_dev)3238  static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
3239  {
3240  	struct device *dev = &sw_dev->dev;
3241  	struct ethsw_core *ethsw = dev_get_drvdata(dev);
3242  	int err;
3243  
3244  	dpaa2_switch_ctrl_if_teardown(ethsw);
3245  
3246  	destroy_workqueue(ethsw->workqueue);
3247  
3248  	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3249  	if (err)
3250  		dev_warn(dev, "dpsw_close err %d\n", err);
3251  }
3252  
dpaa2_switch_remove(struct fsl_mc_device * sw_dev)3253  static void dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
3254  {
3255  	struct ethsw_port_priv *port_priv;
3256  	struct ethsw_core *ethsw;
3257  	struct device *dev;
3258  	int i;
3259  
3260  	dev = &sw_dev->dev;
3261  	ethsw = dev_get_drvdata(dev);
3262  
3263  	dpaa2_switch_teardown_irqs(sw_dev);
3264  
3265  	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3266  
3267  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3268  		port_priv = ethsw->ports[i];
3269  		unregister_netdev(port_priv->netdev);
3270  		dpaa2_switch_remove_port(ethsw, i);
3271  	}
3272  
3273  	kfree(ethsw->fdbs);
3274  	kfree(ethsw->filter_blocks);
3275  	kfree(ethsw->ports);
3276  
3277  	dpaa2_switch_teardown(sw_dev);
3278  
3279  	fsl_mc_portal_free(ethsw->mc_io);
3280  
3281  	kfree(ethsw);
3282  
3283  	dev_set_drvdata(dev, NULL);
3284  }
3285  
dpaa2_switch_probe_port(struct ethsw_core * ethsw,u16 port_idx)3286  static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
3287  				   u16 port_idx)
3288  {
3289  	struct ethsw_port_priv *port_priv;
3290  	struct device *dev = ethsw->dev;
3291  	struct net_device *port_netdev;
3292  	int err;
3293  
3294  	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
3295  	if (!port_netdev) {
3296  		dev_err(dev, "alloc_etherdev error\n");
3297  		return -ENOMEM;
3298  	}
3299  
3300  	port_priv = netdev_priv(port_netdev);
3301  	port_priv->netdev = port_netdev;
3302  	port_priv->ethsw_data = ethsw;
3303  
3304  	mutex_init(&port_priv->mac_lock);
3305  
3306  	port_priv->idx = port_idx;
3307  	port_priv->stp_state = BR_STATE_FORWARDING;
3308  
3309  	SET_NETDEV_DEV(port_netdev, dev);
3310  	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
3311  	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
3312  
3313  	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
3314  
3315  	port_priv->bcast_flood = true;
3316  	port_priv->ucast_flood = true;
3317  
3318  	/* Set MTU limits */
3319  	port_netdev->min_mtu = ETH_MIN_MTU;
3320  	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
3321  
3322  	/* Populate the private port structure so that later calls to
3323  	 * dpaa2_switch_port_init() can use it.
3324  	 */
3325  	ethsw->ports[port_idx] = port_priv;
3326  
3327  	/* The DPAA2 switch's ingress path depends on the VLAN table,
3328  	 * thus we are not able to disable VLAN filtering.
3329  	 */
3330  	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
3331  				NETIF_F_HW_VLAN_STAG_FILTER |
3332  				NETIF_F_HW_TC;
3333  	port_netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3334  
3335  	err = dpaa2_switch_port_init(port_priv, port_idx);
3336  	if (err)
3337  		goto err_port_probe;
3338  
3339  	err = dpaa2_switch_port_set_mac_addr(port_priv);
3340  	if (err)
3341  		goto err_port_probe;
3342  
3343  	err = dpaa2_switch_port_set_learning(port_priv, false);
3344  	if (err)
3345  		goto err_port_probe;
3346  	port_priv->learn_ena = false;
3347  
3348  	err = dpaa2_switch_port_connect_mac(port_priv);
3349  	if (err)
3350  		goto err_port_probe;
3351  
3352  	return 0;
3353  
3354  err_port_probe:
3355  	free_netdev(port_netdev);
3356  	ethsw->ports[port_idx] = NULL;
3357  
3358  	return err;
3359  }
3360  
dpaa2_switch_probe(struct fsl_mc_device * sw_dev)3361  static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
3362  {
3363  	struct device *dev = &sw_dev->dev;
3364  	struct ethsw_core *ethsw;
3365  	int i, err;
3366  
3367  	/* Allocate switch core*/
3368  	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
3369  
3370  	if (!ethsw)
3371  		return -ENOMEM;
3372  
3373  	ethsw->dev = dev;
3374  	ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
3375  	dev_set_drvdata(dev, ethsw);
3376  
3377  	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3378  				     &ethsw->mc_io);
3379  	if (err) {
3380  		if (err == -ENXIO)
3381  			err = -EPROBE_DEFER;
3382  		else
3383  			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
3384  		goto err_free_drvdata;
3385  	}
3386  
3387  	err = dpaa2_switch_init(sw_dev);
3388  	if (err)
3389  		goto err_free_cmdport;
3390  
3391  	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
3392  			       GFP_KERNEL);
3393  	if (!(ethsw->ports)) {
3394  		err = -ENOMEM;
3395  		goto err_teardown;
3396  	}
3397  
3398  	ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
3399  			      GFP_KERNEL);
3400  	if (!ethsw->fdbs) {
3401  		err = -ENOMEM;
3402  		goto err_free_ports;
3403  	}
3404  
3405  	ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
3406  				       sizeof(*ethsw->filter_blocks),
3407  				       GFP_KERNEL);
3408  	if (!ethsw->filter_blocks) {
3409  		err = -ENOMEM;
3410  		goto err_free_fdbs;
3411  	}
3412  
3413  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3414  		err = dpaa2_switch_probe_port(ethsw, i);
3415  		if (err)
3416  			goto err_free_netdev;
3417  	}
3418  
3419  	/* Add a NAPI instance for each of the Rx queues. The first port's
3420  	 * net_device will be associated with the instances since we do not have
3421  	 * different queues for each switch ports.
3422  	 */
3423  	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
3424  		netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
3425  			       dpaa2_switch_poll);
3426  
3427  	/* Setup IRQs */
3428  	err = dpaa2_switch_setup_irqs(sw_dev);
3429  	if (err)
3430  		goto err_stop;
3431  
3432  	/* By convention, if the mirror port is equal to the number of switch
3433  	 * interfaces, then mirroring of any kind is disabled.
3434  	 */
3435  	ethsw->mirror_port =  ethsw->sw_attr.num_ifs;
3436  
3437  	/* Register the netdev only when the entire setup is done and the
3438  	 * switch port interfaces are ready to receive traffic
3439  	 */
3440  	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3441  		err = register_netdev(ethsw->ports[i]->netdev);
3442  		if (err < 0) {
3443  			dev_err(dev, "register_netdev error %d\n", err);
3444  			goto err_unregister_ports;
3445  		}
3446  	}
3447  
3448  	return 0;
3449  
3450  err_unregister_ports:
3451  	for (i--; i >= 0; i--)
3452  		unregister_netdev(ethsw->ports[i]->netdev);
3453  	dpaa2_switch_teardown_irqs(sw_dev);
3454  err_stop:
3455  	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3456  err_free_netdev:
3457  	for (i--; i >= 0; i--)
3458  		dpaa2_switch_remove_port(ethsw, i);
3459  	kfree(ethsw->filter_blocks);
3460  err_free_fdbs:
3461  	kfree(ethsw->fdbs);
3462  err_free_ports:
3463  	kfree(ethsw->ports);
3464  
3465  err_teardown:
3466  	dpaa2_switch_teardown(sw_dev);
3467  
3468  err_free_cmdport:
3469  	fsl_mc_portal_free(ethsw->mc_io);
3470  
3471  err_free_drvdata:
3472  	kfree(ethsw);
3473  	dev_set_drvdata(dev, NULL);
3474  
3475  	return err;
3476  }
3477  
3478  static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
3479  	{
3480  		.vendor = FSL_MC_VENDOR_FREESCALE,
3481  		.obj_type = "dpsw",
3482  	},
3483  	{ .vendor = 0x0 }
3484  };
3485  MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
3486  
3487  static struct fsl_mc_driver dpaa2_switch_drv = {
3488  	.driver = {
3489  		.name = KBUILD_MODNAME,
3490  	},
3491  	.probe = dpaa2_switch_probe,
3492  	.remove = dpaa2_switch_remove,
3493  	.match_id_table = dpaa2_switch_match_id_table
3494  };
3495  
3496  static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
3497  	.notifier_call = dpaa2_switch_port_netdevice_event,
3498  };
3499  
3500  static struct notifier_block dpaa2_switch_port_switchdev_nb = {
3501  	.notifier_call = dpaa2_switch_port_event,
3502  };
3503  
3504  static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
3505  	.notifier_call = dpaa2_switch_port_blocking_event,
3506  };
3507  
dpaa2_switch_register_notifiers(void)3508  static int dpaa2_switch_register_notifiers(void)
3509  {
3510  	int err;
3511  
3512  	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
3513  	if (err) {
3514  		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
3515  		return err;
3516  	}
3517  
3518  	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3519  	if (err) {
3520  		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
3521  		goto err_switchdev_nb;
3522  	}
3523  
3524  	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3525  	if (err) {
3526  		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
3527  		goto err_switchdev_blocking_nb;
3528  	}
3529  
3530  	return 0;
3531  
3532  err_switchdev_blocking_nb:
3533  	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3534  err_switchdev_nb:
3535  	unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3536  
3537  	return err;
3538  }
3539  
dpaa2_switch_unregister_notifiers(void)3540  static void dpaa2_switch_unregister_notifiers(void)
3541  {
3542  	int err;
3543  
3544  	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3545  	if (err)
3546  		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
3547  		       err);
3548  
3549  	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3550  	if (err)
3551  		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
3552  
3553  	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3554  	if (err)
3555  		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
3556  }
3557  
dpaa2_switch_driver_init(void)3558  static int __init dpaa2_switch_driver_init(void)
3559  {
3560  	int err;
3561  
3562  	err = fsl_mc_driver_register(&dpaa2_switch_drv);
3563  	if (err)
3564  		return err;
3565  
3566  	err = dpaa2_switch_register_notifiers();
3567  	if (err) {
3568  		fsl_mc_driver_unregister(&dpaa2_switch_drv);
3569  		return err;
3570  	}
3571  
3572  	return 0;
3573  }
3574  
dpaa2_switch_driver_exit(void)3575  static void __exit dpaa2_switch_driver_exit(void)
3576  {
3577  	dpaa2_switch_unregister_notifiers();
3578  	fsl_mc_driver_unregister(&dpaa2_switch_drv);
3579  }
3580  
3581  module_init(dpaa2_switch_driver_init);
3582  module_exit(dpaa2_switch_driver_exit);
3583  
3584  MODULE_LICENSE("GPL v2");
3585  MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
3586