// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
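
/* Editor's illustration: each MLXSW_ITEM32() above expands into typed
 * accessors for the named field, e.g. the 'version' item yields
 * mlxsw_tx_hdr_version_set(buf, val) and mlxsw_tx_hdr_version_get(buf).
 * A minimal sketch of filling a header with these helpers:
 *
 *	char txhdr[MLXSW_TXHDR_LEN] = {};
 *
 *	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 *	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
 *
 * mlxsw_sp_txhdr_construct() below is the real in-driver user.
 */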

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, bool clear,
			      u64 *packets, u64 *bytes)
{
	enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
						MLXSW_REG_MGPC_OPCODE_NOP;
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
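
/* Editor's sketch of the flow-counter lifecycle implemented above --
 * allocate (which also clears), read periodically, then free:
 *
 *	unsigned int idx;
 *	u64 packets, bytes;
 *	int err;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &idx);
 *	if (err)
 *		return err;
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, idx, false,
 *					&packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, idx);
 *
 * Passing clear=true reads and zeroes the counter in a single MGPC query.
 */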

void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}

static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}
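
/* Editor's note: the check above is two-staged on purpose. SKBTX_HW_TSTAMP
 * only says the socket asked for a hardware time stamp; ptp_classify_raw()
 * then classifies the frame (PTP over L2/UDP, or PTP_CLASS_NONE) and
 * ptp_parse_header() returns a non-NULL header only for frames that actually
 * carry one. A sketch of the equivalent logic:
 *
 *	type = ptp_classify_raw(skb);		// e.g. PTP_CLASS_NONE
 *	hdr = ptp_parse_header(skb, type);	// NULL unless a PTP packet
 *	requires_ts = (tx_flags & SKBTX_HW_TSTAMP) && hdr;
 */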

static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}
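
/* Editor's illustration of the mapping above: the bridge's five STP states
 * collapse onto the device's three SPMS states, since listening, disabled
 * and blocking all mean "do not forward, do not learn":
 *
 *	BR_STATE_FORWARDING -> SPMS FORWARDING
 *	BR_STATE_LEARNING   -> SPMS LEARNING
 *	BR_STATE_LISTENING,
 *	BR_STATE_DISABLED,
 *	BR_STATE_BLOCKING   -> SPMS DISCARDING
 */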

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_PORT_ETH_FRAME_HDR;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
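
/* Editor's note: the PMTU register takes the maximal frame size, while
 * dev->mtu excludes the Ethernet framing, hence the fixed
 * MLXSW_PORT_ETH_FRAME_HDR adjustment above. Illustrative arithmetic:
 *
 *	dev->mtu == 1500  ->  PMTU written as 1500 + MLXSW_PORT_ETH_FRAME_HDR
 */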

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	mlxsw_sp_port->security = enable;
	return 0;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
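
/* Editor's illustration: the device encodes the VLAN EtherType as a small
 * register index rather than the raw protocol number, so only the two
 * standard tag protocols are representable:
 *
 *	ETH_P_8021Q  (0x8100) -> sver_type 0
 *	ETH_P_8021AD (0x88A8) -> sver_type 1
 *	anything else         -> -EINVAL
 */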

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
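
/* Editor's note on the semantics above: PVID 0 means "no PVID", which is
 * modelled by rejecting untagged traffic altogether; a non-zero PVID
 * programs SPVID and then re-allows untagged traffic. If the second step
 * fails, the previous PVID is restored. In short:
 *
 *	mlxsw_sp_port_pvid_set(port, 0, ethtype);  // drop untagged frames
 *	mlxsw_sp_port_pvid_set(port, 10, ethtype); // untagged -> VLAN 10
 */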

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	WRITE_ONCE(dev->mtu, mtu);
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
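
/* Editor's illustration of the batching above: a single SPVM write carries
 * at most MLXSW_REG_SPVM_REC_MAX_COUNT VID records, so a large range is
 * split into consecutive sub-ranges of that size, e.g.:
 *
 *	mlxsw_sp_port_vlan_set(port, vid_begin, vid_end, true, false);
 *	  -> __mlxsw_sp_port_vlan_set() once per
 *	     MLXSW_REG_SPVM_REC_MAX_COUNT-sized chunk of [vid_begin, vid_end]
 */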

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
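
/* Editor's sketch of the create/destroy pairing above, as used by the
 * 8021q callbacks below -- create reports errors ERR_PTR() style:
 *
 *	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
 *	if (IS_ERR(mlxsw_sp_port_vlan))
 *		return PTR_ERR(mlxsw_sp_port_vlan);
 *	...
 *	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
 */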

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
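
/* Editor's note: the "err |=" pattern above intentionally tries every
 * handler even after one fails; the OR of negative errnos is meaningless
 * as an error code, which is why a fixed -EINVAL is returned and the
 * pre-call feature set is restored. Each handler only acts when its bit
 * actually changed:
 *
 *	changes = wanted_features ^ dev->features;
 *	if (!(changes & feature))
 *		return 0;	// nothing to do for this feature
 */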

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
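
/* Editor's sketch of the userspace side of the ioctls handled above --
 * the standard SIOCSHWTSTAMP flow that tools such as ptp4l perform
 * (illustrative only; fd is a hypothetical AF_INET socket, "swp1" a
 * hypothetical port netdev name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "swp1", sizeof(ifr.ifr_name));
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// cfg may be adjusted by the driver
 */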

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1533  
1534  static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1535  					bool enable)
1536  {
1537  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1538  	char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1539  
1540  	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1541  	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1542  }
1543  
1544  static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1545  {
1546  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1547  	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1548  	u8 module = mlxsw_sp_port->mapping.module;
1549  	u64 overheat_counter;
1550  	int err;
1551  
1552  	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1553  						    module, &overheat_counter);
1554  	if (err)
1555  		return err;
1556  
1557  	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1558  	return 0;
1559  }
1560  
1561  int
1562  mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1563  				      bool is_8021ad_tagged,
1564  				      bool is_8021q_tagged)
1565  {
1566  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1567  	char spvc_pl[MLXSW_REG_SPVC_LEN];
1568  
1569  	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1570  			    is_8021ad_tagged, is_8021q_tagged);
1571  	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1572  }
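/* Usage note (the bit mapping is inferred from the comment in
 * mlxsw_sp_port_create() below): is_8021ad_tagged programs SPVC.et1 and
 * is_8021q_tagged programs SPVC.et0. For example, the default applied
 * during port creation treats only 802.1q-tagged packets as tagged:
 *
 *	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
 */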
1573  
1574  static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1575  					u16 local_port, u8 *port_number,
1576  					u8 *split_port_subnumber,
1577  					u8 *slot_index)
1578  {
1579  	char pllp_pl[MLXSW_REG_PLLP_LEN];
1580  	int err;
1581  
1582  	mlxsw_reg_pllp_pack(pllp_pl, local_port);
1583  	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1584  	if (err)
1585  		return err;
1586  	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1587  			      split_port_subnumber, slot_index);
1588  	return 0;
1589  }
1590  
1591  static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1592  				bool split,
1593  				struct mlxsw_sp_port_mapping *port_mapping)
1594  {
1595  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1596  	struct mlxsw_sp_port *mlxsw_sp_port;
1597  	u32 lanes = port_mapping->width;
1598  	u8 split_port_subnumber;
1599  	struct net_device *dev;
1600  	u8 port_number;
1601  	u8 slot_index;
1602  	bool splittable;
1603  	int err;
1604  
1605  	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
1606  	if (err) {
1607  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1608  			local_port);
1609  		return err;
1610  	}
1611  
1612  	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
1613  	if (err) {
1614  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1615  			local_port);
1616  		goto err_port_swid_set;
1617  	}
1618  
1619  	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
1620  					   &split_port_subnumber, &slot_index);
1621  	if (err) {
1622  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
1623  			local_port);
1624  		goto err_port_label_info_get;
1625  	}
1626  
1627  	splittable = lanes > 1 && !split;
1628  	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
1629  				   port_number, split, split_port_subnumber,
1630  				   splittable, lanes, mlxsw_sp->base_mac,
1631  				   sizeof(mlxsw_sp->base_mac));
1632  	if (err) {
1633  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1634  			local_port);
1635  		goto err_core_port_init;
1636  	}
1637  
1638  	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1639  	if (!dev) {
1640  		err = -ENOMEM;
1641  		goto err_alloc_etherdev;
1642  	}
1643  	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1644  	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1645  	mlxsw_sp_port = netdev_priv(dev);
1646  	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
1647  				    mlxsw_sp_port, dev);
1648  	mlxsw_sp_port->dev = dev;
1649  	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1650  	mlxsw_sp_port->local_port = local_port;
1651  	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1652  	mlxsw_sp_port->split = split;
1653  	mlxsw_sp_port->mapping = *port_mapping;
1654  	mlxsw_sp_port->link.autoneg = 1;
1655  	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1656  
1657  	mlxsw_sp_port->pcpu_stats =
1658  		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1659  	if (!mlxsw_sp_port->pcpu_stats) {
1660  		err = -ENOMEM;
1661  		goto err_alloc_stats;
1662  	}
1663  
1664  	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1665  			  &update_stats_cache);
1666  
1667  	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1668  	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1669  
1670  	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1671  	if (err) {
1672  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1673  			mlxsw_sp_port->local_port);
1674  		goto err_dev_addr_init;
1675  	}
1676  
1677  	netif_carrier_off(dev);
1678  
1679  	dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
1680  			 NETIF_F_HW_TC;
1681  	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1682  	dev->lltx = true;
1683  	dev->netns_local = true;
1684  
1685  	dev->min_mtu = ETH_MIN_MTU;
1686  	dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
1687  
1688  	/* Each packet needs to have a Tx header (metadata) on top of all
1689  	 * other headers.
1690  	 */
1691  	dev->needed_headroom = MLXSW_TXHDR_LEN;
1692  
1693  	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1694  	if (err) {
1695  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1696  			mlxsw_sp_port->local_port);
1697  		goto err_port_system_port_mapping_set;
1698  	}
1699  
1700  	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1701  	if (err) {
1702  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1703  			mlxsw_sp_port->local_port);
1704  		goto err_port_speed_by_width_set;
1705  	}
1706  
1707  	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1708  							    &mlxsw_sp_port->max_speed);
1709  	if (err) {
1710  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1711  			mlxsw_sp_port->local_port);
1712  		goto err_max_speed_get;
1713  	}
1714  
1715  	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1716  	if (err) {
1717  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1718  			mlxsw_sp_port->local_port);
1719  		goto err_port_mtu_set;
1720  	}
1721  
1722  	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1723  	if (err)
1724  		goto err_port_admin_status_set;
1725  
1726  	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1727  	if (err) {
1728  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1729  			mlxsw_sp_port->local_port);
1730  		goto err_port_buffers_init;
1731  	}
1732  
1733  	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1734  	if (err) {
1735  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1736  			mlxsw_sp_port->local_port);
1737  		goto err_port_ets_init;
1738  	}
1739  
1740  	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1741  	if (err) {
1742  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1743  			mlxsw_sp_port->local_port);
1744  		goto err_port_tc_mc_mode;
1745  	}
1746  
1747  	/* ETS and buffers must be initialized before DCB. */
1748  	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1749  	if (err) {
1750  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1751  			mlxsw_sp_port->local_port);
1752  		goto err_port_dcb_init;
1753  	}
1754  
1755  	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1756  	if (err) {
1757  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1758  			mlxsw_sp_port->local_port);
1759  		goto err_port_fids_init;
1760  	}
1761  
1762  	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1763  	if (err) {
1764  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1765  			mlxsw_sp_port->local_port);
1766  		goto err_port_qdiscs_init;
1767  	}
1768  
1769  	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1770  				     false);
1771  	if (err) {
1772  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1773  			mlxsw_sp_port->local_port);
1774  		goto err_port_vlan_clear;
1775  	}
1776  
1777  	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1778  	if (err) {
1779  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1780  			mlxsw_sp_port->local_port);
1781  		goto err_port_nve_init;
1782  	}
1783  
1784  	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1785  				     ETH_P_8021Q);
1786  	if (err) {
1787  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1788  			mlxsw_sp_port->local_port);
1789  		goto err_port_pvid_set;
1790  	}
1791  
1792  	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1793  						       MLXSW_SP_DEFAULT_VID);
1794  	if (IS_ERR(mlxsw_sp_port_vlan)) {
1795  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create default VLAN\n",
1796  			mlxsw_sp_port->local_port);
1797  		err = PTR_ERR(mlxsw_sp_port_vlan);
1798  		goto err_port_vlan_create;
1799  	}
1800  	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1801  
1802  	/* Set SPVC.et0=true and SPVC.et1=false to make the local port treat
1803  	 * only packets with an 802.1q header as tagged packets.
1804  	 */
1805  	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
1806  	if (err) {
1807  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
1808  			local_port);
1809  		goto err_port_vlan_classification_set;
1810  	}
1811  
1812  	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1813  			  mlxsw_sp->ptp_ops->shaper_work);
1814  
1815  	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1816  
1817  	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1818  	if (err) {
1819  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1820  			mlxsw_sp_port->local_port);
1821  		goto err_port_overheat_init_val_set;
1822  	}
1823  
1824  	err = register_netdev(dev);
1825  	if (err) {
1826  		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1827  			mlxsw_sp_port->local_port);
1828  		goto err_register_netdev;
1829  	}
1830  
1831  	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1832  	return 0;
1833  
1834  err_register_netdev:
1835  err_port_overheat_init_val_set:
1836  	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1837  err_port_vlan_classification_set:
1838  	mlxsw_sp->ports[local_port] = NULL;
1839  	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1840  err_port_vlan_create:
1841  err_port_pvid_set:
1842  	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1843  err_port_nve_init:
1844  err_port_vlan_clear:
1845  	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1846  err_port_qdiscs_init:
1847  	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1848  err_port_fids_init:
1849  	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1850  err_port_dcb_init:
1851  	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1852  err_port_tc_mc_mode:
1853  err_port_ets_init:
1854  	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1855  err_port_buffers_init:
1856  err_port_admin_status_set:
1857  err_port_mtu_set:
1858  err_max_speed_get:
1859  err_port_speed_by_width_set:
1860  err_port_system_port_mapping_set:
1861  err_dev_addr_init:
1862  	free_percpu(mlxsw_sp_port->pcpu_stats);
1863  err_alloc_stats:
1864  	free_netdev(dev);
1865  err_alloc_etherdev:
1866  	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1867  err_core_port_init:
1868  err_port_label_info_get:
1869  	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
1870  			       MLXSW_PORT_SWID_DISABLED_PORT);
1871  err_port_swid_set:
1872  	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
1873  				   port_mapping->slot_index,
1874  				   port_mapping->module);
1875  	return err;
1876  }
1877  
1878  static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1879  {
1880  	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1881  	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1882  	u8 module = mlxsw_sp_port->mapping.module;
1883  
1884  	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
1885  	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
1886  	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1887  	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
1888  	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1889  	mlxsw_sp->ports[local_port] = NULL;
1890  	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
1891  	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1892  	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1893  	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1894  	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1895  	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1896  	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1897  	free_percpu(mlxsw_sp_port->pcpu_stats);
1898  	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
1899  	free_netdev(mlxsw_sp_port->dev);
1900  	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1901  	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
1902  			       MLXSW_PORT_SWID_DISABLED_PORT);
1903  	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
1904  }
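/* Note that the teardown above mirrors mlxsw_sp_port_create() in reverse,
 * step for step along that function's error-unwind labels.
 */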
1905  
1906  static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1907  {
1908  	struct mlxsw_sp_port *mlxsw_sp_port;
1909  	int err;
1910  
1911  	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1912  	if (!mlxsw_sp_port)
1913  		return -ENOMEM;
1914  
1915  	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1916  	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1917  
1918  	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1919  				       mlxsw_sp_port,
1920  				       mlxsw_sp->base_mac,
1921  				       sizeof(mlxsw_sp->base_mac));
1922  	if (err) {
1923  		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1924  		goto err_core_cpu_port_init;
1925  	}
1926  
1927  	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1928  	return 0;
1929  
1930  err_core_cpu_port_init:
1931  	kfree(mlxsw_sp_port);
1932  	return err;
1933  }
1934  
1935  static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1936  {
1937  	struct mlxsw_sp_port *mlxsw_sp_port =
1938  				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1939  
1940  	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1941  	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1942  	kfree(mlxsw_sp_port);
1943  }
1944  
1945  static bool mlxsw_sp_local_port_valid(u16 local_port)
1946  {
1947  	return local_port != MLXSW_PORT_CPU_PORT;
1948  }
1949  
1950  static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1951  {
1952  	if (!mlxsw_sp_local_port_valid(local_port))
1953  		return false;
1954  	return mlxsw_sp->ports[local_port] != NULL;
1955  }
1956  
1957  static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1958  					   u16 local_port, bool enable)
1959  {
1960  	char pmecr_pl[MLXSW_REG_PMECR_LEN];
1961  
1962  	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1963  			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1964  				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1965  	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1966  }
1967  
1968  struct mlxsw_sp_port_mapping_event {
1969  	struct list_head list;
1970  	char pmlp_pl[MLXSW_REG_PMLP_LEN];
1971  };
1972  
1973  static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
1974  {
1975  	struct mlxsw_sp_port_mapping_event *event, *next_event;
1976  	struct mlxsw_sp_port_mapping_events *events;
1977  	struct mlxsw_sp_port_mapping port_mapping;
1978  	struct mlxsw_sp *mlxsw_sp;
1979  	struct devlink *devlink;
1980  	LIST_HEAD(event_queue);
1981  	u16 local_port;
1982  	int err;
1983  
1984  	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
1985  	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
1986  	devlink = priv_to_devlink(mlxsw_sp->core);
1987  
1988  	spin_lock_bh(&events->queue_lock);
1989  	list_splice_init(&events->queue, &event_queue);
1990  	spin_unlock_bh(&events->queue_lock);
1991  
1992  	list_for_each_entry_safe(event, next_event, &event_queue, list) {
1993  		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
1994  		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
1995  						      event->pmlp_pl, &port_mapping);
1996  		if (err)
1997  			goto out;
1998  
1999  		if (WARN_ON_ONCE(!port_mapping.width))
2000  			goto out;
2001  
2002  		devl_lock(devlink);
2003  
2004  		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
2005  			mlxsw_sp_port_create(mlxsw_sp, local_port,
2006  					     false, &port_mapping);
2007  		else
2008  			WARN_ON_ONCE(1);
2009  
2010  		devl_unlock(devlink);
2011  
2012  		mlxsw_sp->port_mapping[local_port] = port_mapping;
2013  
2014  out:
2015  		kfree(event);
2016  	}
2017  }
2018  
2019  static void
2020  mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2021  				    char *pmlp_pl, void *priv)
2022  {
2023  	struct mlxsw_sp_port_mapping_events *events;
2024  	struct mlxsw_sp_port_mapping_event *event;
2025  	struct mlxsw_sp *mlxsw_sp = priv;
2026  	u16 local_port;
2027  
2028  	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2029  	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2030  		return;
2031  
2032  	events = &mlxsw_sp->port_mapping_events;
2033  	event = kmalloc(sizeof(*event), GFP_ATOMIC);
2034  	if (!event)
2035  		return;
2036  	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2037  	spin_lock(&events->queue_lock);
2038  	list_add_tail(&event->list, &events->queue);
2039  	spin_unlock(&events->queue_lock);
2040  	mlxsw_core_schedule_work(&events->work);
2041  }
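/* A note on context (inferred from the GFP_ATOMIC allocation above): the
 * listener runs in atomic trap-processing context, so it only copies the
 * PMLP payload and queues it under the spinlock; the actual port creation,
 * which may sleep (devl_lock(), netdev allocation, etc.), is deferred to
 * mlxsw_sp_port_mapping_events_work().
 */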
2042  
2043  static void
2044  __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2045  {
2046  	struct mlxsw_sp_port_mapping_event *event, *next_event;
2047  	struct mlxsw_sp_port_mapping_events *events;
2048  
2049  	events = &mlxsw_sp->port_mapping_events;
2050  
2051  	/* Caller needs to make sure that no new event is going to appear. */
2052  	cancel_work_sync(&events->work);
2053  	list_for_each_entry_safe(event, next_event, &events->queue, list) {
2054  		list_del(&event->list);
2055  		kfree(event);
2056  	}
2057  }
2058  
2059  static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2060  {
2061  	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2062  	int i;
2063  
2064  	for (i = 1; i < max_ports; i++)
2065  		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2066  	/* Make sure all scheduled events are processed */
2067  	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2068  
2069  	for (i = 1; i < max_ports; i++)
2070  		if (mlxsw_sp_port_created(mlxsw_sp, i))
2071  			mlxsw_sp_port_remove(mlxsw_sp, i);
2072  	mlxsw_sp_cpu_port_remove(mlxsw_sp);
2073  	kfree(mlxsw_sp->ports);
2074  	mlxsw_sp->ports = NULL;
2075  }
2076  
2077  static void
2078  mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2079  			       bool (*selector)(void *priv, u16 local_port),
2080  			       void *priv)
2081  {
2082  	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2083  	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2084  	int i;
2085  
2086  	for (i = 1; i < max_ports; i++)
2087  		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2088  			mlxsw_sp_port_remove(mlxsw_sp, i);
2089  }
2090  
2091  static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2092  {
2093  	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2094  	struct mlxsw_sp_port_mapping_events *events;
2095  	struct mlxsw_sp_port_mapping *port_mapping;
2096  	size_t alloc_size;
2097  	int i;
2098  	int err;
2099  
2100  	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2101  	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2102  	if (!mlxsw_sp->ports)
2103  		return -ENOMEM;
2104  
2105  	events = &mlxsw_sp->port_mapping_events;
2106  	INIT_LIST_HEAD(&events->queue);
2107  	spin_lock_init(&events->queue_lock);
2108  	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
2109  
2110  	for (i = 1; i < max_ports; i++) {
2111  		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
2112  		if (err)
2113  			goto err_event_enable;
2114  	}
2115  
2116  	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
2117  	if (err)
2118  		goto err_cpu_port_create;
2119  
2120  	for (i = 1; i < max_ports; i++) {
2121  		port_mapping = &mlxsw_sp->port_mapping[i];
2122  		if (!port_mapping->width)
2123  			continue;
2124  		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
2125  		if (err)
2126  			goto err_port_create;
2127  	}
2128  	return 0;
2129  
2130  err_port_create:
2131  	for (i--; i >= 1; i--)
2132  		if (mlxsw_sp_port_created(mlxsw_sp, i))
2133  			mlxsw_sp_port_remove(mlxsw_sp, i);
2134  	i = max_ports;
2135  	mlxsw_sp_cpu_port_remove(mlxsw_sp);
2136  err_cpu_port_create:
2137  err_event_enable:
2138  	for (i--; i >= 1; i--)
2139  		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2140  	/* Make sure all scheduled events are processed */
2141  	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2142  	kfree(mlxsw_sp->ports);
2143  	mlxsw_sp->ports = NULL;
2144  	return err;
2145  }
2146  
2147  static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2148  {
2149  	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2150  	struct mlxsw_sp_port_mapping *port_mapping;
2151  	int i;
2152  	int err;
2153  
2154  	mlxsw_sp->port_mapping = kcalloc(max_ports,
2155  					 sizeof(struct mlxsw_sp_port_mapping),
2156  					 GFP_KERNEL);
2157  	if (!mlxsw_sp->port_mapping)
2158  		return -ENOMEM;
2159  
2160  	for (i = 1; i < max_ports; i++) {
2161  		port_mapping = &mlxsw_sp->port_mapping[i];
2162  		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2163  		if (err)
2164  			goto err_port_module_info_get;
2165  	}
2166  	return 0;
2167  
2168  err_port_module_info_get:
2169  	kfree(mlxsw_sp->port_mapping);
2170  	return err;
2171  }
2172  
2173  static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
2174  {
2175  	kfree(mlxsw_sp->port_mapping);
2176  }
2177  
2178  static int
2179  mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
2180  			   struct mlxsw_sp_port_mapping *port_mapping,
2181  			   unsigned int count, const char *pmtdb_pl)
2182  {
2183  	struct mlxsw_sp_port_mapping split_port_mapping;
2184  	int err, i;
2185  
2186  	split_port_mapping = *port_mapping;
2187  	split_port_mapping.width /= count;
2188  	for (i = 0; i < count; i++) {
2189  		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2190  
2191  		if (!mlxsw_sp_local_port_valid(s_local_port))
2192  			continue;
2193  
2194  		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
2195  					   true, &split_port_mapping);
2196  		if (err)
2197  			goto err_port_create;
2198  		split_port_mapping.lane += split_port_mapping.width;
2199  	}
2200  
2201  	return 0;
2202  
2203  err_port_create:
2204  	for (i--; i >= 0; i--) {
2205  		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2206  
2207  		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2208  			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2209  	}
2210  	return err;
2211  }
2212  
2213  static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2214  					 unsigned int count,
2215  					 const char *pmtdb_pl)
2216  {
2217  	struct mlxsw_sp_port_mapping *port_mapping;
2218  	int i;
2219  
2220  	/* Go over original unsplit ports in the gap and recreate them. */
2221  	for (i = 0; i < count; i++) {
2222  		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2223  
2224  		port_mapping = &mlxsw_sp->port_mapping[local_port];
2225  		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2226  			continue;
2227  		mlxsw_sp_port_create(mlxsw_sp, local_port,
2228  				     false, port_mapping);
2229  	}
2230  }
2231  
2232  static struct mlxsw_sp_port *
2233  mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2234  {
2235  	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2236  		return mlxsw_sp->ports[local_port];
2237  	return NULL;
2238  }
2239  
2240  static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
2241  			       unsigned int count,
2242  			       struct netlink_ext_ack *extack)
2243  {
2244  	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2245  	struct mlxsw_sp_port_mapping port_mapping;
2246  	struct mlxsw_sp_port *mlxsw_sp_port;
2247  	enum mlxsw_reg_pmtdb_status status;
2248  	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
2249  	int i;
2250  	int err;
2251  
2252  	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2253  	if (!mlxsw_sp_port) {
2254  		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2255  			local_port);
2256  		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2257  		return -EINVAL;
2258  	}
2259  
2260  	if (mlxsw_sp_port->split) {
2261  		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
2262  		return -EINVAL;
2263  	}
2264  
2265  	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
2266  			     mlxsw_sp_port->mapping.module,
2267  			     mlxsw_sp_port->mapping.module_width / count,
2268  			     count);
2269  	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2270  	if (err) {
2271  		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2272  		return err;
2273  	}
2274  
2275  	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
2276  	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
2277  		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
2278  		return -EINVAL;
2279  	}
2280  
2281  	port_mapping = mlxsw_sp_port->mapping;
2282  
2283  	for (i = 0; i < count; i++) {
2284  		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2285  
2286  		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2287  			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2288  	}
2289  
2290  	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
2291  					 count, pmtdb_pl);
2292  	if (err) {
2293  		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2294  		goto err_port_split_create;
2295  	}
2296  
2297  	return 0;
2298  
2299  err_port_split_create:
2300  	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
2301  
2302  	return err;
2303  }
2304  
2305  static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
2306  				 struct netlink_ext_ack *extack)
2307  {
2308  	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2309  	struct mlxsw_sp_port *mlxsw_sp_port;
2310  	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
2311  	unsigned int count;
2312  	int i;
2313  	int err;
2314  
2315  	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2316  	if (!mlxsw_sp_port) {
2317  		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2318  			local_port);
2319  		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2320  		return -EINVAL;
2321  	}
2322  
2323  	if (!mlxsw_sp_port->split) {
2324  		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
2325  		return -EINVAL;
2326  	}
2327  
2328  	count = mlxsw_sp_port->mapping.module_width /
2329  		mlxsw_sp_port->mapping.width;
2330  
2331  	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
2332  			     mlxsw_sp_port->mapping.module,
2333  			     mlxsw_sp_port->mapping.module_width / count,
2334  			     count);
2335  	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
2336  	if (err) {
2337  		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
2338  		return err;
2339  	}
2340  
2341  	for (i = 0; i < count; i++) {
2342  		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2343  
2344  		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
2345  			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
2346  	}
2347  
2348  	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
2349  
2350  	return 0;
2351  }
2352  
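/* Presumably the hardware flushes the egress queues when a port goes down,
 * so the backlog cached in the periodic xstats would otherwise go stale;
 * it is therefore zeroed on link-down (see the PUDE handler below).
 */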
2353  static void
2354  mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2355  {
2356  	int i;
2357  
2358  	for (i = 0; i < TC_MAX_QUEUE; i++)
2359  		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2360  }
2361  
2362  static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2363  				     char *pude_pl, void *priv)
2364  {
2365  	struct mlxsw_sp *mlxsw_sp = priv;
2366  	struct mlxsw_sp_port *mlxsw_sp_port;
2367  	enum mlxsw_reg_pude_oper_status status;
2368  	u16 local_port;
2369  
2370  	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2371  
2372  	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2373  		return;
2374  	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2375  	if (!mlxsw_sp_port)
2376  		return;
2377  
2378  	status = mlxsw_reg_pude_oper_status_get(pude_pl);
2379  	if (status == MLXSW_PORT_OPER_STATUS_UP) {
2380  		netdev_info(mlxsw_sp_port->dev, "link up\n");
2381  		netif_carrier_on(mlxsw_sp_port->dev);
2382  		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2383  	} else {
2384  		netdev_info(mlxsw_sp_port->dev, "link down\n");
2385  		netif_carrier_off(mlxsw_sp_port->dev);
2386  		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2387  	}
2388  }
2389  
2390  static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2391  					  char *mtpptr_pl, bool ingress)
2392  {
2393  	u16 local_port;
2394  	u8 num_rec;
2395  	int i;
2396  
2397  	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2398  	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2399  	for (i = 0; i < num_rec; i++) {
2400  		u8 domain_number;
2401  		u8 message_type;
2402  		u16 sequence_id;
2403  		u64 timestamp;
2404  
2405  		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2406  					&domain_number, &sequence_id,
2407  					&timestamp);
2408  		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2409  					    message_type, domain_number,
2410  					    sequence_id, timestamp);
2411  	}
2412  }
2413  
2414  static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2415  					      char *mtpptr_pl, void *priv)
2416  {
2417  	struct mlxsw_sp *mlxsw_sp = priv;
2418  
2419  	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2420  }
2421  
2422  static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2423  					      char *mtpptr_pl, void *priv)
2424  {
2425  	struct mlxsw_sp *mlxsw_sp = priv;
2426  
2427  	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2428  }
2429  
2430  void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2431  				       u16 local_port, void *priv)
2432  {
2433  	struct mlxsw_sp *mlxsw_sp = priv;
2434  	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2435  	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2436  
2437  	if (unlikely(!mlxsw_sp_port)) {
2438  		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2439  				     local_port);
2440  		return;
2441  	}
2442  
2443  	skb->dev = mlxsw_sp_port->dev;
2444  
2445  	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2446  	u64_stats_update_begin(&pcpu_stats->syncp);
2447  	pcpu_stats->rx_packets++;
2448  	pcpu_stats->rx_bytes += skb->len;
2449  	u64_stats_update_end(&pcpu_stats->syncp);
2450  
2451  	skb->protocol = eth_type_trans(skb, skb->dev);
2452  	netif_receive_skb(skb);
2453  }
2454  
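/* The mark variants below set skb->offload_fwd_mark (and, for the L3
 * variant, skb->offload_l3_fwd_mark) so that the software bridge/router
 * does not forward again a packet that the ASIC already forwarded in
 * hardware.
 */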
2455  static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2456  					   void *priv)
2457  {
2458  	skb->offload_fwd_mark = 1;
2459  	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2460  }
2461  
2462  static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2463  					      u16 local_port, void *priv)
2464  {
2465  	skb->offload_l3_fwd_mark = 1;
2466  	skb->offload_fwd_mark = 1;
2467  	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2468  }
2469  
2470  void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2471  			  u16 local_port)
2472  {
2473  	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2474  }
2475  
2476  #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2477  	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
2478  		  _is_ctrl, SP_##_trap_group, DISCARD)
2479  
2480  #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2481  	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
2482  		_is_ctrl, SP_##_trap_group, DISCARD)
2483  
2484  #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
2485  	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
2486  		_is_ctrl, SP_##_trap_group, DISCARD)
2487  
2488  #define MLXSW_SP_EVENTL(_func, _trap_id)		\
2489  	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2490  
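/* For example (illustrative expansion only), the FID_MISS entry in the
 * array below is shorthand for:
 *
 *	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, FID_MISS, TRAP_TO_CPU,
 *		  false, SP_FID_MISS, DISCARD)
 */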
2491  static const struct mlxsw_listener mlxsw_sp_listener[] = {
2492  	/* Events */
2493  	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2494  	/* L2 traps */
2495  	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
2496  	/* L3 traps */
2497  	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
2498  			  false),
2499  	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
2500  	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
2501  			  false),
2502  	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
2503  			     ROUTER_EXP, false),
2504  	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
2505  			     ROUTER_EXP, false),
2506  	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
2507  			     ROUTER_EXP, false),
2508  	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
2509  			     ROUTER_EXP, false),
2510  	/* Multicast Router Traps */
2511  	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
2512  	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
2513  	/* NVE traps */
2514  	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
2515  };
2516  
2517  static const struct mlxsw_listener mlxsw_sp1_listener[] = {
2518  	/* Events */
2519  	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
2520  	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
2521  };
2522  
2523  static const struct mlxsw_listener mlxsw_sp2_listener[] = {
2524  	/* Events */
2525  	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
2526  };
2527  
2528  static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2529  {
2530  	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2531  	char qpcr_pl[MLXSW_REG_QPCR_LEN];
2532  	enum mlxsw_reg_qpcr_ir_units ir_units;
2533  	int max_cpu_policers;
2534  	bool is_bytes;
2535  	u8 burst_size;
2536  	u32 rate;
2537  	int i, err;
2538  
2539  	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2540  		return -EIO;
2541  
2542  	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2543  
2544  	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2545  	for (i = 0; i < max_cpu_policers; i++) {
2546  		is_bytes = false;
2547  		switch (i) {
2548  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2549  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2550  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2551  			rate = 1024;
2552  			burst_size = 7;
2553  			break;
2554  		default:
2555  			continue;
2556  		}
2557  
2558  		__set_bit(i, mlxsw_sp->trap->policers_usage);
2559  		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2560  				    burst_size);
2561  		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2562  		if (err)
2563  			return err;
2564  	}
2565  
2566  	return 0;
2567  }
2568  
2569  static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2570  {
2571  	char htgt_pl[MLXSW_REG_HTGT_LEN];
2572  	enum mlxsw_reg_htgt_trap_group i;
2573  	int max_cpu_policers;
2574  	int max_trap_groups;
2575  	u8 priority, tc;
2576  	u16 policer_id;
2577  	int err;
2578  
2579  	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2580  		return -EIO;
2581  
2582  	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2583  	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2584  
2585  	for (i = 0; i < max_trap_groups; i++) {
2586  		policer_id = i;
2587  		switch (i) {
2588  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2589  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2590  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2591  			priority = 1;
2592  			tc = 1;
2593  			break;
2594  		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2595  			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2596  			tc = MLXSW_REG_HTGT_DEFAULT_TC;
2597  			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2598  			break;
2599  		default:
2600  			continue;
2601  		}
2602  
2603  		if (max_cpu_policers <= policer_id &&
2604  		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2605  			return -EIO;
2606  
2607  		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2608  		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2609  		if (err)
2610  			return err;
2611  	}
2612  
2613  	return 0;
2614  }
2615  
2616  static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2617  {
2618  	struct mlxsw_sp_trap *trap;
2619  	u64 max_policers;
2620  	int err;
2621  
2622  	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2623  		return -EIO;
2624  	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2625  	trap = kzalloc(struct_size(trap, policers_usage,
2626  				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2627  	if (!trap)
2628  		return -ENOMEM;
2629  	trap->max_policers = max_policers;
2630  	mlxsw_sp->trap = trap;
2631  
2632  	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2633  	if (err)
2634  		goto err_cpu_policers_set;
2635  
2636  	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2637  	if (err)
2638  		goto err_trap_groups_set;
2639  
2640  	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
2641  					ARRAY_SIZE(mlxsw_sp_listener),
2642  					mlxsw_sp);
2643  	if (err)
2644  		goto err_traps_register;
2645  
2646  	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
2647  					mlxsw_sp->listeners_count, mlxsw_sp);
2648  	if (err)
2649  		goto err_extra_traps_init;
2650  
2651  	return 0;
2652  
2653  err_extra_traps_init:
2654  	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2655  				    ARRAY_SIZE(mlxsw_sp_listener),
2656  				    mlxsw_sp);
2657  err_traps_register:
2658  err_trap_groups_set:
2659  err_cpu_policers_set:
2660  	kfree(trap);
2661  	return err;
2662  }
2663  
2664  static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2665  {
2666  	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
2667  				    mlxsw_sp->listeners_count,
2668  				    mlxsw_sp);
2669  	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2670  				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
2671  	kfree(mlxsw_sp->trap);
2672  }
2673  
2674  static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
2675  {
2676  	char sgcr_pl[MLXSW_REG_SGCR_LEN];
2677  	int err;
2678  
2679  	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2680  	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2681  		return 0;
2682  
2683  	/* In DDD mode, which we use by default, each LAG entry occupies 8 PGT
2684  	 * entries. The LAG table address needs to be 8-aligned, but that ought
2685  	 * to be the case, since the LAG table is allocated first.
2686  	 */
2687  	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
2688  					   mlxsw_sp->max_lag * 8);
2689  	if (err)
2690  		return err;
2691  	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
2692  		err = -EINVAL;
2693  		goto err_mid_alloc_range;
2694  	}
2695  
2696  	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
2697  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
2698  	if (err)
2699  		goto err_mid_alloc_range;
2700  
2701  	return 0;
2702  
2703  err_mid_alloc_range:
2704  	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2705  				    mlxsw_sp->max_lag * 8);
2706  	return err;
2707  }
2708  
2709  static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2710  {
2711  	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2712  	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2713  		return;
2714  
2715  	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2716  				    mlxsw_sp->max_lag * 8);
2717  }
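/* Illustrative addressing sketch, an assumption following from the 8-entry
 * allocation and alignment requirement above rather than a hardware
 * definition: the PGT slot of member entry 'i' of 'lag_id' would be
 *
 *	addr = mlxsw_sp->lag_pgt_base + lag_id * 8 + i;
 */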
2718  
2719  #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2720  
2721  struct mlxsw_sp_lag {
2722  	struct net_device *dev;
2723  	refcount_t ref_count;
2724  	u16 lag_id;
2725  };
2726  
2727  static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2728  {
2729  	char slcr_pl[MLXSW_REG_SLCR_LEN];
2730  	u32 seed;
2731  	int err;
2732  
2733  	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2734  		     MLXSW_SP_LAG_SEED_INIT);
2735  	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2736  				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
2737  				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2738  				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
2739  				     MLXSW_REG_SLCR_LAG_HASH_SIP |
2740  				     MLXSW_REG_SLCR_LAG_HASH_DIP |
2741  				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
2742  				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
2743  				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2744  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2745  	if (err)
2746  		return err;
2747  
2748  	err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
2749  	if (err)
2750  		return err;
2751  
2752  	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2753  		return -EIO;
2754  
2755  	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
2756  	if (err)
2757  		return err;
2758  
2759  	mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
2760  				 GFP_KERNEL);
2761  	if (!mlxsw_sp->lags) {
2762  		err = -ENOMEM;
2763  		goto err_kcalloc;
2764  	}
2765  
2766  	return 0;
2767  
2768  err_kcalloc:
2769  	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2770  	return err;
2771  }
2772  
2773  static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2774  {
2775  	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2776  	kfree(mlxsw_sp->lags);
2777  }
2778  
2779  static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
2780  	.clock_init	= mlxsw_sp1_ptp_clock_init,
2781  	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
2782  	.init		= mlxsw_sp1_ptp_init,
2783  	.fini		= mlxsw_sp1_ptp_fini,
2784  	.receive	= mlxsw_sp1_ptp_receive,
2785  	.transmitted	= mlxsw_sp1_ptp_transmitted,
2786  	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
2787  	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
2788  	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
2789  #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
2790  	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
2791  #endif
2792  	.get_stats_count = mlxsw_sp1_get_stats_count,
2793  	.get_stats_strings = mlxsw_sp1_get_stats_strings,
2794  	.get_stats	= mlxsw_sp1_get_stats,
2795  	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2796  };
2797  
2798  static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
2799  	.clock_init	= mlxsw_sp2_ptp_clock_init,
2800  	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
2801  	.init		= mlxsw_sp2_ptp_init,
2802  	.fini		= mlxsw_sp2_ptp_fini,
2803  	.receive	= mlxsw_sp2_ptp_receive,
2804  	.transmitted	= mlxsw_sp2_ptp_transmitted,
2805  	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
2806  	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
2807  	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
2808  #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
2809  	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
2810  #endif
2811  	.get_stats_count = mlxsw_sp2_get_stats_count,
2812  	.get_stats_strings = mlxsw_sp2_get_stats_strings,
2813  	.get_stats	= mlxsw_sp2_get_stats,
2814  	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
2815  };
2816  
2817  static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
2818  	.clock_init	= mlxsw_sp2_ptp_clock_init,
2819  	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
2820  	.init		= mlxsw_sp2_ptp_init,
2821  	.fini		= mlxsw_sp2_ptp_fini,
2822  	.receive	= mlxsw_sp2_ptp_receive,
2823  	.transmitted	= mlxsw_sp2_ptp_transmitted,
2824  	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
2825  	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
2826  	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
2827  #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
2828  	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
2829  #endif
2830  	.get_stats_count = mlxsw_sp2_get_stats_count,
2831  	.get_stats_strings = mlxsw_sp2_get_stats_strings,
2832  	.get_stats	= mlxsw_sp2_get_stats,
2833  	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
2834  };
2835  
2836  struct mlxsw_sp_sample_trigger_node {
2837  	struct mlxsw_sp_sample_trigger trigger;
2838  	struct mlxsw_sp_sample_params params;
2839  	struct rhash_head ht_node;
2840  	struct rcu_head rcu;
2841  	refcount_t refcount;
2842  };
2843  
2844  static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
2845  	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
2846  	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
2847  	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
2848  	.automatic_shrinking = true,
2849  };
2850  
2851  static void
2852  mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
2853  				 const struct mlxsw_sp_sample_trigger *trigger)
2854  {
2855  	memset(key, 0, sizeof(*key));
2856  	key->type = trigger->type;
2857  	key->local_port = trigger->local_port;
2858  }
2859  
2860  /* RCU read lock must be held */
2861  struct mlxsw_sp_sample_params *
2862  mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2863  				      const struct mlxsw_sp_sample_trigger *trigger)
2864  {
2865  	struct mlxsw_sp_sample_trigger_node *trigger_node;
2866  	struct mlxsw_sp_sample_trigger key;
2867  
2868  	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2869  	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2870  					 mlxsw_sp_sample_trigger_ht_params);
2871  	if (!trigger_node)
2872  		return NULL;
2873  
2874  	return &trigger_node->params;
2875  }
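/* Minimal caller-side sketch (hypothetical context), per the locking
 * comment above; the returned parameters are only valid inside the RCU
 * read-side critical section:
 *
 *	rcu_read_lock();
 *	params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, trigger);
 *	if (params)
 *		rate = params->rate;
 *	rcu_read_unlock();
 */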
2876  
2877  static int
2878  mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2879  				  const struct mlxsw_sp_sample_trigger *trigger,
2880  				  const struct mlxsw_sp_sample_params *params)
2881  {
2882  	struct mlxsw_sp_sample_trigger_node *trigger_node;
2883  	int err;
2884  
2885  	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2886  	if (!trigger_node)
2887  		return -ENOMEM;
2888  
2889  	trigger_node->trigger = *trigger;
2890  	trigger_node->params = *params;
2891  	refcount_set(&trigger_node->refcount, 1);
2892  
2893  	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2894  				     &trigger_node->ht_node,
2895  				     mlxsw_sp_sample_trigger_ht_params);
2896  	if (err)
2897  		goto err_rhashtable_insert;
2898  
2899  	return 0;
2900  
2901  err_rhashtable_insert:
2902  	kfree(trigger_node);
2903  	return err;
2904  }
2905  
2906  static void
2907  mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
2908  				  struct mlxsw_sp_sample_trigger_node *trigger_node)
2909  {
2910  	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
2911  			       &trigger_node->ht_node,
2912  			       mlxsw_sp_sample_trigger_ht_params);
2913  	kfree_rcu(trigger_node, rcu);
2914  }
2915  
2916  int
2917  mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
2918  				   const struct mlxsw_sp_sample_trigger *trigger,
2919  				   const struct mlxsw_sp_sample_params *params,
2920  				   struct netlink_ext_ack *extack)
2921  {
2922  	struct mlxsw_sp_sample_trigger_node *trigger_node;
2923  	struct mlxsw_sp_sample_trigger key;
2924  
2925  	ASSERT_RTNL();
2926  
2927  	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2928  
2929  	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2930  					      &key,
2931  					      mlxsw_sp_sample_trigger_ht_params);
2932  	if (!trigger_node)
2933  		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
2934  							 params);
2935  
2936  	if (trigger_node->trigger.local_port) {
2937  		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
2938  		return -EINVAL;
2939  	}
2940  
2941  	if (trigger_node->params.psample_group != params->psample_group ||
2942  	    trigger_node->params.truncate != params->truncate ||
2943  	    trigger_node->params.rate != params->rate ||
2944  	    trigger_node->params.trunc_size != params->trunc_size) {
2945  		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
2946  		return -EINVAL;
2947  	}
2948  
2949  	refcount_inc(&trigger_node->refcount);
2950  
2951  	return 0;
2952  }
2953  
2954  void
2955  mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2956  				     const struct mlxsw_sp_sample_trigger *trigger)
2957  {
2958  	struct mlxsw_sp_sample_trigger_node *trigger_node;
2959  	struct mlxsw_sp_sample_trigger key;
2960  
2961  	ASSERT_RTNL();
2962  
2963  	mlxsw_sp_sample_trigger_key_init(&key, trigger);
2964  
2965  	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2966  					      &key,
2967  					      mlxsw_sp_sample_trigger_ht_params);
2968  	if (!trigger_node)
2969  		return;
2970  
2971  	if (!refcount_dec_and_test(&trigger_node->refcount))
2972  		return;
2973  
2974  	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2975  }
2976  
2977  static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2978  				    unsigned long event, void *ptr);
2979  
2980  #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
2981  #define MLXSW_SP_INCREASED_PARSING_DEPTH 128
2982  #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
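/* The increased depth presumably lets the device parse past the default
 * 96 bytes into inner headers, e.g. for VXLAN-encapsulated or PTP-over-UDP
 * packets; the VXLAN UDP destination port defaults to the IANA-assigned
 * 4789.
 */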
2983  
2984  static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
2985  {
2986  	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
2987  	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
2988  	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
2989  	mutex_init(&mlxsw_sp->parsing.lock);
2990  }
2991  
2992  static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
2993  {
2994  	mutex_destroy(&mlxsw_sp->parsing.lock);
2995  	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
2996  }
2997  
2998  struct mlxsw_sp_ipv6_addr_node {
2999  	struct in6_addr key;
3000  	struct rhash_head ht_node;
3001  	u32 kvdl_index;
3002  	refcount_t refcount;
3003  };
3004  
3005  static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
3006  	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
3007  	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
3008  	.key_len = sizeof(struct in6_addr),
3009  	.automatic_shrinking = true,
3010  };
3011  
3012  static int
3013  mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
3014  			u32 *p_kvdl_index)
3015  {
3016  	struct mlxsw_sp_ipv6_addr_node *node;
3017  	char rips_pl[MLXSW_REG_RIPS_LEN];
3018  	int err;
3019  
3020  	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
3021  				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3022  				  p_kvdl_index);
3023  	if (err)
3024  		return err;
3025  
3026  	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
3027  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
3028  	if (err)
3029  		goto err_rips_write;
3030  
3031  	node = kzalloc(sizeof(*node), GFP_KERNEL);
3032  	if (!node) {
3033  		err = -ENOMEM;
3034  		goto err_node_alloc;
3035  	}
3036  
3037  	node->key = *addr6;
3038  	node->kvdl_index = *p_kvdl_index;
3039  	refcount_set(&node->refcount, 1);
3040  
3041  	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
3042  				     &node->ht_node,
3043  				     mlxsw_sp_ipv6_addr_ht_params);
3044  	if (err)
3045  		goto err_rhashtable_insert;
3046  
3047  	return 0;
3048  
3049  err_rhashtable_insert:
3050  	kfree(node);
3051  err_node_alloc:
3052  err_rips_write:
3053  	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3054  			   *p_kvdl_index);
3055  	return err;
3056  }
3057  
mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipv6_addr_node * node)3058  static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
3059  				    struct mlxsw_sp_ipv6_addr_node *node)
3060  {
3061  	u32 kvdl_index = node->kvdl_index;
3062  
3063  	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
3064  			       mlxsw_sp_ipv6_addr_ht_params);
3065  	kfree(node);
3066  	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3067  			   kvdl_index);
3068  }
3069  
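/* Look up, or create, the KVDL entry holding @addr6 and return its
 * index. The ipv6_addr_ht_lock mutex serializes lookup-and-insert
 * against mlxsw_sp_ipv6_addr_put(), which releases the entry when the
 * last reference is dropped.
 */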
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}

static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}

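/* Common initialization for all Spectrum generations. Sub-systems are
 * brought up in dependency order, and the error labels below unwind, in
 * reverse order, exactly what was initialized before the failure.
 */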
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	/* Initialize before FIDs so that the LAG table is at the start of PGT
	 * and 8-aligned without overallocation.
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fid_core_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
err_fid_core_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}

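/* The per-generation init callbacks below differ only in the operation
 * tables and constants they assign for their ASIC before handing off to
 * the common mlxsw_sp_init().
 */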
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}

static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};

/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode                = 1,
	.flood_mode                     = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};

static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

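/* Spectrum-1 registers the KVD as a top-level devlink resource with the
 * linear part and the single/double hash parts as child resources. The
 * double hash size is derived from the profile's parts ratio and rounded
 * down to the KVD granularity; the single hash part takes the remainder.
 */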
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				      kvd_size, MLXSW_SP_RESOURCE_KVD,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
				      max_span, MLXSW_SP_RESOURCE_SPAN,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &span_size_params);
}

static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      "rif_mac_profiles",
				      max_rif_mac_profiles,
				      MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "rifs", max_rifs,
				      MLXSW_SP_RESOURCE_RIFS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int
mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
		return -EIO;

	max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
	devlink_resource_size_params_init(&size_params, max, max, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "port_range_registers", max,
				      MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD after the linear part
	 * is taken out. It is split into the single and double sizes
	 * according to the parts ratio from the profile. Both sizes must
	 * be multiples of the granularity from the profile. If the user
	 * provided the sizes, they are obtained via devlink.
	 */
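	/* Illustrative example (hypothetical numbers, not taken from any
	 * device): with a 425,984-entry hash part and a 41:59
	 * double:single split, the double part is 425984 * 41 / 100 =
	 * 174,653, rounded down to a granularity of 128 to 174,592
	 * entries, and the single part takes the remaining 251,392.
	 */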
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2		= false,
};

static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};

static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};

static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2		= true,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

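/* Callback for netdev_walk_all_lower_dev(): a non-zero return stops the
 * walk, so the first mlxsw_sp port found under the walked device is
 * returned through priv->data.
 */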
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

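/* The increased parsing depth is reference counted: the first user
 * writes MLXSW_SP_INCREASED_PARSING_DEPTH to the device via the MPRS
 * register, subsequent users only bump the count, and the last user to
 * leave (in mlxsw_sp_parsing_depth_dec()) restores the default depth.
 */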
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

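/* Create a LAG in the device: take the first free slot in the LAG table,
 * program it via the SLDR register and return it with an initial
 * reference. Callers go through mlxsw_sp_lag_get() / _put(), which share
 * an existing LAG when one already exists for the netdev.
 */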
static struct mlxsw_sp_lag *
mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
		    struct netlink_ext_ack *extack)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];
	struct mlxsw_sp_lag *lag;
	u16 lag_id;
	int i, err;

	for (i = 0; i < mlxsw_sp->max_lag; i++) {
		if (!mlxsw_sp->lags[i].dev)
			break;
	}

	if (i == mlxsw_sp->max_lag) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Exceeded number of supported LAG devices");
		return ERR_PTR(-EBUSY);
	}

	lag_id = i;
	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
	if (err)
		return ERR_PTR(err);

	lag = &mlxsw_sp->lags[lag_id];
	lag->lag_id = lag_id;
	lag->dev = lag_dev;
	refcount_set(&lag->ref_count, 1);

	return lag;
}

static int
mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	lag->dev = NULL;

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static struct mlxsw_sp_lag *
mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->max_lag; i++) {
		if (!mlxsw_sp->lags[i].dev)
			continue;

		if (mlxsw_sp->lags[i].dev == lag_dev)
			return &mlxsw_sp->lags[i];
	}

	return NULL;
}

static struct mlxsw_sp_lag *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
		 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_lag *lag;

	lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
	if (lag) {
		refcount_inc(&lag->ref_count);
		return lag;
	}

	return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
}

static void
mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
	if (!refcount_dec_and_test(&lag->ref_count))
		return;

	mlxsw_sp_lag_destroy(mlxsw_sp, lag);
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

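/* Replay bridge membership for the LAG device and its VLAN uppers. The
 * done counter records how many VLAN uppers were already processed, so
 * that the error path only unwinds the joins that actually happened
 * before leaving the bridge on the LAG device itself.
 */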
static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *lag_dev,
					   struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;
	int done = 0;
	int err;

	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master)) {
		err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
						extack);
		if (err)
			return err;
	}

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (master && netif_is_bridge_master(master)) {
			err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
							upper_dev, master,
							extack);
			if (err)
				goto err_port_bridge_join;
		}

		++done;
	}

	return 0;

err_port_bridge_join:
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master || !netif_is_bridge_master(master))
			continue;

		if (!done--)
			break;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);

	return err;
}

static void
mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master)
			continue;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	master = netdev_master_upper_dev_get(lag_dev);
	if (master)
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
}

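/* Joining a LAG: reserve a LAG ID and a free member index, replay bridge
 * memberships of the LAG's uppers, add the port to the LAG's collector
 * and only then mark the port as lagged and join the FID and router
 * constructs. The error labels unwind in reverse order.
 */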
mlxsw_sp_port_lag_join(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev,struct netlink_ext_ack * extack)4491  static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4492  				  struct net_device *lag_dev,
4493  				  struct netlink_ext_ack *extack)
4494  {
4495  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4496  	struct mlxsw_sp_lag *lag;
4497  	u16 lag_id;
4498  	u8 port_index;
4499  	int err;
4500  
4501  	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
4502  	if (IS_ERR(lag))
4503  		return PTR_ERR(lag);
4504  
4505  	lag_id = lag->lag_id;
4506  	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4507  	if (err)
4508  		return err;
4509  
4510  	err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
4511  					      extack);
4512  	if (err)
4513  		goto err_lag_uppers_bridge_join;
4514  
4515  	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4516  	if (err)
4517  		goto err_col_port_add;
4518  
4519  	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4520  				   mlxsw_sp_port->local_port);
4521  	mlxsw_sp_port->lag_id = lag_id;
4522  	mlxsw_sp_port->lagged = 1;
4523  
4524  	err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
4525  	if (err)
4526  		goto err_fid_port_join_lag;
4527  
4528  	/* Port is no longer usable as a router interface */
4529  	if (mlxsw_sp_port->default_vlan->fid)
4530  		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4531  
4532  	/* Join a router interface configured on the LAG, if one exists */
4533  	err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
4534  					    extack);
4535  	if (err)
4536  		goto err_router_join;
4537  
4538  	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
4539  	if (err)
4540  		goto err_replay;
4541  
4542  	return 0;
4543  
4544  err_replay:
4545  	mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
4546  err_router_join:
4547  	mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4548  err_fid_port_join_lag:
4549  	mlxsw_sp_port->lagged = 0;
4550  	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4551  				     mlxsw_sp_port->local_port);
4552  	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4553  err_col_port_add:
4554  	mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
4555  err_lag_uppers_bridge_join:
4556  	mlxsw_sp_lag_put(mlxsw_sp, lag);
4557  	return err;
4558  }
4559  
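/* Teardown mirrors mlxsw_sp_port_lag_join(): remove the collector port,
 * flush VLANs, leave the bridges joined above, drop the LAG reference
 * and restore the default PVID so untagged traffic can still ingress.
 */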
4560  static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4561  				    struct net_device *lag_dev)
4562  {
4563  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4564  	u16 lag_id = mlxsw_sp_port->lag_id;
4565  	struct mlxsw_sp_lag *lag;
4566  
4567  	if (!mlxsw_sp_port->lagged)
4568  		return;
4569  	lag = &mlxsw_sp->lags[lag_id];
4570  
4571  	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4572  
4573  	/* Any VLANs configured on the port are no longer valid */
4574  	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4575  	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
4576  	/* Make the LAG and its directly linked uppers leave the bridges
4577  	 * they are members of
4578  	 */
4579  	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4580  
4581  	mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4582  
4583  	mlxsw_sp_lag_put(mlxsw_sp, lag);
4584  
4585  	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4586  				     mlxsw_sp_port->local_port);
4587  	mlxsw_sp_port->lagged = 0;
4588  
4589  	/* Make sure untagged frames are allowed to ingress */
4590  	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
4591  			       ETH_P_8021Q);
4592  }
4593  
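/* Distribution (Tx) membership is controlled through the SLDR (Switch
 * LAG Descriptor) register: adding a port lets the hardware hash egress
 * traffic towards it, removing it excludes the port from the hash.
 */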
4594  static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4595  				      u16 lag_id)
4596  {
4597  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4598  	char sldr_pl[MLXSW_REG_SLDR_LEN];
4599  
4600  	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4601  					 mlxsw_sp_port->local_port);
4602  	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4603  }
4604  
4605  static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4606  					 u16 lag_id)
4607  {
4608  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4609  	char sldr_pl[MLXSW_REG_SLDR_LEN];
4610  
4611  	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4612  					    mlxsw_sp_port->local_port);
4613  	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4614  }
4615  
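/* Collection (Rx) is enabled before distribution (Tx) and disabled in
 * the opposite order; each helper rolls back its first step if the
 * second one fails, leaving the member in a consistent state.
 */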
4616  static int
4617  mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4618  {
4619  	int err;
4620  
4621  	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4622  					   mlxsw_sp_port->lag_id);
4623  	if (err)
4624  		return err;
4625  
4626  	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4627  	if (err)
4628  		goto err_dist_port_add;
4629  
4630  	return 0;
4631  
4632  err_dist_port_add:
4633  	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4634  	return err;
4635  }
4636  
4637  static int
4638  mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4639  {
4640  	int err;
4641  
4642  	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4643  					    mlxsw_sp_port->lag_id);
4644  	if (err)
4645  		return err;
4646  
4647  	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4648  					    mlxsw_sp_port->lag_id);
4649  	if (err)
4650  		goto err_col_port_disable;
4651  
4652  	return 0;
4653  
4654  err_col_port_disable:
4655  	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4656  	return err;
4657  }
4658  
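/* Reflect LACP lower-state changes reported by the bonding/team driver:
 * tx_enabled toggles both collection and distribution for this member.
 */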
4659  static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4660  				     struct netdev_lag_lower_state_info *info)
4661  {
4662  	if (info->tx_enabled)
4663  		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4664  	else
4665  		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4666  }
4667  
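/* Set the same STP state (forwarding or discarding) for all VLANs on the
 * port with a single SPMS (Switch Port MSTP/RSTP State) register write;
 * the payload is heap-allocated because it covers every VID.
 */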
4668  static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4669  				 bool enable)
4670  {
4671  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4672  	enum mlxsw_reg_spms_state spms_state;
4673  	char *spms_pl;
4674  	u16 vid;
4675  	int err;
4676  
4677  	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4678  			      MLXSW_REG_SPMS_STATE_DISCARDING;
4679  
4680  	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4681  	if (!spms_pl)
4682  		return -ENOMEM;
4683  	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4684  
4685  	for (vid = 0; vid < VLAN_N_VID; vid++)
4686  		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4687  
4688  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4689  	kfree(spms_pl);
4690  	return err;
4691  }
4692  
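/* OVS slaves are moved to virtual port mode with STP set to forwarding,
 * membership in VIDs 1-4094 and learning disabled on VIDs 1-4095,
 * presumably leaving forwarding decisions to the OVS datapath;
 * mlxsw_sp_port_ovs_leave() undoes the same steps in reverse.
 */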
4693  static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4694  {
4695  	u16 vid = 1;
4696  	int err;
4697  
4698  	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4699  	if (err)
4700  		return err;
4701  	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4702  	if (err)
4703  		goto err_port_stp_set;
4704  	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4705  				     true, false);
4706  	if (err)
4707  		goto err_port_vlan_set;
4708  
4709  	for (; vid <= VLAN_N_VID - 1; vid++) {
4710  		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4711  						     vid, false);
4712  		if (err)
4713  			goto err_vid_learning_set;
4714  	}
4715  
4716  	return 0;
4717  
4718  err_vid_learning_set:
4719  	for (vid--; vid >= 1; vid--)
4720  		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4721  err_port_vlan_set:
4722  	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4723  err_port_stp_set:
4724  	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4725  	return err;
4726  }
4727  
4728  static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4729  {
4730  	u16 vid;
4731  
4732  	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4733  		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4734  					       vid, true);
4735  
4736  	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4737  			       false, false);
4738  	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4739  	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4740  }
4741  
4742  static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4743  {
4744  	unsigned int num_vxlans = 0;
4745  	struct net_device *dev;
4746  	struct list_head *iter;
4747  
4748  	netdev_for_each_lower_dev(br_dev, dev, iter) {
4749  		if (netif_is_vxlan(dev))
4750  			num_vxlans++;
4751  	}
4752  
4753  	return num_vxlans > 1;
4754  }
4755  
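/* In a VLAN-aware bridge every VxLAN device must map a distinct PVID;
 * the bitmap catches two devices claiming the same one, which would make
 * the VLAN-to-VNI mapping ambiguous.
 */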
4756  static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4757  {
4758  	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4759  	struct net_device *dev;
4760  	struct list_head *iter;
4761  
4762  	netdev_for_each_lower_dev(br_dev, dev, iter) {
4763  		u16 pvid;
4764  		int err;
4765  
4766  		if (!netif_is_vxlan(dev))
4767  			continue;
4768  
4769  		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4770  		if (err || !pvid)
4771  			continue;
4772  
4773  		if (test_and_set_bit(pvid, vlans))
4774  			return false;
4775  	}
4776  
4777  	return true;
4778  }
4779  
4780  static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4781  					   struct netlink_ext_ack *extack)
4782  {
4783  	if (br_multicast_enabled(br_dev)) {
4784  		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4785  		return false;
4786  	}
4787  
4788  	if (!br_vlan_enabled(br_dev) &&
4789  	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4790  		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4791  		return false;
4792  	}
4793  
4794  	if (br_vlan_enabled(br_dev) &&
4795  	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4796  		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4797  		return false;
4798  	}
4799  
4800  	return true;
4801  }
4802  
4803  static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4804  				      struct net_device *dev)
4805  {
4806  	return upper_dev == netdev_master_upper_dev_get(dev);
4807  }
4808  
4809  static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4810  				      unsigned long event, void *ptr,
4811  				      bool process_foreign);
4812  
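/* Recursively re-validate the existing upper hierarchy of 'dev' by
 * synthesizing NETDEV_PRECHANGEUPPER events for each upper, as if the
 * links were being created now; uppers set up while the device was still
 * foreign to the driver get vetted this way.
 */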
4813  static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
4814  					      struct net_device *dev,
4815  					      struct netlink_ext_ack *extack)
4816  {
4817  	struct net_device *upper_dev;
4818  	struct list_head *iter;
4819  	int err;
4820  
4821  	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
4822  		struct netdev_notifier_changeupper_info info = {
4823  			.info = {
4824  				.dev = dev,
4825  				.extack = extack,
4826  			},
4827  			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
4828  			.upper_dev = upper_dev,
4829  			.linking = true,
4830  
4831  			/* upper_info is relevant for LAG devices. But we would
4832  			 * only need this if LAG were a valid upper above
4833  			 * another upper (e.g. a bridge that is a member of a
4834  			 * LAG), and that is never a valid configuration. So we
4835  			 * can keep this as NULL.
4836  			 */
4837  			.upper_info = NULL,
4838  		};
4839  
4840  		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
4841  						 NETDEV_PRECHANGEUPPER,
4842  						 &info, true);
4843  		if (err)
4844  			return err;
4845  
4846  		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
4847  							 extack);
4848  		if (err)
4849  			return err;
4850  	}
4851  
4852  	return 0;
4853  }
4854  
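/* NETDEV_PRECHANGEUPPER is the veto point: unsupported upper types and
 * topologies are rejected with an extack message before the core commits
 * the link. NETDEV_CHANGEUPPER then performs the actual join/leave.
 */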
4855  static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4856  					       struct net_device *dev,
4857  					       unsigned long event, void *ptr,
4858  					       bool replay_deslavement)
4859  {
4860  	struct netdev_notifier_changeupper_info *info;
4861  	struct mlxsw_sp_port *mlxsw_sp_port;
4862  	struct netlink_ext_ack *extack;
4863  	struct net_device *upper_dev;
4864  	struct mlxsw_sp *mlxsw_sp;
4865  	int err = 0;
4866  	u16 proto;
4867  
4868  	mlxsw_sp_port = netdev_priv(dev);
4869  	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4870  	info = ptr;
4871  	extack = netdev_notifier_info_to_extack(&info->info);
4872  
4873  	switch (event) {
4874  	case NETDEV_PRECHANGEUPPER:
4875  		upper_dev = info->upper_dev;
4876  		if (!is_vlan_dev(upper_dev) &&
4877  		    !netif_is_lag_master(upper_dev) &&
4878  		    !netif_is_bridge_master(upper_dev) &&
4879  		    !netif_is_ovs_master(upper_dev) &&
4880  		    !netif_is_macvlan(upper_dev) &&
4881  		    !netif_is_l3_master(upper_dev)) {
4882  			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4883  			return -EINVAL;
4884  		}
4885  		if (!info->linking)
4886  			break;
4887  		if (netif_is_bridge_master(upper_dev) &&
4888  		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4889  		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4890  		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4891  			return -EOPNOTSUPP;
4892  		if (netdev_has_any_upper_dev(upper_dev) &&
4893  		    (!netif_is_bridge_master(upper_dev) ||
4894  		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4895  							  upper_dev))) {
4896  			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
4897  								 upper_dev,
4898  								 extack);
4899  			if (err)
4900  				return err;
4901  		}
4902  		if (netif_is_lag_master(upper_dev) &&
4903  		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4904  					       info->upper_info, extack))
4905  			return -EINVAL;
4906  		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4907  			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4908  			return -EINVAL;
4909  		}
4910  		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4911  		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4912  			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4913  			return -EINVAL;
4914  		}
4915  		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4916  			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4917  			return -EINVAL;
4918  		}
4919  		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4920  			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4921  			return -EINVAL;
4922  		}
4923  		if (netif_is_bridge_master(upper_dev)) {
4924  			br_vlan_get_proto(upper_dev, &proto);
4925  			if (br_vlan_enabled(upper_dev) &&
4926  			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
4927  				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
4928  				return -EOPNOTSUPP;
4929  			}
4930  			if (vlan_uses_dev(lower_dev) &&
4931  			    br_vlan_enabled(upper_dev) &&
4932  			    proto == ETH_P_8021AD) {
4933  				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
4934  				return -EOPNOTSUPP;
4935  			}
4936  		}
4937  		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
4938  			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
4939  
4940  			if (br_vlan_enabled(br_dev)) {
4941  				br_vlan_get_proto(br_dev, &proto);
4942  				if (proto == ETH_P_8021AD) {
4943  					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
4944  					return -EOPNOTSUPP;
4945  				}
4946  			}
4947  		}
4948  		if (is_vlan_dev(upper_dev) &&
4949  		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
4950  			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
4951  			return -EOPNOTSUPP;
4952  		}
4953  		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
4954  			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
4955  			return -EOPNOTSUPP;
4956  		}
4957  		break;
4958  	case NETDEV_CHANGEUPPER:
4959  		upper_dev = info->upper_dev;
4960  		if (netif_is_bridge_master(upper_dev)) {
4961  			if (info->linking) {
4962  				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4963  								lower_dev,
4964  								upper_dev,
4965  								extack);
4966  			} else {
4967  				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4968  							   lower_dev,
4969  							   upper_dev);
4970  				if (!replay_deslavement)
4971  					break;
4972  				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
4973  								      lower_dev);
4974  			}
4975  		} else if (netif_is_lag_master(upper_dev)) {
4976  			if (info->linking) {
4977  				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4978  							     upper_dev, extack);
4979  			} else {
4980  				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4981  				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4982  							upper_dev);
4983  				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
4984  								      dev);
4985  			}
4986  		} else if (netif_is_ovs_master(upper_dev)) {
4987  			if (info->linking)
4988  				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4989  			else
4990  				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4991  		} else if (netif_is_macvlan(upper_dev)) {
4992  			if (!info->linking)
4993  				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4994  		} else if (is_vlan_dev(upper_dev)) {
4995  			struct net_device *br_dev;
4996  
4997  			if (!netif_is_bridge_port(upper_dev))
4998  				break;
4999  			if (info->linking)
5000  				break;
5001  			br_dev = netdev_master_upper_dev_get(upper_dev);
5002  			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5003  						   br_dev);
5004  		}
5005  		break;
5006  	}
5007  
5008  	return err;
5009  }
5010  
5011  static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5012  					       unsigned long event, void *ptr)
5013  {
5014  	struct netdev_notifier_changelowerstate_info *info;
5015  	struct mlxsw_sp_port *mlxsw_sp_port;
5016  	int err;
5017  
5018  	mlxsw_sp_port = netdev_priv(dev);
5019  	info = ptr;
5020  
5021  	switch (event) {
5022  	case NETDEV_CHANGELOWERSTATE:
5023  		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5024  			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5025  							info->lower_state_info);
5026  			if (err)
5027  				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5028  		}
5029  		break;
5030  	}
5031  
5032  	return 0;
5033  }
5034  
5035  static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5036  					 struct net_device *port_dev,
5037  					 unsigned long event, void *ptr,
5038  					 bool replay_deslavement)
5039  {
5040  	switch (event) {
5041  	case NETDEV_PRECHANGEUPPER:
5042  	case NETDEV_CHANGEUPPER:
5043  		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5044  							   event, ptr,
5045  							   replay_deslavement);
5046  	case NETDEV_CHANGELOWERSTATE:
5047  		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5048  							   ptr);
5049  	}
5050  
5051  	return 0;
5052  }
5053  
5054  /* Called for a LAG device or its VLAN upper after the per-LAG-lower
5055   * processing is done, to handle any per-LAG / per-LAG-upper processing.
5056   */
5057  static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
5058  					     unsigned long event,
5059  					     void *ptr)
5060  {
5061  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
5062  	struct netdev_notifier_changeupper_info *info = ptr;
5063  
5064  	if (!mlxsw_sp)
5065  		return 0;
5066  
5067  	switch (event) {
5068  	case NETDEV_CHANGEUPPER:
5069  		if (info->linking)
5070  			break;
5071  		if (netif_is_bridge_master(info->upper_dev))
5072  			mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
5073  		break;
5074  	}
5075  	return 0;
5076  }
5077  
5078  static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5079  					unsigned long event, void *ptr)
5080  {
5081  	struct net_device *dev;
5082  	struct list_head *iter;
5083  	int ret;
5084  
5085  	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5086  		if (mlxsw_sp_port_dev_check(dev)) {
5087  			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5088  							    ptr, false);
5089  			if (ret)
5090  				return ret;
5091  		}
5092  	}
5093  
5094  	return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5095  }
5096  
5097  static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5098  					      struct net_device *dev,
5099  					      unsigned long event, void *ptr,
5100  					      u16 vid, bool replay_deslavement)
5101  {
5102  	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5103  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5104  	struct netdev_notifier_changeupper_info *info = ptr;
5105  	struct netlink_ext_ack *extack;
5106  	struct net_device *upper_dev;
5107  	int err = 0;
5108  
5109  	extack = netdev_notifier_info_to_extack(&info->info);
5110  
5111  	switch (event) {
5112  	case NETDEV_PRECHANGEUPPER:
5113  		upper_dev = info->upper_dev;
5114  		if (!netif_is_bridge_master(upper_dev) &&
5115  		    !netif_is_macvlan(upper_dev) &&
5116  		    !netif_is_l3_master(upper_dev)) {
5117  			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5118  			return -EINVAL;
5119  		}
5120  		if (!info->linking)
5121  			break;
5122  		if (netif_is_bridge_master(upper_dev) &&
5123  		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5124  		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5125  		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5126  			return -EOPNOTSUPP;
5127  		if (netdev_has_any_upper_dev(upper_dev) &&
5128  		    (!netif_is_bridge_master(upper_dev) ||
5129  		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5130  							  upper_dev))) {
5131  			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
5132  								 upper_dev,
5133  								 extack);
5134  			if (err)
5135  				return err;
5136  		}
5137  		break;
5138  	case NETDEV_CHANGEUPPER:
5139  		upper_dev = info->upper_dev;
5140  		if (netif_is_bridge_master(upper_dev)) {
5141  			if (info->linking) {
5142  				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5143  								vlan_dev,
5144  								upper_dev,
5145  								extack);
5146  			} else {
5147  				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5148  							   vlan_dev,
5149  							   upper_dev);
5150  				if (!replay_deslavement)
5151  					break;
5152  				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
5153  								      vlan_dev);
5154  			}
5155  		} else if (netif_is_macvlan(upper_dev)) {
5156  			if (!info->linking)
5157  				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5158  		}
5159  		break;
5160  	}
5161  
5162  	return err;
5163  }
5164  
5165  static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5166  						  struct net_device *lag_dev,
5167  						  unsigned long event,
5168  						  void *ptr, u16 vid)
5169  {
5170  	struct net_device *dev;
5171  	struct list_head *iter;
5172  	int ret;
5173  
5174  	netdev_for_each_lower_dev(lag_dev, dev, iter) {
5175  		if (mlxsw_sp_port_dev_check(dev)) {
5176  			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5177  								 event, ptr,
5178  								 vid, false);
5179  			if (ret)
5180  				return ret;
5181  		}
5182  	}
5183  
5184  	return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5185  }
5186  
5187  static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5188  						struct net_device *vlan_dev,
5189  						struct net_device *br_dev,
5190  						unsigned long event, void *ptr,
5191  						u16 vid, bool process_foreign)
5192  {
5193  	struct netdev_notifier_changeupper_info *info = ptr;
5194  	struct netlink_ext_ack *extack;
5195  	struct net_device *upper_dev;
5196  
5197  	if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5198  		return 0;
5199  
5200  	extack = netdev_notifier_info_to_extack(&info->info);
5201  
5202  	switch (event) {
5203  	case NETDEV_PRECHANGEUPPER:
5204  		upper_dev = info->upper_dev;
5205  		if (!netif_is_macvlan(upper_dev) &&
5206  		    !netif_is_l3_master(upper_dev)) {
5207  			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5208  			return -EOPNOTSUPP;
5209  		}
5210  		break;
5211  	case NETDEV_CHANGEUPPER:
5212  		upper_dev = info->upper_dev;
5213  		if (info->linking)
5214  			break;
5215  		if (netif_is_macvlan(upper_dev))
5216  			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5217  		break;
5218  	}
5219  
5220  	return 0;
5221  }
5222  
5223  static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5224  					 struct net_device *vlan_dev,
5225  					 unsigned long event, void *ptr,
5226  					 bool process_foreign)
5227  {
5228  	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5229  	u16 vid = vlan_dev_vlan_id(vlan_dev);
5230  
5231  	if (mlxsw_sp_port_dev_check(real_dev))
5232  		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5233  							  event, ptr, vid,
5234  							  true);
5235  	else if (netif_is_lag_master(real_dev))
5236  		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5237  							      real_dev, event,
5238  							      ptr, vid);
5239  	else if (netif_is_bridge_master(real_dev))
5240  		return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5241  							    real_dev, event,
5242  							    ptr, vid,
5243  							    process_foreign);
5244  
5245  	return 0;
5246  }
5247  
5248  static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
5249  					   struct net_device *br_dev,
5250  					   unsigned long event, void *ptr,
5251  					   bool process_foreign)
5252  {
5253  	struct netdev_notifier_changeupper_info *info = ptr;
5254  	struct netlink_ext_ack *extack;
5255  	struct net_device *upper_dev;
5256  	u16 proto;
5257  
5258  	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
5259  		return 0;
5260  
5261  	extack = netdev_notifier_info_to_extack(&info->info);
5262  
5263  	switch (event) {
5264  	case NETDEV_PRECHANGEUPPER:
5265  		upper_dev = info->upper_dev;
5266  		if (!is_vlan_dev(upper_dev) &&
5267  		    !netif_is_macvlan(upper_dev) &&
5268  		    !netif_is_l3_master(upper_dev)) {
5269  			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5270  			return -EOPNOTSUPP;
5271  		}
5272  		if (!info->linking)
5273  			break;
5274  		if (br_vlan_enabled(br_dev)) {
5275  			br_vlan_get_proto(br_dev, &proto);
5276  			if (proto == ETH_P_8021AD) {
5277  				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
5278  				return -EOPNOTSUPP;
5279  			}
5280  		}
5281  		if (is_vlan_dev(upper_dev) &&
5282  		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
5283  			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
5284  			return -EOPNOTSUPP;
5285  		}
5286  		break;
5287  	case NETDEV_CHANGEUPPER:
5288  		upper_dev = info->upper_dev;
5289  		if (info->linking)
5290  			break;
5291  		if (is_vlan_dev(upper_dev))
5292  			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5293  		if (netif_is_macvlan(upper_dev))
5294  			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5295  		break;
5296  	}
5297  
5298  	return 0;
5299  }
5300  
5301  static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5302  					    unsigned long event, void *ptr)
5303  {
5304  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5305  	struct netdev_notifier_changeupper_info *info = ptr;
5306  	struct netlink_ext_ack *extack;
5307  	struct net_device *upper_dev;
5308  
5309  	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5310  		return 0;
5311  
5312  	extack = netdev_notifier_info_to_extack(&info->info);
5313  	upper_dev = info->upper_dev;
5314  
5315  	if (!netif_is_l3_master(upper_dev)) {
5316  		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5317  		return -EOPNOTSUPP;
5318  	}
5319  
5320  	return 0;
5321  }
5322  
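/* VxLAN lifecycle: join the hardware VNI mapping when the device is
 * enslaved to an offloaded bridge (immediately for a VLAN-unaware
 * bridge, or once a VLAN is mapped for a VLAN-aware one) or brought up
 * under it, and leave the mapping on unlinking or NETDEV_DOWN.
 */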
5323  static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5324  					  struct net_device *dev,
5325  					  unsigned long event, void *ptr)
5326  {
5327  	struct netdev_notifier_changeupper_info *cu_info;
5328  	struct netdev_notifier_info *info = ptr;
5329  	struct netlink_ext_ack *extack;
5330  	struct net_device *upper_dev;
5331  
5332  	extack = netdev_notifier_info_to_extack(info);
5333  
5334  	switch (event) {
5335  	case NETDEV_CHANGEUPPER:
5336  		cu_info = container_of(info,
5337  				       struct netdev_notifier_changeupper_info,
5338  				       info);
5339  		upper_dev = cu_info->upper_dev;
5340  		if (!netif_is_bridge_master(upper_dev))
5341  			return 0;
5342  		if (!mlxsw_sp_lower_get(upper_dev))
5343  			return 0;
5344  		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5345  			return -EOPNOTSUPP;
5346  		if (cu_info->linking) {
5347  			if (!netif_running(dev))
5348  				return 0;
5349  			/* When the bridge is VLAN-aware, the VNI of the VxLAN
5350  			 * device needs to be mapped to a VLAN, but at this
5351  			 * point no VLANs are configured on the VxLAN device
5352  			 */
5353  			if (br_vlan_enabled(upper_dev))
5354  				return 0;
5355  			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
5356  							  dev, 0, extack);
5357  		} else {
5358  			/* VLANs were already flushed, which triggered the
5359  			 * necessary cleanup
5360  			 */
5361  			if (br_vlan_enabled(upper_dev))
5362  				return 0;
5363  			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5364  		}
5365  		break;
5366  	case NETDEV_PRE_UP:
5367  		upper_dev = netdev_master_upper_dev_get(dev);
5368  		if (!upper_dev)
5369  			return 0;
5370  		if (!netif_is_bridge_master(upper_dev))
5371  			return 0;
5372  		if (!mlxsw_sp_lower_get(upper_dev))
5373  			return 0;
5374  		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5375  						  extack);
5376  	case NETDEV_DOWN:
5377  		upper_dev = netdev_master_upper_dev_get(dev);
5378  		if (!upper_dev)
5379  			return 0;
5380  		if (!netif_is_bridge_master(upper_dev))
5381  			return 0;
5382  		if (!mlxsw_sp_lower_get(upper_dev))
5383  			return 0;
5384  		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5385  		break;
5386  	}
5387  
5388  	return 0;
5389  }
5390  
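/* Dispatch a netdev notifier event by device type; SPAN entries that
 * mirror towards an unregistering netdev are invalidated first,
 * whatever its type.
 */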
5391  static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5392  				      unsigned long event, void *ptr,
5393  				      bool process_foreign)
5394  {
5395  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5396  	struct mlxsw_sp_span_entry *span_entry;
5397  	int err = 0;
5398  
5399  	if (event == NETDEV_UNREGISTER) {
5400  		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5401  		if (span_entry)
5402  			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5403  	}
5404  
5405  	if (netif_is_vxlan(dev))
5406  		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5407  	else if (mlxsw_sp_port_dev_check(dev))
5408  		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5409  	else if (netif_is_lag_master(dev))
5410  		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5411  	else if (is_vlan_dev(dev))
5412  		err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5413  						    process_foreign);
5414  	else if (netif_is_bridge_master(dev))
5415  		err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5416  						      process_foreign);
5417  	else if (netif_is_macvlan(dev))
5418  		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5419  
5420  	return err;
5421  }
5422  
5423  static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5424  				    unsigned long event, void *ptr)
5425  {
5426  	struct mlxsw_sp *mlxsw_sp;
5427  	int err;
5428  
5429  	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5430  	mlxsw_sp_span_respin(mlxsw_sp);
5431  	err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5432  
5433  	return notifier_from_errno(err);
5434  }
5435  
5436  static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
5437  	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
5438  	{0, },
5439  };
5440  
5441  static struct pci_driver mlxsw_sp1_pci_driver = {
5442  	.name = mlxsw_sp1_driver_name,
5443  	.id_table = mlxsw_sp1_pci_id_table,
5444  };
5445  
5446  static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
5447  	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
5448  	{0, },
5449  };
5450  
5451  static struct pci_driver mlxsw_sp2_pci_driver = {
5452  	.name = mlxsw_sp2_driver_name,
5453  	.id_table = mlxsw_sp2_pci_id_table,
5454  };
5455  
5456  static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
5457  	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
5458  	{0, },
5459  };
5460  
5461  static struct pci_driver mlxsw_sp3_pci_driver = {
5462  	.name = mlxsw_sp3_driver_name,
5463  	.id_table = mlxsw_sp3_pci_id_table,
5464  };
5465  
5466  static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
5467  	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
5468  	{0, },
5469  };
5470  
5471  static struct pci_driver mlxsw_sp4_pci_driver = {
5472  	.name = mlxsw_sp4_driver_name,
5473  	.id_table = mlxsw_sp4_pci_id_table,
5474  };
5475  
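/* The core drivers for all four ASIC generations are registered before
 * their PCI drivers, presumably so that PCI probe always finds its core
 * driver; errors and module exit unwind in reverse registration order.
 */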
5476  static int __init mlxsw_sp_module_init(void)
5477  {
5478  	int err;
5479  
5480  	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5481  	if (err)
5482  		return err;
5483  
5484  	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5485  	if (err)
5486  		goto err_sp2_core_driver_register;
5487  
5488  	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
5489  	if (err)
5490  		goto err_sp3_core_driver_register;
5491  
5492  	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
5493  	if (err)
5494  		goto err_sp4_core_driver_register;
5495  
5496  	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5497  	if (err)
5498  		goto err_sp1_pci_driver_register;
5499  
5500  	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5501  	if (err)
5502  		goto err_sp2_pci_driver_register;
5503  
5504  	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
5505  	if (err)
5506  		goto err_sp3_pci_driver_register;
5507  
5508  	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
5509  	if (err)
5510  		goto err_sp4_pci_driver_register;
5511  
5512  	return 0;
5513  
5514  err_sp4_pci_driver_register:
5515  	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5516  err_sp3_pci_driver_register:
5517  	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5518  err_sp2_pci_driver_register:
5519  	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5520  err_sp1_pci_driver_register:
5521  	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5522  err_sp4_core_driver_register:
5523  	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5524  err_sp3_core_driver_register:
5525  	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5526  err_sp2_core_driver_register:
5527  	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5528  	return err;
5529  }
5530  
5531  static void __exit mlxsw_sp_module_exit(void)
5532  {
5533  	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
5534  	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5535  	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5536  	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5537  	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5538  	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5539  	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5540  	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5541  }
5542  
5543  module_init(mlxsw_sp_module_init);
5544  module_exit(mlxsw_sp_module_exit);
5545  
5546  MODULE_LICENSE("Dual BSD/GPL");
5547  MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
5548  MODULE_DESCRIPTION("Mellanox Spectrum driver");
5549  MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
5550  MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
5551  MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
5552  MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
5553  MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
5554  MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
5555  MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
5556  MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
5557