// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016 John Crispin <john@phrozen.org>
 */

#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/if_bridge.h>

#include "qca8k.h"

#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}

const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
	MIB_DESC(1, 0xa8, "RXUnicast"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};

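/* Thin accessors around the switch regmap. They return 0 on success or a
 * negative errno from the underlying regmap/bus implementation.
 */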
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}

int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
	return regmap_write(priv->regmap, reg, val);
}

int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	return regmap_update_bits(priv->regmap, reg, mask, write_val);
}

static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control */
	regmap_reg_range(0x0100, 0x0168), /* EEE control */
	regmap_reg_range(0x0200, 0x0270), /* Parser control */
	regmap_reg_range(0x0400, 0x0454), /* ACL */
	regmap_reg_range(0x0600, 0x0718), /* Lookup */
	regmap_reg_range(0x0800, 0x0b70), /* QM */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};

const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};

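/* Poll @reg until the bits in @mask are cleared by the hardware, giving up
 * after QCA8K_BUSY_WAIT_TIMEOUT ms. Used to wait for ATU, VTU and MIB
 * operations to complete.
 */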
static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
	u32 val;

	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
				       QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}

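/* Decode the FDB entry currently latched in the ATU_DATA registers into
 * @fdb. Callers hold priv->reg_mutex and have already triggered a lookup
 * via qca8k_fdb_access().
 */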
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
	u32 reg[QCA8K_ATU_TABLE_SIZE];
	int ret;

	/* load the ARL table into an array */
	ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
			       QCA8K_ATU_TABLE_SIZE);
	if (ret)
		return ret;

	/* vid - 83:72 */
	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
	/* aging - 67:64 */
	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
	/* portmask - 54:48 */
	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
	/* mac - 47:0 */
	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

	return 0;
}

static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
			    const u8 *mac, u8 aging)
{
	u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };

	/* vid - 83:72 */
	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
	/* aging - 67:64 */
	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
	/* portmask - 54:48 */
	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
	/* mac - 47:0 */
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);

	/* load the array into the ARL table */
	regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
			  QCA8K_ATU_TABLE_SIZE);
}

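/* Trigger an ATU operation: program the command (and optional port filter)
 * into ATU_FUNC together with the BUSY bit, then poll until the hardware
 * clears BUSY. For LOAD commands the FULL flag is checked afterwards to
 * detect a table-full condition. Callers hold priv->reg_mutex.
 */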
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
			    int port)
{
	u32 reg;
	int ret;

	/* Set the command and FDB index */
	reg = QCA8K_ATU_FUNC_BUSY;
	reg |= cmd;
	if (port >= 0) {
		reg |= QCA8K_ATU_FUNC_PORT_EN;
		reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
	}

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_FDB_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_ATU_FUNC_FULL)
			return -1;
	}

	return 0;
}

static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
			  int port)
{
	int ret;

	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
	if (ret < 0)
		return ret;

	return qca8k_fdb_read(priv, fdb);
}

static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid, u8 aging)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

void qca8k_fdb_flush(struct qca8k_priv *priv)
{
	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
	mutex_unlock(&priv->reg_mutex);
}

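/* Merge @port_mask into the FDB entry for @mac/@vid, creating the entry if
 * it does not exist yet. Since the hardware has no partial-update command,
 * an existing entry is purged and re-loaded with the merged portmask. Used
 * to install multicast (MDB) entries.
 */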
static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
				       const u8 *mac, u16 vid, u8 aging)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule exists. Delete it first */
	if (fdb.aging) {
		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
		if (ret)
			goto exit;
	} else {
		fdb.aging = aging;
	}

	/* Add port to fdb portmask */
	fdb.port_mask |= port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
				    const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule doesn't exist. Nothing to delete */
	if (!fdb.aging) {
		ret = -EINVAL;
		goto exit;
	}

	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	if (ret)
		goto exit;

	/* The only port in the rule is this port. Don't re-insert */
	if (fdb.port_mask == port_mask)
		goto exit;

	/* Remove port from port mask */
	fdb.port_mask &= ~port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

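/* Trigger a VTU operation: program the command and VLAN index into VTU_FUNC1
 * together with the BUSY bit, then poll until the hardware clears BUSY. For
 * LOAD commands the FULL flag is checked to detect a table-full condition.
 * Callers hold priv->reg_mutex.
 */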
static int qca8k_vlan_access(struct qca8k_priv *priv,
			     enum qca8k_vlan_cmd cmd, u16 vid)
{
	u32 reg;
	int ret;

	/* Set the command and VLAN index */
	reg = QCA8K_VTU_FUNC1_BUSY;
	reg |= cmd;
	reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_VTU_FUNC1_FULL)
			return -ENOMEM;
	}

	return 0;
}

static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
			  bool untagged)
{
	u32 reg;
	int ret;

	/* We do the right thing with VLAN 0 and treat it as untagged while
	 * preserving the tag on egress.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	if (untagged)
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
	else
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);

	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
	if (ret)
		goto out;
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
	u32 reg, mask;
	int ret, i;
	bool del;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);

	/* Check if we're the last member to be removed */
	del = true;
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);

		if ((reg & mask) != mask) {
			del = false;
			break;
		}
	}

	if (del) {
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
	} else {
		ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
		if (ret)
			goto out;
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
	}

out:
	mutex_unlock(&priv->reg_mutex);

	return ret;
}

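/* Reset the MIB block: flush all counters, set the MIB_CPU_KEEP bit and
 * finally enable MIB counting through the MODULE_EN register.
 */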
int qca8k_mib_init(struct qca8k_priv *priv)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
				 QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	if (ret)
		goto exit;

	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Ports 0 and 6 have no internal PHY */
	if (port > 0 && port < 6)
		mask |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
	else
		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}

void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < priv->info->mib_count; i++)
		ethtool_puts(&data, ar8327_mib[i].name);
}

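/* Read the per-port MIB counters. When a management conduit is available and
 * the switch variant implements autocast_mib, that faster path is used;
 * otherwise each counter (plus the high word of 64-bit counters) is read
 * individually through the regmap.
 */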
void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			     uint64_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	u32 reg, i, val;
	u32 hi = 0;
	int ret;

	if (priv->mgmt_conduit && priv->info->ops->autocast_mib &&
	    priv->info->ops->autocast_mib(ds, port, data) > 0)
		return;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		ret = qca8k_read(priv, reg, &val);
		if (ret < 0)
			continue;

		if (mib->size == 2) {
			ret = qca8k_read(priv, reg + 4, &hi);
			if (ret < 0)
				continue;
		}

		data[i] = val;
		if (mib->size == 2)
			data[i] |= (u64)hi << 32;
	}
}

int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct qca8k_priv *priv = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return priv->info->mib_count;
}

int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_keee *eee)
{
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
	struct qca8k_priv *priv = ds->priv;
	u32 reg;
	int ret;

	mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
	if (ret < 0)
		goto exit;

	if (eee->eee_enabled)
		reg |= lpi_en;
	else
		reg &= ~lpi_en;
	ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}

int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_keee *e)
{
	/* Nothing to do on the port's MAC */
	return 0;
}

static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
					 bool learning)
{
	struct qca8k_priv *priv = ds->priv;

	if (learning)
		return regmap_set_bits(priv->regmap,
				       QCA8K_PORT_LOOKUP_CTRL(port),
				       QCA8K_PORT_LOOKUP_LEARN);
	else
		return regmap_clear_bits(priv->regmap,
					 QCA8K_PORT_LOOKUP_CTRL(port),
					 QCA8K_PORT_LOOKUP_LEARN);
}

void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_priv *priv = ds->priv;
	bool learning = false;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
		break;
	case BR_STATE_LEARNING:
		stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
		learning = dp->learning;
		break;
	case BR_STATE_FORWARDING:
		learning = dp->learning;
		fallthrough;
	default:
		stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
		break;
	}

	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);

	qca8k_port_configure_learning(ds, port, learning);
}

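/* Recompute the port-based VLAN (LOOKUP_MEMBER) masks after @port joins or
 * leaves @bridge_dev. The CPU port is always a member; the other ports of
 * the same bridge are added to each other's masks unless both ports are
 * isolated (BR_ISOLATED).
 */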
static int qca8k_update_port_member(struct qca8k_priv *priv, int port,
				    const struct net_device *bridge_dev,
				    bool join)
{
	bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated;
	struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
	u32 port_mask = BIT(dp->cpu_dp->index);
	int i, ret;

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (i == port)
			continue;
		if (dsa_is_cpu_port(priv->ds, i))
			continue;

		other_dp = dsa_to_port(priv->ds, i);
		if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
			continue;

		other_isolated = !!(priv->port_isolated_map & BIT(i));

		/* Add/remove this port to/from the portvlan mask of the other
		 * ports in the bridge
		 */
		if (join && !(isolated && other_isolated)) {
			port_mask |= BIT(i);
			ret = regmap_set_bits(priv->regmap,
					      QCA8K_PORT_LOOKUP_CTRL(i),
					      BIT(port));
		} else {
			ret = regmap_clear_bits(priv->regmap,
						QCA8K_PORT_LOOKUP_CTRL(i),
						BIT(port));
		}

		if (ret)
			return ret;
	}

	/* Add/remove all other ports to/from this port's portvlan mask */
	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			QCA8K_PORT_LOOKUP_MEMBER, port_mask);

	return ret;
}

int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
				struct switchdev_brport_flags flags,
				struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
		return -EINVAL;

	return 0;
}

int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
			    struct switchdev_brport_flags flags,
			    struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	if (flags.mask & BR_LEARNING) {
		ret = qca8k_port_configure_learning(ds, port,
						    flags.val & BR_LEARNING);
		if (ret)
			return ret;
	}

	if (flags.mask & BR_ISOLATED) {
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);

		if (flags.val & BR_ISOLATED)
			priv->port_isolated_map |= BIT(port);
		else
			priv->port_isolated_map &= ~BIT(port);

		ret = qca8k_update_port_member(priv, port, bridge_dev, true);
		if (ret)
			return ret;
	}

	return 0;
}

int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
			   struct dsa_bridge bridge,
			   bool *tx_fwd_offload,
			   struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;

	return qca8k_update_port_member(priv, port, bridge.dev, true);
}

void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge)
{
	struct qca8k_priv *priv = ds->priv;
	int err;

	err = qca8k_update_port_member(priv, port, bridge.dev, false);
	if (err)
		dev_err(priv->dev,
			"Failed to update switch config for bridge leave: %d\n",
			err);
}

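/* Flush the dynamically learned FDB entries belonging to @port */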
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
	mutex_unlock(&priv->reg_mutex);
}

int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct qca8k_priv *priv = ds->priv;
	unsigned int secs = msecs / 1000;
	u32 val;

	/* The AGE_TIME reg is programmed in steps of 7 seconds */
	val = secs / 7;

	/* Avoid a value of 0, which would disable learning */
	if (!val)
		val = 1;

	return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
				  QCA8K_ATU_AGE_TIME_MASK,
				  QCA8K_ATU_AGE_TIME(val));
}

int qca8k_port_enable(struct dsa_switch *ds, int port,
		      struct phy_device *phy)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_enabled_map |= BIT(port);

	if (dsa_is_user_port(ds, port))
		phy_support_asym_pause(phy);

	return 0;
}

void qca8k_port_disable(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_enabled_map &= ~BIT(port);
}

int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	/* We only have a single, global MTU setting.
	 * DSA always sets the CPU port's MTU to the largest MTU of the user
	 * ports.
	 * Setting the MTU just for the CPU port is therefore sufficient to
	 * correctly set a value for every port.
	 */
	if (!dsa_is_cpu_port(ds, port))
		return 0;

	/* To change the MAX_FRAME_SIZE the cpu ports must be off or
	 * the switch panics.
	 * Turn off both cpu ports before applying the new value to prevent
	 * this.
	 */
	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 0);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 0);

	/* Include L2 header / FCS length */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
			  ETH_HLEN + ETH_FCS_LEN);

	if (priv->port_enabled_map & BIT(0))
		qca8k_port_set_status(priv, 0, 1);

	if (priv->port_enabled_map & BIT(6))
		qca8k_port_set_status(priv, 6, 1);

	return ret;
}

int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
	return QCA8K_MAX_MTU;
}

int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
			  u16 port_mask, u16 vid)
{
	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_add(priv, addr, port_mask, vid,
			     QCA8K_ATU_STATUS_STATIC);
}

int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	u16 port_mask = BIT(port);

	return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	u16 port_mask = BIT(port);

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_del(priv, addr, port_mask, vid);
}

int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	struct qca8k_priv *priv = ds->priv;
	struct qca8k_fdb _fdb = { 0 };
	int cnt = QCA8K_NUM_FDB_RECORDS;
	bool is_static;
	int ret = 0;

	mutex_lock(&priv->reg_mutex);
	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
		if (!_fdb.aging)
			break;
		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
		ret = cb(_fdb.mac, _fdb.vid, is_static, data);
		if (ret)
			break;
	}
	mutex_unlock(&priv->reg_mutex);

	return 0;
}

int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
					   QCA8K_ATU_STATUS_STATIC);
}

int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;
	const u8 *addr = mdb->addr;
	u16 vid = mdb->vid;

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
}

int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
			  struct dsa_mall_mirror_tc_entry *mirror,
			  bool ingress, struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int monitor_port, ret;
	u32 reg, val;

	/* Check for an existing entry */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	if (ret)
		return ret;

	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return error otherwise.
	 * When no mirror port is set, the value is 0xF.
	 */
	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
		return -EEXIST;

	/* Set the monitor port */
	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
			 mirror->to_local_port);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (ret)
		return ret;

	if (ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_update_bits(priv->regmap, reg, val, val);
	if (ret)
		return ret;

	/* Track mirror port for tx and rx to decide when the
	 * mirror port has to be disabled.
	 */
	if (ingress)
		priv->mirror_rx |= BIT(port);
	else
		priv->mirror_tx |= BIT(port);

	return 0;
}

void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct qca8k_priv *priv = ds->priv;
	u32 reg, val;
	int ret;

	if (mirror->ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_clear_bits(priv->regmap, reg, val);
	if (ret)
		goto err;

	if (mirror->ingress)
		priv->mirror_rx &= ~BIT(port);
	else
		priv->mirror_tx &= ~BIT(port);

	/* No port left mirroring to the monitor port. Disable the mirror port */
	if (!priv->mirror_rx && !priv->mirror_tx) {
		val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
		ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
					 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
		if (ret)
			goto err;
	}

	return;

err:
	dev_err(priv->dev, "Failed to del mirror port from %d", port);
}

int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
			      bool vlan_filtering,
			      struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	if (vlan_filtering) {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
	} else {
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
				QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
	}

	return ret;
}

int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan,
			struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
	if (ret) {
		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
		return ret;
	}

	if (pvid) {
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
		if (ret)
			return ret;

		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
				  QCA8K_PORT_VLAN_SVID(vlan->vid));
	}

	return ret;
}

int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_del(priv, port, vlan->vid);
	if (ret)
		dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);

	return ret;
}

static bool qca8k_lag_can_offload(struct dsa_switch *ds,
				  struct dsa_lag lag,
				  struct netdev_lag_upper_info *info,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp;
	int members = 0;

	if (!lag.id)
		return false;

	dsa_lag_foreach_port(dp, ds->dst, &lag)
		/* Includes the port joining the LAG */
		members++;

	if (members > QCA8K_NUM_PORTS_FOR_LAG) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload more than 4 LAG ports");
		return false;
	}

	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload LAG using hash TX type");
		return false;
	}

	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
	    info->hash_type != NETDEV_LAG_HASH_L23) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only offload L2 or L2+L3 TX hash");
		return false;
	}

	return true;
}

static int qca8k_lag_setup_hash(struct dsa_switch *ds,
				struct dsa_lag lag,
				struct netdev_lag_upper_info *info)
{
	struct net_device *lag_dev = lag.dev;
	struct qca8k_priv *priv = ds->priv;
	bool unique_lag = true;
	unsigned int i;
	u32 hash = 0;

	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L23:
		hash |= QCA8K_TRUNK_HASH_SIP_EN;
		hash |= QCA8K_TRUNK_HASH_DIP_EN;
		fallthrough;
	case NETDEV_LAG_HASH_L2:
		hash |= QCA8K_TRUNK_HASH_SA_EN;
		hash |= QCA8K_TRUNK_HASH_DA_EN;
		break;
	default: /* We should NEVER reach this */
		return -EOPNOTSUPP;
	}

	/* Check if we are the only configured LAG */
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	/* The hash mode is global, so the same mode must be used by all four
	 * possible LAGs. If we are the only LAG we can pick whatever hash
	 * mode we want; otherwise the requested mode must match the one
	 * already programmed. Changing the hash mode requires removing all
	 * LAGs and re-creating them with the new mode.
	 */
	if (unique_lag) {
		priv->lag_hash_mode = hash;
	} else if (priv->lag_hash_mode != hash) {
		netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
		return -EOPNOTSUPP;
	}

	return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
				  QCA8K_TRUNK_HASH_MASK, hash);
}

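/* Add @port to (or remove it from) the trunk described by @lag: update the
 * trunk member bitmap in GOL_TRUNK_CTRL0 (disabling the trunk when the
 * bitmap becomes empty), then program or clear a member-ID slot in the
 * per-trunk member registers.
 */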
static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
				     struct dsa_lag lag, bool delete)
{
	struct qca8k_priv *priv = ds->priv;
	int ret, id, i;
	u32 val;

	/* DSA LAG IDs are one-based, hardware is zero-based */
	id = lag.id - 1;

	/* Read current port member */
	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
	if (ret)
		return ret;

	/* Shift val to the correct trunk */
	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
	if (delete)
		val &= ~BIT(port);
	else
		val |= BIT(port);

	/* Update port member. With empty portmap disable trunk */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
				 QCA8K_REG_GOL_TRUNK_EN(id),
				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));

	/* Search for an empty member slot when adding, or for this port's
	 * slot when deleting
	 */
	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
		if (ret)
			return ret;

		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;

		if (delete) {
			/* If port flagged to be disabled assume this member is
			 * empty
			 */
			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;

			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
			if (val != port)
				continue;
		} else {
			/* If port flagged to be enabled assume this member is
			 * already set
			 */
			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;
		}

		/* We have found the member to add/remove */
		break;
	}

	/* Set port in the correct port mask or disable port if in delete mode */
	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}

int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			struct netdev_lag_upper_info *info,
			struct netlink_ext_ack *extack)
{
	int ret;

	if (!qca8k_lag_can_offload(ds, lag, info, extack))
		return -EOPNOTSUPP;

	ret = qca8k_lag_setup_hash(ds, lag, info);
	if (ret)
		return ret;

	return qca8k_lag_refresh_portmap(ds, port, lag, false);
}

int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
			 struct dsa_lag lag)
{
	return qca8k_lag_refresh_portmap(ds, port, lag, true);
}

int qca8k_read_switch_id(struct qca8k_priv *priv)
{
	u32 val;
	u8 id;
	int ret;

	if (!priv->info)
		return -ENODEV;

	ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
	if (ret < 0)
		return -ENODEV;

	id = QCA8K_MASK_CTRL_DEVICE_ID(val);
	if (id != priv->info->id) {
		dev_err(priv->dev,
			"Switch id detected %x but expected %x",
			id, priv->info->id);
		return -ENODEV;
	}

	priv->switch_id = id;

	/* Save revision to communicate to the internal PHY driver */
	priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);

	return 0;
}