// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"
22 
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
25 {
26 	struct stmmac_priv *priv = netdev_priv(dev);
27 	void __iomem *ioaddr = hw->pcsr;
28 	u32 value = readl(ioaddr + GMAC_CONFIG);
29 	u32 clk_rate;
30 
31 	value |= GMAC_CORE_INIT;
32 
33 	if (hw->ps) {
34 		value |= GMAC_CONFIG_TE;
35 
36 		value &= hw->link.speed_mask;
37 		switch (hw->ps) {
38 		case SPEED_1000:
39 			value |= hw->link.speed1000;
40 			break;
41 		case SPEED_100:
42 			value |= hw->link.speed100;
43 			break;
44 		case SPEED_10:
45 			value |= hw->link.speed10;
46 			break;
47 		}
48 	}
49 
50 	writel(value, ioaddr + GMAC_CONFIG);
51 
52 	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
53 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
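	/* e.g. a 250 MHz CSR clock gives 250000000 / 1000000 - 1 = 249 */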
54 	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
55 
56 	/* Enable GMAC interrupts */
57 	value = GMAC_INT_DEFAULT_ENABLE;
58 
59 	if (hw->pcs)
60 		value |= GMAC_PCS_IRQ_DEFAULT;
61 
62 	writel(value, ioaddr + GMAC_INT_EN);
63 
64 	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
65 		init_waitqueue_head(&priv->tstamp_busy_wait);
66 }
67 
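/* With more than one TX queue in use, half-duplex modes are not supported,
 * so drop the half-duplex link capabilities; restore them when only a
 * single queue is used.
 */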
static void dwmac4_update_caps(struct stmmac_priv *priv)
69 {
70 	if (priv->plat->tx_queues_to_use > 1)
71 		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
72 	else
73 		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
74 }
75 
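/* Enable RX queue 'queue' in GMAC_RXQ_CTRL0 and route it to either the AVB
 * or the DCB path depending on 'mode'.
 */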
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
78 {
79 	void __iomem *ioaddr = hw->pcsr;
80 	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
81 
82 	value &= GMAC_RX_QUEUE_CLEAR(queue);
83 	if (mode == MTL_QUEUE_AVB)
84 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
85 	else if (mode == MTL_QUEUE_DCB)
86 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
87 
88 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
89 }
90 
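/* Map the priorities in 'prio' to an RX queue. Queues 0-3 are programmed in
 * GMAC_RXQ_CTRL2 and queues 4-7 in GMAC_RXQ_CTRL3; the priority bits are
 * cleared from both register images first so a priority is never left
 * mapped to two queues.
 */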
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
93 {
94 	void __iomem *ioaddr = hw->pcsr;
95 	u32 clear_mask = 0;
96 	u32 ctrl2, ctrl3;
97 	int i;
98 
99 	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
100 	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
101 
102 	/* The software must ensure that the same priority
103 	 * is not mapped to multiple Rx queues
104 	 */
105 	for (i = 0; i < 4; i++)
106 		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
107 						GMAC_RXQCTRL_PSRQX_MASK(i));
108 
109 	ctrl2 &= ~clear_mask;
110 	ctrl3 &= ~clear_mask;
111 
	/* First assign new priorities to a queue, then
	 * clear them from the other queues
	 */
115 	if (queue < 4) {
116 		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
117 						GMAC_RXQCTRL_PSRQX_MASK(queue);
118 
119 		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
120 		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
121 	} else {
122 		queue -= 4;
123 
124 		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
125 						GMAC_RXQCTRL_PSRQX_MASK(queue);
126 
127 		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
128 		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
129 	}
130 }
131 
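/* Program the priority of a TX queue: queues 0-3 live in GMAC_TXQ_PRTY_MAP0
 * and queues 4-7 in GMAC_TXQ_PRTY_MAP1.
 */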
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
134 {
135 	void __iomem *ioaddr = hw->pcsr;
136 	u32 base_register;
137 	u32 value;
138 
139 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
140 	if (queue >= 4)
141 		queue -= 4;
142 
143 	value = readl(ioaddr + base_register);
144 
145 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
146 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
147 						GMAC_TXQCTRL_PSTQX_MASK(queue);
148 
149 	writel(value, ioaddr + base_register);
150 }
151 
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
154 {
155 	void __iomem *ioaddr = hw->pcsr;
156 	u32 value;
157 
158 	static const struct stmmac_rx_routing route_possibilities[] = {
159 		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
160 		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
161 		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
162 		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
163 		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
164 	};
165 
166 	value = readl(ioaddr + GMAC_RXQ_CTRL1);
167 
168 	/* routing configuration */
169 	value &= ~route_possibilities[packet - 1].reg_mask;
170 	value |= (queue << route_possibilities[packet-1].reg_shift) &
171 		 route_possibilities[packet - 1].reg_mask;
172 
173 	/* some packets require extra ops */
174 	if (packet == PACKET_AVCPQ) {
175 		value &= ~GMAC_RXQCTRL_TACPQE;
176 		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
177 	} else if (packet == PACKET_MCBCQ) {
178 		value &= ~GMAC_RXQCTRL_MCBCQEN;
179 		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
180 	}
181 
182 	writel(value, ioaddr + GMAC_RXQ_CTRL1);
183 }
184 
static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
187 {
188 	void __iomem *ioaddr = hw->pcsr;
189 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
190 
191 	value &= ~MTL_OPERATION_RAA;
192 	switch (rx_alg) {
193 	case MTL_RX_ALGORITHM_SP:
194 		value |= MTL_OPERATION_RAA_SP;
195 		break;
196 	case MTL_RX_ALGORITHM_WSP:
197 		value |= MTL_OPERATION_RAA_WSP;
198 		break;
199 	default:
200 		break;
201 	}
202 
203 	writel(value, ioaddr + MTL_OPERATION_MODE);
204 }
205 
static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
208 {
209 	void __iomem *ioaddr = hw->pcsr;
210 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
211 
212 	value &= ~MTL_OPERATION_SCHALG_MASK;
213 	switch (tx_alg) {
214 	case MTL_TX_ALGORITHM_WRR:
215 		value |= MTL_OPERATION_SCHALG_WRR;
216 		break;
217 	case MTL_TX_ALGORITHM_WFQ:
218 		value |= MTL_OPERATION_SCHALG_WFQ;
219 		break;
220 	case MTL_TX_ALGORITHM_DWRR:
221 		value |= MTL_OPERATION_SCHALG_DWRR;
222 		break;
223 	case MTL_TX_ALGORITHM_SP:
224 		value |= MTL_OPERATION_SCHALG_SP;
225 		break;
226 	default:
227 		break;
228 	}
229 
230 	writel(value, ioaddr + MTL_OPERATION_MODE);
231 }
232 
static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					   struct mac_device_info *hw,
					   u32 weight, u32 queue)
236 {
237 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
238 	void __iomem *ioaddr = hw->pcsr;
239 	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
240 							     queue));
241 
242 	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
243 	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
244 	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
245 }
246 
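/* Map MTL RX queue 'queue' onto DMA channel 'chan'; queues 0-3 are described
 * in MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1.
 */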
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
248 {
249 	void __iomem *ioaddr = hw->pcsr;
250 	u32 value;
251 
252 	if (queue < 4) {
253 		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
254 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
255 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
256 		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
257 	} else {
258 		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
259 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
260 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
261 		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
262 	}
263 }
264 
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
269 {
270 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
271 	void __iomem *ioaddr = hw->pcsr;
272 	u32 value;
273 
274 	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
275 	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
276 	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
277 	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
278 	pr_debug("\tlow_credit: 0x%08x\n", low_credit);
279 
280 	/* enable AV algorithm */
281 	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
282 	value |= MTL_ETS_CTRL_AVALG;
283 	value |= MTL_ETS_CTRL_CC;
284 	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
285 
286 	/* configure send slope */
287 	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
288 							    queue));
289 	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
290 	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
291 	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
292 							    queue));
293 
294 	/* configure idle slope (same register as tx weight) */
295 	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);
296 
297 	/* configure high credit */
298 	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
299 	value &= ~MTL_HIGH_CRED_HC_MASK;
300 	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
301 	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
302 
	/* configure low credit */
304 	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
305 	value &= ~MTL_HIGH_CRED_LC_MASK;
306 	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
307 	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
308 }
309 
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
311 {
312 	void __iomem *ioaddr = hw->pcsr;
313 	int i;
314 
315 	for (i = 0; i < GMAC_REG_NUM; i++)
316 		reg_space[i] = readl(ioaddr + i * 4);
317 }
318 
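/* Enable or disable RX checksum offload according to hw->rx_csum and return
 * whether the IPC bit is actually set after the write.
 */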
static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
320 {
321 	void __iomem *ioaddr = hw->pcsr;
322 	u32 value = readl(ioaddr + GMAC_CONFIG);
323 
324 	if (hw->rx_csum)
325 		value |= GMAC_CONFIG_IPC;
326 	else
327 		value &= ~GMAC_CONFIG_IPC;
328 
329 	writel(value, ioaddr + GMAC_CONFIG);
330 
331 	value = readl(ioaddr + GMAC_CONFIG);
332 
333 	return !!(value & GMAC_CONFIG_IPC);
334 }
335 
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
337 {
338 	void __iomem *ioaddr = hw->pcsr;
339 	unsigned int pmt = 0;
340 	u32 config;
341 
342 	if (mode & WAKE_MAGIC) {
343 		pr_debug("GMAC: WOL Magic frame\n");
344 		pmt |= power_down | magic_pkt_en;
345 	}
346 	if (mode & WAKE_UCAST) {
347 		pr_debug("GMAC: WOL on global unicast\n");
348 		pmt |= power_down | global_unicast | wake_up_frame_en;
349 	}
350 
351 	if (pmt) {
352 		/* The receiver must be enabled for WOL before powering down */
353 		config = readl(ioaddr + GMAC_CONFIG);
354 		config |= GMAC_CONFIG_RE;
355 		writel(config, ioaddr + GMAC_CONFIG);
356 	}
357 	writel(pmt, ioaddr + GMAC_PMT);
358 }
359 
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
362 {
363 	void __iomem *ioaddr = hw->pcsr;
364 
365 	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
366 				   GMAC_ADDR_LOW(reg_n));
367 }
368 
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
371 {
372 	void __iomem *ioaddr = hw->pcsr;
373 
374 	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
375 				   GMAC_ADDR_LOW(reg_n));
376 }
377 
static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
380 {
381 	void __iomem *ioaddr = hw->pcsr;
382 	u32 value;
383 
	/* Enable the link status receive on the RGMII, SGMII or SMII
	 * receive path and instruct the transmit path to enter the
	 * LPI state.
	 */
388 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
389 	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
390 
391 	if (en_tx_lpi_clockgating)
392 		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
393 
394 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
395 }
396 
static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
398 {
399 	void __iomem *ioaddr = hw->pcsr;
400 	u32 value;
401 
402 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
403 	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
404 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
405 }
406 
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
408 {
409 	void __iomem *ioaddr = hw->pcsr;
410 	u32 value;
411 
412 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
413 
414 	if (link)
415 		value |= GMAC4_LPI_CTRL_STATUS_PLS;
416 	else
417 		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
418 
419 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
420 }
421 
static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
423 {
424 	void __iomem *ioaddr = hw->pcsr;
425 	int value = et & STMMAC_ET_MAX;
426 	int regval;
427 
428 	/* Program LPI entry timer value into register */
429 	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
430 
431 	/* Enable/disable LPI entry timer */
432 	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
433 	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
434 
435 	if (et)
436 		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
437 	else
438 		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
439 
440 	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
441 }
442 
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
444 {
445 	void __iomem *ioaddr = hw->pcsr;
446 	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
447 
448 	/* Program the timers in the LPI timer control register:
449 	 * LS: minimum time (ms) for which the link
450 	 *  status from PHY should be ok before transmitting
451 	 *  the LPI pattern.
452 	 * TW: minimum time (us) for which the core waits
453 	 *  after it has stopped transmitting the LPI pattern.
454 	 */
455 	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
456 }
457 
static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
459 {
460 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
461 	u32 val;
462 
463 	val = readl(ioaddr + GMAC_VLAN_TAG);
464 	val &= ~GMAC_VLAN_TAG_VID;
465 	val |= GMAC_VLAN_TAG_ETV | vid;
466 
467 	writel(val, ioaddr + GMAC_VLAN_TAG);
468 }
469 
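/* Program one entry of the extended RX VLAN filter: write the filter data,
 * start the indirect access via the OB bit and poll until the hardware has
 * completed the operation.
 */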
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
473 {
474 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
475 	int ret;
476 	u32 val;
477 
478 	if (index >= hw->num_vlan)
479 		return -EINVAL;
480 
481 	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
482 
483 	val = readl(ioaddr + GMAC_VLAN_TAG);
484 	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
485 		GMAC_VLAN_TAG_CTRL_CT |
486 		GMAC_VLAN_TAG_CTRL_OB);
487 	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
488 
489 	writel(val, ioaddr + GMAC_VLAN_TAG);
490 
491 	ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
492 				 !(val & GMAC_VLAN_TAG_CTRL_OB),
493 				 1000, 500000);
494 	if (ret) {
495 		netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
496 		return -EBUSY;
497 	}
498 
499 	return 0;
500 }
501 
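/* Add a VLAN ID to the RX filter. Cores with a single filter entry program
 * the VID directly in GMAC_VLAN_TAG; cores with the extended filter store it
 * in the first free entry of the filter table.
 */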
static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
505 {
506 	int index = -1;
507 	u32 val = 0;
508 	int i, ret;
509 
510 	if (vid > 4095)
511 		return -EINVAL;
512 
513 	/* Single Rx VLAN Filter */
514 	if (hw->num_vlan == 1) {
515 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
516 		if (vid == 0) {
517 			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
518 			return -EPERM;
519 		}
520 
521 		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
522 			netdev_err(dev, "Only single VLAN ID supported\n");
523 			return -EPERM;
524 		}
525 
526 		hw->vlan_filter[0] = vid;
527 		dwmac4_write_single_vlan(dev, vid);
528 
529 		return 0;
530 	}
531 
532 	/* Extended Rx VLAN Filter Enable */
533 	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
534 
535 	for (i = 0; i < hw->num_vlan; i++) {
536 		if (hw->vlan_filter[i] == val)
537 			return 0;
538 		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
539 			index = i;
540 	}
541 
542 	if (index == -1) {
543 		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
544 			   hw->num_vlan);
545 		return -EPERM;
546 	}
547 
548 	ret = dwmac4_write_vlan_filter(dev, hw, index, val);
549 
550 	if (!ret)
551 		hw->vlan_filter[index] = val;
552 
553 	return ret;
554 }
555 
static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
559 {
560 	int i, ret = 0;
561 
562 	/* Single Rx VLAN Filter */
563 	if (hw->num_vlan == 1) {
564 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
565 			hw->vlan_filter[0] = 0;
566 			dwmac4_write_single_vlan(dev, 0);
567 		}
568 		return 0;
569 	}
570 
571 	/* Extended Rx VLAN Filter Enable */
572 	for (i = 0; i < hw->num_vlan; i++) {
573 		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
574 			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
575 
576 			if (!ret)
577 				hw->vlan_filter[i] = 0;
578 			else
579 				return ret;
580 		}
581 	}
582 
583 	return ret;
584 }
585 
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
588 {
589 	void __iomem *ioaddr = hw->pcsr;
590 	u32 value;
591 	u32 hash;
592 	u32 val;
593 	int i;
594 
595 	/* Single Rx VLAN Filter */
596 	if (hw->num_vlan == 1) {
597 		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
598 		return;
599 	}
600 
601 	/* Extended Rx VLAN Filter Enable */
602 	for (i = 0; i < hw->num_vlan; i++) {
603 		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
604 			val = hw->vlan_filter[i];
605 			dwmac4_write_vlan_filter(dev, hw, i, val);
606 		}
607 	}
608 
609 	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
610 	if (hash & GMAC_VLAN_VLHT) {
611 		value = readl(ioaddr + GMAC_VLAN_TAG);
612 		value |= GMAC_VLAN_VTHM;
613 		writel(value, ioaddr + GMAC_VLAN_TAG);
614 	}
615 }
616 
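/* Program the MAC packet filter: promiscuous and all-multicast modes, the
 * multicast hash filter, perfect unicast address filtering and the VLAN tag
 * filter enable bit.
 */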
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
619 {
620 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
621 	int numhashregs = (hw->multicast_filter_bins >> 5);
622 	int mcbitslog2 = hw->mcast_bits_log2;
623 	unsigned int value;
624 	u32 mc_filter[8];
625 	int i;
626 
627 	memset(mc_filter, 0, sizeof(mc_filter));
628 
629 	value = readl(ioaddr + GMAC_PACKET_FILTER);
630 	value &= ~GMAC_PACKET_FILTER_HMC;
631 	value &= ~GMAC_PACKET_FILTER_HPF;
632 	value &= ~GMAC_PACKET_FILTER_PCF;
633 	value &= ~GMAC_PACKET_FILTER_PM;
634 	value &= ~GMAC_PACKET_FILTER_PR;
635 	value &= ~GMAC_PACKET_FILTER_RA;
636 	if (dev->flags & IFF_PROMISC) {
637 		/* VLAN Tag Filter Fail Packets Queuing */
638 		if (hw->vlan_fail_q_en) {
639 			value = readl(ioaddr + GMAC_RXQ_CTRL4);
640 			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
641 			value |= GMAC_RXQCTRL_VFFQE |
642 				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
643 			writel(value, ioaddr + GMAC_RXQ_CTRL4);
644 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
645 		} else {
646 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
647 		}
648 
649 	} else if ((dev->flags & IFF_ALLMULTI) ||
650 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
651 		/* Pass all multi */
652 		value |= GMAC_PACKET_FILTER_PM;
653 		/* Set all the bits of the HASH tab */
654 		memset(mc_filter, 0xff, sizeof(mc_filter));
655 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
656 		struct netdev_hw_addr *ha;
657 
658 		/* Hash filter for multicast */
659 		value |= GMAC_PACKET_FILTER_HMC;
660 
661 		netdev_for_each_mc_addr(ha, dev) {
662 			/* The upper n bits of the calculated CRC are used to
663 			 * index the contents of the hash table. The number of
664 			 * bits used depends on the hardware configuration
665 			 * selected at core configuration time.
666 			 */
667 			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
668 					ETH_ALEN)) >> (32 - mcbitslog2);
669 			/* The most significant bit determines the register to
670 			 * use (H/L) while the other 5 bits determine the bit
671 			 * within the register.
672 			 */
673 			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
674 		}
675 	}
676 
677 	for (i = 0; i < numhashregs; i++)
678 		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
679 
680 	value |= GMAC_PACKET_FILTER_HPF;
681 
682 	/* Handle multiple unicast addresses */
683 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more unicast addresses
		 * than the hardware supports are required
		 */
687 		value |= GMAC_PACKET_FILTER_PR;
688 	} else {
689 		struct netdev_hw_addr *ha;
690 		int reg = 1;
691 
692 		netdev_for_each_uc_addr(ha, dev) {
693 			dwmac4_set_umac_addr(hw, ha->addr, reg);
694 			reg++;
695 		}
696 
697 		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
698 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
699 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
700 			reg++;
701 		}
702 	}
703 
704 	/* VLAN filtering */
705 	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
706 		value &= ~GMAC_PACKET_FILTER_VTFE;
707 	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
708 		value |= GMAC_PACKET_FILTER_VTFE;
709 
710 	writel(value, ioaddr + GMAC_PACKET_FILTER);
711 }
712 
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
716 {
717 	void __iomem *ioaddr = hw->pcsr;
718 	unsigned int flow = 0;
719 	u32 queue = 0;
720 
721 	pr_debug("GMAC Flow-Control:\n");
722 	if (fc & FLOW_RX) {
723 		pr_debug("\tReceive Flow-Control ON\n");
724 		flow |= GMAC_RX_FLOW_CTRL_RFE;
725 	} else {
726 		pr_debug("\tReceive Flow-Control OFF\n");
727 	}
728 	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
729 
730 	if (fc & FLOW_TX) {
731 		pr_debug("\tTransmit Flow-Control ON\n");
732 
733 		if (duplex)
734 			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
735 
736 		for (queue = 0; queue < tx_cnt; queue++) {
737 			flow = GMAC_TX_FLOW_CTRL_TFE;
738 
739 			if (duplex)
740 				flow |=
741 				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
742 
743 			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
744 		}
745 	} else {
746 		for (queue = 0; queue < tx_cnt; queue++)
747 			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
748 	}
749 }
750 
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
753 {
754 	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
755 }
756 
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
758 {
759 	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
760 }
761 
762 /* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
764 {
765 	u32 status;
766 
767 	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
768 	x->irq_rgmii_n++;
769 
770 	/* Check the link status */
771 	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
772 		int speed_value;
773 
774 		x->pcs_link = 1;
775 
776 		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
777 			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
778 		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
779 			x->pcs_speed = SPEED_1000;
780 		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
781 			x->pcs_speed = SPEED_100;
782 		else
783 			x->pcs_speed = SPEED_10;
784 
785 		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);
786 
787 		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
788 			x->pcs_duplex ? "Full" : "Half");
789 	} else {
790 		x->pcs_link = 0;
791 		pr_info("Link is Down\n");
792 	}
793 }
794 
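/* Handle the per-queue MTL interrupt; only the RX overflow event is
 * acknowledged and reported back to the caller.
 */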
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
797 {
798 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
799 	void __iomem *ioaddr = hw->pcsr;
800 	u32 mtl_int_qx_status;
801 	int ret = 0;
802 
803 	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
804 
805 	/* Check MTL Interrupt */
806 	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
807 		/* read Queue x Interrupt status */
808 		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
809 							      chan));
810 
811 		if (status & MTL_RX_OVERFLOW_INT) {
812 			/*  clear Interrupt */
813 			writel(status | MTL_RX_OVERFLOW_INT,
814 			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
815 			ret = CORE_IRQ_MTL_RX_OVERFLOW;
816 		}
817 	}
818 
819 	return ret;
820 }
821 
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
824 {
825 	void __iomem *ioaddr = hw->pcsr;
826 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
827 	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
828 	int ret = 0;
829 
830 	/* Discard disabled bits */
831 	intr_status &= intr_enable;
832 
	/* Unused events (e.g. MMC interrupts) are not handled. */
	if (intr_status & mmc_tx_irq)
835 		x->mmc_tx_irq_n++;
836 	if (unlikely(intr_status & mmc_rx_irq))
837 		x->mmc_rx_irq_n++;
838 	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
839 		x->mmc_rx_csum_offload_irq_n++;
840 	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
841 	if (unlikely(intr_status & pmt_irq)) {
842 		readl(ioaddr + GMAC_PMT);
843 		x->irq_receive_pmt_irq_n++;
844 	}
845 
846 	/* MAC tx/rx EEE LPI entry/exit interrupts */
847 	if (intr_status & lpi_irq) {
848 		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
849 		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
850 
851 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
852 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
853 			x->irq_tx_path_in_lpi_mode_n++;
854 		}
855 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
856 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
857 			x->irq_tx_path_exit_lpi_mode_n++;
858 		}
859 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
860 			x->irq_rx_path_in_lpi_mode_n++;
861 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
862 			x->irq_rx_path_exit_lpi_mode_n++;
863 	}
864 
865 	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
866 	if (intr_status & PCS_RGSMIIIS_IRQ)
867 		dwmac4_phystatus(ioaddr, x);
868 
869 	return ret;
870 }
871 
static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
			 struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
875 {
876 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
877 	u32 value;
878 	u32 queue;
879 
880 	for (queue = 0; queue < tx_queues; queue++) {
881 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));
882 
883 		if (value & MTL_DEBUG_TXSTSFSTS)
884 			x->mtl_tx_status_fifo_full++;
885 		if (value & MTL_DEBUG_TXFSTS)
886 			x->mtl_tx_fifo_not_empty++;
887 		if (value & MTL_DEBUG_TWCSTS)
888 			x->mmtl_fifo_ctrl++;
889 		if (value & MTL_DEBUG_TRCSTS_MASK) {
890 			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
891 				     >> MTL_DEBUG_TRCSTS_SHIFT;
892 			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
893 				x->mtl_tx_fifo_read_ctrl_write++;
894 			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
895 				x->mtl_tx_fifo_read_ctrl_wait++;
896 			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
897 				x->mtl_tx_fifo_read_ctrl_read++;
898 			else
899 				x->mtl_tx_fifo_read_ctrl_idle++;
900 		}
901 		if (value & MTL_DEBUG_TXPAUSED)
902 			x->mac_tx_in_pause++;
903 	}
904 
905 	for (queue = 0; queue < rx_queues; queue++) {
906 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
907 
908 		if (value & MTL_DEBUG_RXFSTS_MASK) {
909 			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
910 				     >> MTL_DEBUG_RRCSTS_SHIFT;
911 
912 			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
913 				x->mtl_rx_fifo_fill_level_full++;
914 			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
915 				x->mtl_rx_fifo_fill_above_thresh++;
916 			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
917 				x->mtl_rx_fifo_fill_below_thresh++;
918 			else
919 				x->mtl_rx_fifo_fill_level_empty++;
920 		}
921 		if (value & MTL_DEBUG_RRCSTS_MASK) {
922 			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
923 				     MTL_DEBUG_RRCSTS_SHIFT;
924 
925 			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
926 				x->mtl_rx_fifo_read_ctrl_flush++;
927 			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
928 				x->mtl_rx_fifo_read_ctrl_read_data++;
929 			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
930 				x->mtl_rx_fifo_read_ctrl_status++;
931 			else
932 				x->mtl_rx_fifo_read_ctrl_idle++;
933 		}
934 		if (value & MTL_DEBUG_RWCSTS)
935 			x->mtl_rx_fifo_ctrl_active++;
936 	}
937 
938 	/* GMAC debug */
939 	value = readl(ioaddr + GMAC_DEBUG);
940 
941 	if (value & GMAC_DEBUG_TFCSTS_MASK) {
942 		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
943 			      >> GMAC_DEBUG_TFCSTS_SHIFT;
944 
945 		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
946 			x->mac_tx_frame_ctrl_xfer++;
947 		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
948 			x->mac_tx_frame_ctrl_pause++;
949 		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
950 			x->mac_tx_frame_ctrl_wait++;
951 		else
952 			x->mac_tx_frame_ctrl_idle++;
953 	}
954 	if (value & GMAC_DEBUG_TPESTS)
955 		x->mac_gmii_tx_proto_engine++;
956 	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
957 		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
958 					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
959 	if (value & GMAC_DEBUG_RPESTS)
960 		x->mac_gmii_rx_proto_engine++;
961 }
962 
static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
964 {
965 	u32 value = readl(ioaddr + GMAC_CONFIG);
966 
967 	if (enable)
968 		value |= GMAC_CONFIG_LM;
969 	else
970 		value &= ~GMAC_CONFIG_LM;
971 
972 	writel(value, ioaddr + GMAC_CONFIG);
973 }
974 
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    u16 perfect_match, bool is_double)
977 {
978 	void __iomem *ioaddr = hw->pcsr;
979 	u32 value;
980 
981 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
982 
983 	value = readl(ioaddr + GMAC_VLAN_TAG);
984 
985 	if (hash) {
986 		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
987 		if (is_double) {
988 			value |= GMAC_VLAN_EDVLP;
989 			value |= GMAC_VLAN_ESVL;
990 			value |= GMAC_VLAN_DOVLTC;
991 		}
992 
993 		writel(value, ioaddr + GMAC_VLAN_TAG);
994 	} else if (perfect_match) {
995 		u32 value = GMAC_VLAN_ETV;
996 
997 		if (is_double) {
998 			value |= GMAC_VLAN_EDVLP;
999 			value |= GMAC_VLAN_ESVL;
1000 			value |= GMAC_VLAN_DOVLTC;
1001 		}
1002 
1003 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
1004 	} else {
1005 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
1006 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
1007 		value &= ~GMAC_VLAN_DOVLTC;
1008 		value &= ~GMAC_VLAN_VID;
1009 
1010 		writel(value, ioaddr + GMAC_VLAN_TAG);
1011 	}
1012 }
1013 
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
1015 {
1016 	u32 value = readl(ioaddr + GMAC_CONFIG);
1017 
1018 	value &= ~GMAC_CONFIG_SARC;
1019 	value |= val << GMAC_CONFIG_SARC_SHIFT;
1020 
1021 	writel(value, ioaddr + GMAC_CONFIG);
1022 }
1023 
static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
1025 {
1026 	void __iomem *ioaddr = hw->pcsr;
1027 	u32 value;
1028 
1029 	value = readl(ioaddr + GMAC_VLAN_INCL);
1030 	value |= GMAC_VLAN_VLTI;
1031 	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
1032 	value &= ~GMAC_VLAN_VLC;
1033 	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
1034 	writel(value, ioaddr + GMAC_VLAN_INCL);
1035 }
1036 
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
1039 {
1040 	void __iomem *ioaddr = hw->pcsr;
1041 	u32 value;
1042 
1043 	writel(addr, ioaddr + GMAC_ARP_ADDR);
1044 
1045 	value = readl(ioaddr + GMAC_CONFIG);
1046 	if (en)
1047 		value |= GMAC_CONFIG_ARPEN;
1048 	else
1049 		value &= ~GMAC_CONFIG_ARPEN;
1050 	writel(value, ioaddr + GMAC_CONFIG);
1051 }
1052 
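/* Configure L3 (IPv4/IPv6) address filtering for 'filter_no': match either
 * the source or the destination address, optionally inverted. Disabling the
 * filter clears the whole control register.
 */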
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
1056 {
1057 	void __iomem *ioaddr = hw->pcsr;
1058 	u32 value;
1059 
1060 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1061 	value |= GMAC_PACKET_FILTER_IPFE;
1062 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1063 
1064 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1065 
	/* For IPv6, the SA and DA filters cannot both be active */
1067 	if (ipv6) {
1068 		value |= GMAC_L3PEN0;
1069 		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
1070 		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
1071 		if (sa) {
1072 			value |= GMAC_L3SAM0;
1073 			if (inv)
1074 				value |= GMAC_L3SAIM0;
1075 		} else {
1076 			value |= GMAC_L3DAM0;
1077 			if (inv)
1078 				value |= GMAC_L3DAIM0;
1079 		}
1080 	} else {
1081 		value &= ~GMAC_L3PEN0;
1082 		if (sa) {
1083 			value |= GMAC_L3SAM0;
1084 			if (inv)
1085 				value |= GMAC_L3SAIM0;
1086 		} else {
1087 			value |= GMAC_L3DAM0;
1088 			if (inv)
1089 				value |= GMAC_L3DAIM0;
1090 		}
1091 	}
1092 
1093 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1094 
1095 	if (sa) {
1096 		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
1097 	} else {
1098 		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
1099 	}
1100 
1101 	if (!en)
1102 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1103 
1104 	return 0;
1105 }
1106 
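/* Configure L4 (TCP/UDP) port filtering for 'filter_no': match either the
 * source or the destination port, optionally inverted.
 */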
static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
1110 {
1111 	void __iomem *ioaddr = hw->pcsr;
1112 	u32 value;
1113 
1114 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1115 	value |= GMAC_PACKET_FILTER_IPFE;
1116 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1117 
1118 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1119 	if (udp) {
1120 		value |= GMAC_L4PEN0;
1121 	} else {
1122 		value &= ~GMAC_L4PEN0;
1123 	}
1124 
1125 	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
1126 	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
1127 	if (sa) {
1128 		value |= GMAC_L4SPM0;
1129 		if (inv)
1130 			value |= GMAC_L4SPIM0;
1131 	} else {
1132 		value |= GMAC_L4DPM0;
1133 		if (inv)
1134 			value |= GMAC_L4DPIM0;
1135 	}
1136 
1137 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1138 
1139 	if (sa) {
1140 		value = match & GMAC_L4SP0;
1141 	} else {
1142 		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
1143 	}
1144 
1145 	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
1146 
1147 	if (!en)
1148 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1149 
1150 	return 0;
1151 }
1152 
static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
			      struct dma_desc *rx_desc, struct sk_buff *skb)
1155 {
1156 	if (hw->desc->get_rx_vlan_valid(rx_desc)) {
1157 		u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
1158 
1159 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1160 	}
1161 }
1162 
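/* Select RX VLAN tag stripping (all frames or none) based on hw->hw_vlan_en
 * and make the stripped tag available in the RX descriptor.
 */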
static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
1164 {
1165 	void __iomem *ioaddr = hw->pcsr;
1166 	u32 value = readl(ioaddr + GMAC_VLAN_TAG);
1167 
1168 	value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
1169 
1170 	if (hw->hw_vlan_en)
1171 		/* Always strip VLAN on Receive */
1172 		value |= GMAC_VLAN_TAG_STRIP_ALL;
1173 	else
1174 		/* Do not strip VLAN on Receive */
1175 		value |= GMAC_VLAN_TAG_STRIP_NONE;
1176 
1177 	/* Enable outer VLAN Tag in Rx DMA descriptor */
1178 	value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
1179 	writel(value, ioaddr + GMAC_VLAN_TAG);
1180 }
1181 
1182 const struct stmmac_ops dwmac4_ops = {
1183 	.core_init = dwmac4_core_init,
1184 	.update_caps = dwmac4_update_caps,
1185 	.set_mac = stmmac_set_mac,
1186 	.rx_ipc = dwmac4_rx_ipc_enable,
1187 	.rx_queue_enable = dwmac4_rx_queue_enable,
1188 	.rx_queue_prio = dwmac4_rx_queue_priority,
1189 	.tx_queue_prio = dwmac4_tx_queue_priority,
1190 	.rx_queue_routing = dwmac4_rx_queue_routing,
1191 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1192 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1193 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1194 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1195 	.config_cbs = dwmac4_config_cbs,
1196 	.dump_regs = dwmac4_dump_regs,
1197 	.host_irq_status = dwmac4_irq_status,
1198 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1199 	.flow_ctrl = dwmac4_flow_ctrl,
1200 	.pmt = dwmac4_pmt,
1201 	.set_umac_addr = dwmac4_set_umac_addr,
1202 	.get_umac_addr = dwmac4_get_umac_addr,
1203 	.set_eee_mode = dwmac4_set_eee_mode,
1204 	.reset_eee_mode = dwmac4_reset_eee_mode,
1205 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1206 	.set_eee_timer = dwmac4_set_eee_timer,
1207 	.set_eee_pls = dwmac4_set_eee_pls,
1208 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1209 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1210 	.debug = dwmac4_debug,
1211 	.set_filter = dwmac4_set_filter,
1212 	.set_mac_loopback = dwmac4_set_mac_loopback,
1213 	.update_vlan_hash = dwmac4_update_vlan_hash,
1214 	.sarc_configure = dwmac4_sarc_configure,
1215 	.enable_vlan = dwmac4_enable_vlan,
1216 	.set_arp_offload = dwmac4_set_arp_offload,
1217 	.config_l3_filter = dwmac4_config_l3_filter,
1218 	.config_l4_filter = dwmac4_config_l4_filter,
1219 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1220 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1221 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1222 	.rx_hw_vlan = dwmac4_rx_hw_vlan,
1223 	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
1224 };
1225 
1226 const struct stmmac_ops dwmac410_ops = {
1227 	.core_init = dwmac4_core_init,
1228 	.update_caps = dwmac4_update_caps,
1229 	.set_mac = stmmac_dwmac4_set_mac,
1230 	.rx_ipc = dwmac4_rx_ipc_enable,
1231 	.rx_queue_enable = dwmac4_rx_queue_enable,
1232 	.rx_queue_prio = dwmac4_rx_queue_priority,
1233 	.tx_queue_prio = dwmac4_tx_queue_priority,
1234 	.rx_queue_routing = dwmac4_rx_queue_routing,
1235 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1236 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1237 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1238 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1239 	.config_cbs = dwmac4_config_cbs,
1240 	.dump_regs = dwmac4_dump_regs,
1241 	.host_irq_status = dwmac4_irq_status,
1242 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1243 	.flow_ctrl = dwmac4_flow_ctrl,
1244 	.pmt = dwmac4_pmt,
1245 	.set_umac_addr = dwmac4_set_umac_addr,
1246 	.get_umac_addr = dwmac4_get_umac_addr,
1247 	.set_eee_mode = dwmac4_set_eee_mode,
1248 	.reset_eee_mode = dwmac4_reset_eee_mode,
1249 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1250 	.set_eee_timer = dwmac4_set_eee_timer,
1251 	.set_eee_pls = dwmac4_set_eee_pls,
1252 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1253 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1254 	.debug = dwmac4_debug,
1255 	.set_filter = dwmac4_set_filter,
1256 	.flex_pps_config = dwmac5_flex_pps_config,
1257 	.set_mac_loopback = dwmac4_set_mac_loopback,
1258 	.update_vlan_hash = dwmac4_update_vlan_hash,
1259 	.sarc_configure = dwmac4_sarc_configure,
1260 	.enable_vlan = dwmac4_enable_vlan,
1261 	.set_arp_offload = dwmac4_set_arp_offload,
1262 	.config_l3_filter = dwmac4_config_l3_filter,
1263 	.config_l4_filter = dwmac4_config_l4_filter,
1264 	.fpe_configure = dwmac5_fpe_configure,
1265 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1266 	.fpe_irq_status = dwmac5_fpe_irq_status,
1267 	.fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
1268 	.fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
1269 	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
1270 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1271 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1272 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1273 	.rx_hw_vlan = dwmac4_rx_hw_vlan,
1274 	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
1275 };
1276 
1277 const struct stmmac_ops dwmac510_ops = {
1278 	.core_init = dwmac4_core_init,
1279 	.update_caps = dwmac4_update_caps,
1280 	.set_mac = stmmac_dwmac4_set_mac,
1281 	.rx_ipc = dwmac4_rx_ipc_enable,
1282 	.rx_queue_enable = dwmac4_rx_queue_enable,
1283 	.rx_queue_prio = dwmac4_rx_queue_priority,
1284 	.tx_queue_prio = dwmac4_tx_queue_priority,
1285 	.rx_queue_routing = dwmac4_rx_queue_routing,
1286 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1287 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1288 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1289 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1290 	.config_cbs = dwmac4_config_cbs,
1291 	.dump_regs = dwmac4_dump_regs,
1292 	.host_irq_status = dwmac4_irq_status,
1293 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1294 	.flow_ctrl = dwmac4_flow_ctrl,
1295 	.pmt = dwmac4_pmt,
1296 	.set_umac_addr = dwmac4_set_umac_addr,
1297 	.get_umac_addr = dwmac4_get_umac_addr,
1298 	.set_eee_mode = dwmac4_set_eee_mode,
1299 	.reset_eee_mode = dwmac4_reset_eee_mode,
1300 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1301 	.set_eee_timer = dwmac4_set_eee_timer,
1302 	.set_eee_pls = dwmac4_set_eee_pls,
1303 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1304 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1305 	.debug = dwmac4_debug,
1306 	.set_filter = dwmac4_set_filter,
1307 	.safety_feat_config = dwmac5_safety_feat_config,
1308 	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
1309 	.safety_feat_dump = dwmac5_safety_feat_dump,
1310 	.rxp_config = dwmac5_rxp_config,
1311 	.flex_pps_config = dwmac5_flex_pps_config,
1312 	.set_mac_loopback = dwmac4_set_mac_loopback,
1313 	.update_vlan_hash = dwmac4_update_vlan_hash,
1314 	.sarc_configure = dwmac4_sarc_configure,
1315 	.enable_vlan = dwmac4_enable_vlan,
1316 	.set_arp_offload = dwmac4_set_arp_offload,
1317 	.config_l3_filter = dwmac4_config_l3_filter,
1318 	.config_l4_filter = dwmac4_config_l4_filter,
1319 	.fpe_configure = dwmac5_fpe_configure,
1320 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1321 	.fpe_irq_status = dwmac5_fpe_irq_status,
1322 	.fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
1323 	.fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
1324 	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
1325 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1326 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1327 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1328 	.rx_hw_vlan = dwmac4_rx_hw_vlan,
1329 	.set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
1330 };
1331 
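/* Decode the "Number of VLAN Tag Filters" field of GMAC_HW_FEATURE3 into the
 * number of available RX VLAN filter entries; 1 means only the single
 * GMAC_VLAN_TAG based filter is present.
 */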
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
1333 {
1334 	u32 val, num_vlan;
1335 
1336 	val = readl(ioaddr + GMAC_HW_FEATURE3);
1337 	switch (val & GMAC_HW_FEAT_NRVF) {
1338 	case 0:
1339 		num_vlan = 1;
1340 		break;
1341 	case 1:
1342 		num_vlan = 4;
1343 		break;
1344 	case 2:
1345 		num_vlan = 8;
1346 		break;
1347 	case 3:
1348 		num_vlan = 16;
1349 		break;
1350 	case 4:
1351 		num_vlan = 24;
1352 		break;
1353 	case 5:
1354 		num_vlan = 32;
1355 		break;
1356 	default:
1357 		num_vlan = 1;
1358 	}
1359 
1360 	return num_vlan;
1361 }
1362 
int dwmac4_setup(struct stmmac_priv *priv)
1364 {
1365 	struct mac_device_info *mac = priv->hw;
1366 
1367 	dev_info(priv->device, "\tDWMAC4/5\n");
1368 
1369 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1370 	mac->pcsr = priv->ioaddr;
1371 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1372 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1373 	mac->mcast_bits_log2 = 0;
1374 
1375 	if (mac->multicast_filter_bins)
1376 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1377 
1378 	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1379 			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
1380 	mac->link.duplex = GMAC_CONFIG_DM;
1381 	mac->link.speed10 = GMAC_CONFIG_PS;
1382 	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1383 	mac->link.speed1000 = 0;
1384 	mac->link.speed2500 = GMAC_CONFIG_FES;
1385 	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1386 	mac->mii.addr = GMAC_MDIO_ADDR;
1387 	mac->mii.data = GMAC_MDIO_DATA;
1388 	mac->mii.addr_shift = 21;
1389 	mac->mii.addr_mask = GENMASK(25, 21);
1390 	mac->mii.reg_shift = 16;
1391 	mac->mii.reg_mask = GENMASK(20, 16);
1392 	mac->mii.clk_csr_shift = 8;
1393 	mac->mii.clk_csr_mask = GENMASK(11, 8);
1394 	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
1395 
1396 	return 0;
1397 }
1398