// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 */
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
#include <net/dsa.h>
#include <net/page_pool/helpers.h>
#include <uapi/linux/ppp_defs.h>
19  
#define AIROHA_MAX_NUM_GDM_PORTS	1
#define AIROHA_MAX_NUM_QDMA		2
#define AIROHA_MAX_NUM_RSTS		3
#define AIROHA_MAX_NUM_XSI_RSTS		5
#define AIROHA_MAX_MTU			2000
#define AIROHA_MAX_PACKET_SIZE		2048
#define AIROHA_NUM_TX_RING		32
#define AIROHA_NUM_RX_RING		32
#define AIROHA_FE_MC_MAX_VLAN_TABLE	64
#define AIROHA_FE_MC_MAX_VLAN_PORT	16
#define AIROHA_NUM_TX_IRQ		2
#define HW_DSCP_NUM			2048
#define IRQ_QUEUE_LEN(_n)		((_n) ? 1024 : 2048)
#define TX_DSCP_NUM			1024
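/* Per-ring RX descriptor budget: ring 0 (the default queue) gets 1024
 * descriptors, the dedicated rings 2, 11 and 15 get 128, every other
 * ring 16.
 */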
#define RX_DSCP_NUM(_n)			\
35  	((_n) ==  2 ? 128 :		\
36  	 (_n) == 11 ? 128 :		\
37  	 (_n) == 15 ? 128 :		\
38  	 (_n) ==  0 ? 1024 : 16)
39  
#define PSE_RSV_PAGES			128
#define PSE_QUEUE_RSV_PAGES		64
42  
/* FE */
#define PSE_BASE			0x0100
#define CSR_IFC_BASE			0x0200
#define CDM1_BASE			0x0400
#define GDM1_BASE			0x0500
#define PPE1_BASE			0x0c00

#define CDM2_BASE			0x1400
#define GDM2_BASE			0x1500

#define GDM3_BASE			0x1100
#define GDM4_BASE			0x2500

#define GDM_BASE(_n)			\
	((_n) == 4 ? GDM4_BASE :	\
	 (_n) == 3 ? GDM3_BASE :	\
	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)

#define REG_FE_DMA_GLO_CFG		0x0000
#define FE_DMA_GLO_L2_SPACE_MASK	GENMASK(7, 4)
#define FE_DMA_GLO_PG_SZ_MASK		BIT(3)

#define REG_FE_RST_GLO_CFG		0x0004
#define FE_RST_GDM4_MBI_ARB_MASK	BIT(3)
#define FE_RST_GDM3_MBI_ARB_MASK	BIT(2)
#define FE_RST_CORE_MASK		BIT(0)

#define REG_FE_WAN_MAC_H		0x0030
#define REG_FE_LAN_MAC_H		0x0040

#define REG_FE_MAC_LMIN(_n)		((_n) + 0x04)
#define REG_FE_MAC_LMAX(_n)		((_n) + 0x08)

#define REG_FE_CDM1_OQ_MAP0		0x0050
#define REG_FE_CDM1_OQ_MAP1		0x0054
#define REG_FE_CDM1_OQ_MAP2		0x0058
#define REG_FE_CDM1_OQ_MAP3		0x005c

#define REG_FE_PCE_CFG			0x0070
#define PCE_DPI_EN_MASK			BIT(2)
#define PCE_KA_EN_MASK			BIT(1)
#define PCE_MC_EN_MASK			BIT(0)

#define REG_FE_PSE_QUEUE_CFG_WR		0x0080
#define PSE_CFG_PORT_ID_MASK		GENMASK(27, 24)
#define PSE_CFG_QUEUE_ID_MASK		GENMASK(20, 16)
#define PSE_CFG_WR_EN_MASK		BIT(8)
#define PSE_CFG_OQRSV_SEL_MASK		BIT(0)

#define REG_FE_PSE_QUEUE_CFG_VAL	0x0084
#define PSE_CFG_OQ_RSV_MASK		GENMASK(13, 0)

#define PSE_FQ_CFG			0x008c
#define PSE_FQ_LIMIT_MASK		GENMASK(14, 0)

#define REG_FE_PSE_BUF_SET		0x0090
#define PSE_SHARE_USED_LTHD_MASK	GENMASK(31, 16)
#define PSE_ALLRSV_MASK			GENMASK(14, 0)

#define REG_PSE_SHARE_USED_THD		0x0094
#define PSE_SHARE_USED_MTHD_MASK	GENMASK(31, 16)
#define PSE_SHARE_USED_HTHD_MASK	GENMASK(15, 0)

#define REG_GDM_MISC_CFG		0x0148
#define GDM2_RDM_ACK_WAIT_PREF_MASK	BIT(9)
#define GDM2_CHN_VLD_MODE_MASK		BIT(5)

#define REG_FE_CSR_IFC_CFG		CSR_IFC_BASE
#define FE_IFC_EN_MASK			BIT(0)

#define REG_FE_VIP_PORT_EN		0x01f0
#define REG_FE_IFC_PORT_EN		0x01f4

#define REG_PSE_IQ_REV1			(PSE_BASE + 0x08)
#define PSE_IQ_RES1_P2_MASK		GENMASK(23, 16)

#define REG_PSE_IQ_REV2			(PSE_BASE + 0x0c)
#define PSE_IQ_RES2_P5_MASK		GENMASK(15, 8)
#define PSE_IQ_RES2_P4_MASK		GENMASK(7, 0)

#define REG_FE_VIP_EN(_n)		(0x0300 + ((_n) << 3))
#define PATN_FCPU_EN_MASK		BIT(7)
#define PATN_SWP_EN_MASK		BIT(6)
#define PATN_DP_EN_MASK			BIT(5)
#define PATN_SP_EN_MASK			BIT(4)
#define PATN_TYPE_MASK			GENMASK(3, 1)
#define PATN_EN_MASK			BIT(0)

#define REG_FE_VIP_PATN(_n)		(0x0304 + ((_n) << 3))
#define PATN_DP_MASK			GENMASK(31, 16)
#define PATN_SP_MASK			GENMASK(15, 0)

#define REG_CDM1_VLAN_CTRL		CDM1_BASE
#define CDM1_VLAN_MASK			GENMASK(31, 16)

#define REG_CDM1_FWD_CFG		(CDM1_BASE + 0x08)
#define CDM1_VIP_QSEL_MASK		GENMASK(24, 20)

#define REG_CDM1_CRSN_QSEL(_n)		(CDM1_BASE + 0x10 + ((_n) << 2))
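/* Each CRSN_QSEL register packs four 5-bit queue-selection fields, one
 * per CPU reason (CRSN), at 8-bit strides: reason _n lives in register
 * REG_CDM1_CRSN_QSEL(_n >> 2), bits [(_n % 4) * 8 + 4, (_n % 4) * 8].
 * The CDM2 variant below uses the same layout.
 */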
#define CDM1_CRSN_QSEL_REASON_MASK(_n)	\
143  	GENMASK(4 + (((_n) % 4) << 3),	(((_n) % 4) << 3))
144  
#define REG_CDM2_FWD_CFG		(CDM2_BASE + 0x08)
#define CDM2_OAM_QSEL_MASK		GENMASK(31, 27)
#define CDM2_VIP_QSEL_MASK		GENMASK(24, 20)

#define REG_CDM2_CRSN_QSEL(_n)		(CDM2_BASE + 0x10 + ((_n) << 2))
#define CDM2_CRSN_QSEL_REASON_MASK(_n)	\
	GENMASK(4 + (((_n) % 4) << 3),	(((_n) % 4) << 3))

#define REG_GDM_FWD_CFG(_n)		GDM_BASE(_n)
#define GDM_DROP_CRC_ERR		BIT(23)
#define GDM_IP4_CKSUM			BIT(22)
#define GDM_TCP_CKSUM			BIT(21)
#define GDM_UDP_CKSUM			BIT(20)
#define GDM_UCFQ_MASK			GENMASK(15, 12)
#define GDM_BCFQ_MASK			GENMASK(11, 8)
#define GDM_MCFQ_MASK			GENMASK(7, 4)
#define GDM_OCFQ_MASK			GENMASK(3, 0)

#define REG_GDM_INGRESS_CFG(_n)		(GDM_BASE(_n) + 0x10)
#define GDM_INGRESS_FC_EN_MASK		BIT(1)
#define GDM_STAG_EN_MASK		BIT(0)

#define REG_GDM_LEN_CFG(_n)		(GDM_BASE(_n) + 0x14)
#define GDM_SHORT_LEN_MASK		GENMASK(13, 0)
#define GDM_LONG_LEN_MASK		GENMASK(29, 16)

#define REG_FE_CPORT_CFG		(GDM1_BASE + 0x40)
#define FE_CPORT_PAD			BIT(26)
#define FE_CPORT_PORT_XFC_MASK		BIT(25)
#define FE_CPORT_QUEUE_XFC_MASK		BIT(24)

#define REG_FE_GDM_MIB_CLEAR(_n)	(GDM_BASE(_n) + 0xf0)
#define FE_GDM_MIB_RX_CLEAR_MASK	BIT(1)
#define FE_GDM_MIB_TX_CLEAR_MASK	BIT(0)

#define REG_FE_GDM1_MIB_CFG		(GDM1_BASE + 0xf4)
#define FE_STRICT_RFC2819_MODE_MASK	BIT(31)
#define FE_GDM1_TX_MIB_SPLIT_EN_MASK	BIT(17)
#define FE_GDM1_RX_MIB_SPLIT_EN_MASK	BIT(16)
#define FE_TX_MIB_ID_MASK		GENMASK(15, 8)
#define FE_RX_MIB_ID_MASK		GENMASK(7, 0)

#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x104)
#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n)		(GDM_BASE(_n) + 0x10c)
#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x110)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n)	(GDM_BASE(_n) + 0x114)
#define REG_FE_GDM_TX_ETH_DROP_CNT(_n)		(GDM_BASE(_n) + 0x118)
#define REG_FE_GDM_TX_ETH_BC_CNT(_n)		(GDM_BASE(_n) + 0x11c)
#define REG_FE_GDM_TX_ETH_MC_CNT(_n)		(GDM_BASE(_n) + 0x120)
#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n)		(GDM_BASE(_n) + 0x124)
#define REG_FE_GDM_TX_ETH_LONG_CNT(_n)		(GDM_BASE(_n) + 0x128)
#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n)		(GDM_BASE(_n) + 0x12c)
#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n)		(GDM_BASE(_n) + 0x130)
#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n)	(GDM_BASE(_n) + 0x134)
#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n)	(GDM_BASE(_n) + 0x138)
#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n)	(GDM_BASE(_n) + 0x13c)
#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n)	(GDM_BASE(_n) + 0x140)

#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x148)
#define REG_FE_GDM_RX_FC_DROP_CNT(_n)		(GDM_BASE(_n) + 0x14c)
#define REG_FE_GDM_RX_RC_DROP_CNT(_n)		(GDM_BASE(_n) + 0x150)
#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n)	(GDM_BASE(_n) + 0x154)
#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n)	(GDM_BASE(_n) + 0x158)
#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n)		(GDM_BASE(_n) + 0x15c)
#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n)		(GDM_BASE(_n) + 0x160)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n)	(GDM_BASE(_n) + 0x164)
#define REG_FE_GDM_RX_ETH_DROP_CNT(_n)		(GDM_BASE(_n) + 0x168)
#define REG_FE_GDM_RX_ETH_BC_CNT(_n)		(GDM_BASE(_n) + 0x16c)
#define REG_FE_GDM_RX_ETH_MC_CNT(_n)		(GDM_BASE(_n) + 0x170)
#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n)	(GDM_BASE(_n) + 0x174)
#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n)		(GDM_BASE(_n) + 0x178)
#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n)	(GDM_BASE(_n) + 0x17c)
#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n)		(GDM_BASE(_n) + 0x180)
#define REG_FE_GDM_RX_ETH_LONG_CNT(_n)		(GDM_BASE(_n) + 0x184)
#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n)		(GDM_BASE(_n) + 0x188)
#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n)		(GDM_BASE(_n) + 0x18c)
#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n)	(GDM_BASE(_n) + 0x190)
#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n)	(GDM_BASE(_n) + 0x194)
#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n)	(GDM_BASE(_n) + 0x198)
#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n)	(GDM_BASE(_n) + 0x19c)

#define REG_PPE1_TB_HASH_CFG		(PPE1_BASE + 0x250)
#define PPE1_SRAM_TABLE_EN_MASK		BIT(0)
#define PPE1_SRAM_HASH1_EN_MASK		BIT(8)
#define PPE1_DRAM_TABLE_EN_MASK		BIT(16)
#define PPE1_DRAM_HASH1_EN_MASK		BIT(24)

#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x280)
#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x284)
#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x288)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x28c)

#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x290)
#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n)		(GDM_BASE(_n) + 0x294)
#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n)		(GDM_BASE(_n) + 0x298)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n)	(GDM_BASE(_n) + 0x29c)
#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2b8)
#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2bc)
#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2c0)
#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2c4)
#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2c8)
#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2cc)
#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n)		(GDM_BASE(_n) + 0x2e8)
#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n)		(GDM_BASE(_n) + 0x2ec)
#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n)	(GDM_BASE(_n) + 0x2f0)
#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n)	(GDM_BASE(_n) + 0x2f4)
#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n)	(GDM_BASE(_n) + 0x2f8)
#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n)	(GDM_BASE(_n) + 0x2fc)

#define REG_GDM2_CHN_RLS		(GDM2_BASE + 0x20)
#define MBI_RX_AGE_SEL_MASK		GENMASK(26, 25)
#define MBI_TX_AGE_SEL_MASK		GENMASK(18, 17)

#define REG_GDM3_FWD_CFG		GDM3_BASE
#define GDM3_PAD_EN_MASK		BIT(28)

#define REG_GDM4_FWD_CFG		(GDM4_BASE + 0x100)
#define GDM4_PAD_EN_MASK		BIT(28)
#define GDM4_SPORT_OFFSET0_MASK		GENMASK(11, 8)

#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x33c)
#define GDM4_SPORT_OFF2_MASK		GENMASK(19, 16)
#define GDM4_SPORT_OFF1_MASK		GENMASK(15, 12)
#define GDM4_SPORT_OFF0_MASK		GENMASK(11, 8)

#define REG_IP_FRAG_FP			0x2010
#define IP_ASSEMBLE_PORT_MASK		GENMASK(24, 21)
#define IP_ASSEMBLE_NBQ_MASK		GENMASK(20, 16)
#define IP_FRAGMENT_PORT_MASK		GENMASK(8, 5)
#define IP_FRAGMENT_NBQ_MASK		GENMASK(4, 0)

#define REG_MC_VLAN_EN			0x2100
#define MC_VLAN_EN_MASK			BIT(0)

#define REG_MC_VLAN_CFG			0x2104
#define MC_VLAN_CFG_CMD_DONE_MASK	BIT(31)
#define MC_VLAN_CFG_TABLE_ID_MASK	GENMASK(21, 16)
#define MC_VLAN_CFG_PORT_ID_MASK	GENMASK(11, 8)
#define MC_VLAN_CFG_TABLE_SEL_MASK	BIT(4)
#define MC_VLAN_CFG_RW_MASK		BIT(0)

#define REG_MC_VLAN_DATA		0x2108

#define REG_CDM5_RX_OQ1_DROP_CNT	0x29d4

/* QDMA */
#define REG_QDMA_GLOBAL_CFG			0x0004
#define GLOBAL_CFG_RX_2B_OFFSET_MASK		BIT(31)
#define GLOBAL_CFG_DMA_PREFERENCE_MASK		GENMASK(30, 29)
#define GLOBAL_CFG_CPU_TXR_RR_MASK		BIT(28)
#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK		BIT(27)
#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK	BIT(26)
#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK	BIT(25)
#define GLOBAL_CFG_OAM_MODIFY_MASK		BIT(24)
#define GLOBAL_CFG_RESET_MASK			BIT(23)
#define GLOBAL_CFG_RESET_DONE_MASK		BIT(22)
#define GLOBAL_CFG_MULTICAST_EN_MASK		BIT(21)
#define GLOBAL_CFG_IRQ1_EN_MASK			BIT(20)
#define GLOBAL_CFG_IRQ0_EN_MASK			BIT(19)
#define GLOBAL_CFG_LOOPCNT_EN_MASK		BIT(18)
#define GLOBAL_CFG_RD_BYPASS_WR_MASK		BIT(17)
#define GLOBAL_CFG_QDMA_LOOPBACK_MASK		BIT(16)
#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK		GENMASK(13, 8)
#define GLOBAL_CFG_CHECK_DONE_MASK		BIT(7)
#define GLOBAL_CFG_TX_WB_DONE_MASK		BIT(6)
#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK		GENMASK(5, 4)
#define GLOBAL_CFG_RX_DMA_BUSY_MASK		BIT(3)
#define GLOBAL_CFG_RX_DMA_EN_MASK		BIT(2)
#define GLOBAL_CFG_TX_DMA_BUSY_MASK		BIT(1)
#define GLOBAL_CFG_TX_DMA_EN_MASK		BIT(0)

#define REG_FWD_DSCP_BASE			0x0010
#define REG_FWD_BUF_BASE			0x0014

#define REG_HW_FWD_DSCP_CFG			0x0018
#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK		GENMASK(29, 28)
#define HW_FWD_DSCP_SCATTER_LEN_MASK		GENMASK(17, 16)
#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK	GENMASK(15, 0)
323  
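/* Five interrupt status/enable banks: banks 0 and 1 sit in the low
 * register window (0x0020/0x0024 and 0x0028/0x002c), banks 2-4 in the
 * extended one (0x07xx). Their bit layouts are listed below.
 */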
#define REG_INT_STATUS(_n)		\
	(((_n) == 4) ? 0x0730 :		\
	 ((_n) == 3) ? 0x0724 :		\
	 ((_n) == 2) ? 0x0720 :		\
	 ((_n) == 1) ? 0x0024 : 0x0020)

#define REG_INT_ENABLE(_n)		\
331  	(((_n) == 4) ? 0x0750 :		\
332  	 ((_n) == 3) ? 0x0744 :		\
333  	 ((_n) == 2) ? 0x0740 :		\
334  	 ((_n) == 1) ? 0x002c : 0x0028)
335  
/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK		BIT(31)
#define RX14_COHERENT_INT_MASK		BIT(30)
#define RX13_COHERENT_INT_MASK		BIT(29)
#define RX12_COHERENT_INT_MASK		BIT(28)
#define RX11_COHERENT_INT_MASK		BIT(27)
#define RX10_COHERENT_INT_MASK		BIT(26)
#define RX9_COHERENT_INT_MASK		BIT(25)
#define RX8_COHERENT_INT_MASK		BIT(24)
#define RX7_COHERENT_INT_MASK		BIT(23)
#define RX6_COHERENT_INT_MASK		BIT(22)
#define RX5_COHERENT_INT_MASK		BIT(21)
#define RX4_COHERENT_INT_MASK		BIT(20)
#define RX3_COHERENT_INT_MASK		BIT(19)
#define RX2_COHERENT_INT_MASK		BIT(18)
#define RX1_COHERENT_INT_MASK		BIT(17)
#define RX0_COHERENT_INT_MASK		BIT(16)
#define TX7_COHERENT_INT_MASK		BIT(15)
#define TX6_COHERENT_INT_MASK		BIT(14)
#define TX5_COHERENT_INT_MASK		BIT(13)
#define TX4_COHERENT_INT_MASK		BIT(12)
#define TX3_COHERENT_INT_MASK		BIT(11)
#define TX2_COHERENT_INT_MASK		BIT(10)
#define TX1_COHERENT_INT_MASK		BIT(9)
#define TX0_COHERENT_INT_MASK		BIT(8)
#define CNT_OVER_FLOW_INT_MASK		BIT(7)
#define IRQ1_FULL_INT_MASK		BIT(5)
#define IRQ1_INT_MASK			BIT(4)
#define HWFWD_DSCP_LOW_INT_MASK		BIT(3)
#define HWFWD_DSCP_EMPTY_INT_MASK	BIT(2)
#define IRQ0_FULL_INT_MASK		BIT(1)
#define IRQ0_INT_MASK			BIT(0)

#define TX_DONE_INT_MASK(_n)					\
	((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK		\
	      : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)

#define INT_TX_MASK						\
	(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK |			\
	 IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)

#define INT_IDX0_MASK						\
	(TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK |	\
	 TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK |	\
	 TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK |	\
	 TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK |	\
	 RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK |	\
	 RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK |	\
	 RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK |	\
	 RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK |	\
	 RX15_COHERENT_INT_MASK | INT_TX_MASK)

/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK	BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK	BIT(30)
#define RX13_NO_CPU_DSCP_INT_MASK	BIT(29)
#define RX12_NO_CPU_DSCP_INT_MASK	BIT(28)
#define RX11_NO_CPU_DSCP_INT_MASK	BIT(27)
#define RX10_NO_CPU_DSCP_INT_MASK	BIT(26)
#define RX9_NO_CPU_DSCP_INT_MASK	BIT(25)
#define RX8_NO_CPU_DSCP_INT_MASK	BIT(24)
#define RX7_NO_CPU_DSCP_INT_MASK	BIT(23)
#define RX6_NO_CPU_DSCP_INT_MASK	BIT(22)
#define RX5_NO_CPU_DSCP_INT_MASK	BIT(21)
#define RX4_NO_CPU_DSCP_INT_MASK	BIT(20)
#define RX3_NO_CPU_DSCP_INT_MASK	BIT(19)
#define RX2_NO_CPU_DSCP_INT_MASK	BIT(18)
#define RX1_NO_CPU_DSCP_INT_MASK	BIT(17)
#define RX0_NO_CPU_DSCP_INT_MASK	BIT(16)
#define RX15_DONE_INT_MASK		BIT(15)
#define RX14_DONE_INT_MASK		BIT(14)
#define RX13_DONE_INT_MASK		BIT(13)
#define RX12_DONE_INT_MASK		BIT(12)
#define RX11_DONE_INT_MASK		BIT(11)
#define RX10_DONE_INT_MASK		BIT(10)
#define RX9_DONE_INT_MASK		BIT(9)
#define RX8_DONE_INT_MASK		BIT(8)
#define RX7_DONE_INT_MASK		BIT(7)
#define RX6_DONE_INT_MASK		BIT(6)
#define RX5_DONE_INT_MASK		BIT(5)
#define RX4_DONE_INT_MASK		BIT(4)
#define RX3_DONE_INT_MASK		BIT(3)
#define RX2_DONE_INT_MASK		BIT(2)
#define RX1_DONE_INT_MASK		BIT(1)
#define RX0_DONE_INT_MASK		BIT(0)

#define RX_DONE_INT_MASK					\
423  	(RX0_DONE_INT_MASK | RX1_DONE_INT_MASK |		\
424  	 RX2_DONE_INT_MASK | RX3_DONE_INT_MASK |		\
425  	 RX4_DONE_INT_MASK | RX7_DONE_INT_MASK |		\
426  	 RX8_DONE_INT_MASK | RX9_DONE_INT_MASK |		\
427  	 RX15_DONE_INT_MASK)
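/* Only the RX rings the driver actually services (0-4, 7-9 and 15) are
 * armed here; INT_IDX0_MASK above makes the same selection for the
 * coherent interrupts.
 */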
#define INT_IDX1_MASK						\
429  	(RX_DONE_INT_MASK |					\
430  	 RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK |	\
431  	 RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK |	\
432  	 RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK |	\
433  	 RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK |	\
434  	 RX15_NO_CPU_DSCP_INT_MASK)
435  
/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK		BIT(31)
#define TX30_COHERENT_INT_MASK		BIT(30)
#define TX29_COHERENT_INT_MASK		BIT(29)
#define TX28_COHERENT_INT_MASK		BIT(28)
#define TX27_COHERENT_INT_MASK		BIT(27)
#define TX26_COHERENT_INT_MASK		BIT(26)
#define TX25_COHERENT_INT_MASK		BIT(25)
#define TX24_COHERENT_INT_MASK		BIT(24)
#define TX23_COHERENT_INT_MASK		BIT(23)
#define TX22_COHERENT_INT_MASK		BIT(22)
#define TX21_COHERENT_INT_MASK		BIT(21)
#define TX20_COHERENT_INT_MASK		BIT(20)
#define TX19_COHERENT_INT_MASK		BIT(19)
#define TX18_COHERENT_INT_MASK		BIT(18)
#define TX17_COHERENT_INT_MASK		BIT(17)
#define TX16_COHERENT_INT_MASK		BIT(16)
#define TX15_COHERENT_INT_MASK		BIT(15)
#define TX14_COHERENT_INT_MASK		BIT(14)
#define TX13_COHERENT_INT_MASK		BIT(13)
#define TX12_COHERENT_INT_MASK		BIT(12)
#define TX11_COHERENT_INT_MASK		BIT(11)
#define TX10_COHERENT_INT_MASK		BIT(10)
#define TX9_COHERENT_INT_MASK		BIT(9)
#define TX8_COHERENT_INT_MASK		BIT(8)

#define INT_IDX4_MASK						\
	(TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK |	\
	 TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK |	\
	 TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK |	\
	 TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK |	\
	 TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK |	\
	 TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK |	\
	 TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK |	\
	 TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK |	\
	 TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK |	\
	 TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK |	\
	 TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK |	\
	 TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)

#define REG_TX_IRQ_BASE(_n)		((_n) ? 0x0048 : 0x0050)

#define REG_TX_IRQ_CFG(_n)		((_n) ? 0x004c : 0x0054)
#define TX_IRQ_THR_MASK			GENMASK(27, 16)
#define TX_IRQ_DEPTH_MASK		GENMASK(11, 0)

#define REG_IRQ_CLEAR_LEN(_n)		((_n) ? 0x0064 : 0x0058)
#define IRQ_CLEAR_LEN_MASK		GENMASK(7, 0)

#define REG_IRQ_STATUS(_n)		((_n) ? 0x0068 : 0x005c)
#define IRQ_ENTRY_LEN_MASK		GENMASK(27, 16)
#define IRQ_HEAD_IDX_MASK		GENMASK(11, 0)

#define REG_TX_RING_BASE(_n)	\
	(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))

#define REG_TX_RING_BLOCKING(_n)	\
	(((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))

#define TX_RING_IRQ_BLOCKING_MAP_MASK			BIT(6)
#define TX_RING_IRQ_BLOCKING_CFG_MASK			BIT(4)
#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK		BIT(2)
#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK	BIT(1)
#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK	BIT(0)

#define REG_TX_CPU_IDX(_n)	\
	(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))

#define TX_RING_CPU_IDX_MASK		GENMASK(15, 0)

#define REG_TX_DMA_IDX(_n)	\
	(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))

#define TX_RING_DMA_IDX_MASK		GENMASK(15, 0)

#define IRQ_RING_IDX_MASK		GENMASK(20, 16)
#define IRQ_DESC_IDX_MASK		GENMASK(15, 0)

#define REG_RX_RING_BASE(_n)	\
	(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))

#define REG_RX_RING_SIZE(_n)	\
	(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))

#define RX_RING_THR_MASK		GENMASK(31, 16)
#define RX_RING_SIZE_MASK		GENMASK(15, 0)

#define REG_RX_CPU_IDX(_n)	\
	(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))

#define RX_RING_CPU_IDX_MASK		GENMASK(15, 0)

#define REG_RX_DMA_IDX(_n)	\
	(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))

#define REG_RX_DELAY_INT_IDX(_n)	\
	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))

#define RX_DELAY_INT_MASK		GENMASK(15, 0)

#define RX_RING_DMA_IDX_MASK		GENMASK(15, 0)
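/* RX ring registers use a 0x20 stride: rings 0-15 start at 0x0200 and
 * rings 16-31 at 0x0e00 (TX rings: 0-7 at 0x0100, 8-31 at 0x0b00),
 * hence the split addressing above.
 */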
537  
#define REG_INGRESS_TRTCM_CFG		0x0070
#define INGRESS_TRTCM_EN_MASK		BIT(31)
#define INGRESS_TRTCM_MODE_MASK		BIT(30)
#define INGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXQ_DIS_CFG_BASE(_n)	((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m)		(REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
546  
#define REG_LMGR_INIT_CFG		0x1000
#define LMGR_INIT_START			BIT(31)
#define LMGR_SRAM_MODE_MASK		BIT(30)
#define HW_FWD_PKTSIZE_OVERHEAD_MASK	GENMASK(27, 20)
#define HW_FWD_DESC_NUM_MASK		GENMASK(16, 0)

#define REG_FWD_DSCP_LOW_THR		0x1004
#define FWD_DSCP_LOW_THR_MASK		GENMASK(17, 0)

#define REG_EGRESS_RATE_METER_CFG		0x100c
#define EGRESS_RATE_METER_EN_MASK		BIT(29)
#define EGRESS_RATE_METER_EQ_RATE_EN_MASK	BIT(17)
#define EGRESS_RATE_METER_WINDOW_SZ_MASK	GENMASK(16, 12)
#define EGRESS_RATE_METER_TIMESLICE_MASK	GENMASK(10, 0)

#define REG_EGRESS_TRTCM_CFG		0x1010
#define EGRESS_TRTCM_EN_MASK		BIT(31)
#define EGRESS_TRTCM_MODE_MASK		BIT(30)
#define EGRESS_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXWRR_MODE_CFG		0x1020
#define TWRR_WEIGHT_SCALE_MASK		BIT(31)
#define TWRR_WEIGHT_BASE_MASK		BIT(3)

#define REG_PSE_BUF_USAGE_CFG		0x1028
#define PSE_BUF_ESTIMATE_EN_MASK	BIT(29)

#define REG_GLB_TRTCM_CFG		0x1080
#define GLB_TRTCM_EN_MASK		BIT(31)
#define GLB_TRTCM_MODE_MASK		BIT(30)
#define GLB_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define GLB_FAST_TICK_MASK		GENMASK(15, 0)

#define REG_TXQ_CNGST_CFG		0x10a0
#define TXQ_CNGST_DROP_EN		BIT(31)
#define TXQ_CNGST_DEI_DROP_EN		BIT(30)

#define REG_SLA_TRTCM_CFG		0x1150
#define SLA_TRTCM_EN_MASK		BIT(31)
#define SLA_TRTCM_MODE_MASK		BIT(30)
#define SLA_SLOW_TICK_RATIO_MASK	GENMASK(29, 16)
#define SLA_FAST_TICK_MASK		GENMASK(15, 0)

/* CTRL */
#define QDMA_DESC_DONE_MASK		BIT(31)
#define QDMA_DESC_DROP_MASK		BIT(30) /* tx: drop - rx: overflow */
#define QDMA_DESC_MORE_MASK		BIT(29) /* more SG elements */
#define QDMA_DESC_DEI_MASK		BIT(25)
#define QDMA_DESC_NO_DROP_MASK		BIT(24)
#define QDMA_DESC_LEN_MASK		GENMASK(15, 0)
/* DATA */
#define QDMA_DESC_NEXT_ID_MASK		GENMASK(15, 0)
/* TX MSG0 */
#define QDMA_ETH_TXMSG_MIC_IDX_MASK	BIT(30)
#define QDMA_ETH_TXMSG_SP_TAG_MASK	GENMASK(29, 14)
#define QDMA_ETH_TXMSG_ICO_MASK		BIT(13)
#define QDMA_ETH_TXMSG_UCO_MASK		BIT(12)
#define QDMA_ETH_TXMSG_TCO_MASK		BIT(11)
#define QDMA_ETH_TXMSG_TSO_MASK		BIT(10)
#define QDMA_ETH_TXMSG_FAST_MASK	BIT(9)
#define QDMA_ETH_TXMSG_OAM_MASK		BIT(8)
#define QDMA_ETH_TXMSG_CHAN_MASK	GENMASK(7, 3)
#define QDMA_ETH_TXMSG_QUEUE_MASK	GENMASK(2, 0)
/* TX MSG1 */
#define QDMA_ETH_TXMSG_NO_DROP		BIT(31)
#define QDMA_ETH_TXMSG_METER_MASK	GENMASK(30, 24)	/* 0x7f no meters */
#define QDMA_ETH_TXMSG_FPORT_MASK	GENMASK(23, 20)
#define QDMA_ETH_TXMSG_NBOQ_MASK	GENMASK(19, 15)
#define QDMA_ETH_TXMSG_HWF_MASK		BIT(14)
#define QDMA_ETH_TXMSG_HOP_MASK		BIT(13)
#define QDMA_ETH_TXMSG_PTP_MASK		BIT(12)
#define QDMA_ETH_TXMSG_ACNT_G1_MASK	GENMASK(10, 6)	/* 0x1f do not count */
#define QDMA_ETH_TXMSG_ACNT_G0_MASK	GENMASK(5, 0)	/* 0x3f do not count */

/* RX MSG1 */
#define QDMA_ETH_RXMSG_DEI_MASK		BIT(31)
#define QDMA_ETH_RXMSG_IP6_MASK		BIT(30)
#define QDMA_ETH_RXMSG_IP4_MASK		BIT(29)
#define QDMA_ETH_RXMSG_IP4F_MASK	BIT(28)
#define QDMA_ETH_RXMSG_L4_VALID_MASK	BIT(27)
#define QDMA_ETH_RXMSG_L4F_MASK		BIT(26)
#define QDMA_ETH_RXMSG_SPORT_MASK	GENMASK(25, 21)
#define QDMA_ETH_RXMSG_CRSN_MASK	GENMASK(20, 16)
#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK	GENMASK(15, 0)

struct airoha_qdma_desc {
	__le32 rsv;
	__le32 ctrl;
	__le32 addr;
	__le32 data;
	__le32 msg0;
	__le32 msg1;
	__le32 msg2;
	__le32 msg3;
};
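/* Illustrative TX descriptor setup (a sketch, not lifted verbatim from
 * the TX path): fields are packed with FIELD_PREP() against the masks
 * above and stored little endian, e.g.
 *
 *	val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
 *	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
 *	val = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
 *	      FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
 *	WRITE_ONCE(desc->msg1, cpu_to_le32(val));
 */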
643  
/* CTRL0 */
#define QDMA_FWD_DESC_CTX_MASK		BIT(31)
#define QDMA_FWD_DESC_RING_MASK		GENMASK(30, 28)
#define QDMA_FWD_DESC_IDX_MASK		GENMASK(27, 16)
#define QDMA_FWD_DESC_LEN_MASK		GENMASK(15, 0)
/* CTRL1 */
#define QDMA_FWD_DESC_FIRST_IDX_MASK	GENMASK(15, 0)
/* CTRL2 */
#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK	GENMASK(2, 0)

struct airoha_qdma_fwd_desc {
	__le32 addr;
	__le32 ctrl0;
	__le32 ctrl1;
	__le32 ctrl2;
	__le32 msg0;
	__le32 msg1;
	__le32 rsv0;
	__le32 rsv1;
};

enum {
	QDMA_INT_REG_IDX0,
	QDMA_INT_REG_IDX1,
	QDMA_INT_REG_IDX2,
	QDMA_INT_REG_IDX3,
	QDMA_INT_REG_IDX4,
	QDMA_INT_REG_MAX
};

enum {
	XSI_PCIE0_PORT,
	XSI_PCIE1_PORT,
	XSI_USB_PORT,
	XSI_AE_PORT,
	XSI_ETH_PORT,
};

enum {
	XSI_PCIE0_VIP_PORT_MASK	= BIT(22),
	XSI_PCIE1_VIP_PORT_MASK	= BIT(23),
	XSI_USB_VIP_PORT_MASK	= BIT(25),
	XSI_ETH_VIP_PORT_MASK	= BIT(24),
};

enum {
	DEV_STATE_INITIALIZED,
};

enum {
	CDM_CRSN_QSEL_Q1 = 1,
	CDM_CRSN_QSEL_Q5 = 5,
	CDM_CRSN_QSEL_Q6 = 6,
	CDM_CRSN_QSEL_Q15 = 15,
};

enum {
	CRSN_08 = 0x8,
	CRSN_21 = 0x15, /* KA */
	CRSN_22 = 0x16, /* hit bind and force route to CPU */
	CRSN_24 = 0x18,
	CRSN_25 = 0x19,
};
707  
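/* PSE output port identifiers: used to index the per-port queue
 * configuration and as forward-port values in the GDM_*CFQ fields
 * (see airoha_set_gdm_port_fwd_cfg()); 0xf discards the frame.
 */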
enum {
	FE_PSE_PORT_CDM1,
	FE_PSE_PORT_GDM1,
	FE_PSE_PORT_GDM2,
	FE_PSE_PORT_GDM3,
	FE_PSE_PORT_PPE1,
	FE_PSE_PORT_CDM2,
	FE_PSE_PORT_CDM3,
	FE_PSE_PORT_CDM4,
	FE_PSE_PORT_PPE2,
	FE_PSE_PORT_GDM4,
	FE_PSE_PORT_CDM5,
	FE_PSE_PORT_DROP = 0xf,
};

struct airoha_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	dma_addr_t dma_addr;
	u16 dma_len;
};

struct airoha_queue {
	struct airoha_qdma *qdma;

	/* protect concurrent queue accesses */
	spinlock_t lock;
	struct airoha_queue_entry *entry;
	struct airoha_qdma_desc *desc;
	u16 head;
	u16 tail;

	int queued;
	int ndesc;
	int free_thr;
	int buf_size;

	struct napi_struct napi;
	struct page_pool *page_pool;
};

struct airoha_tx_irq_queue {
	struct airoha_qdma *qdma;

	struct napi_struct napi;
	u32 *q;

	int size;
	int queued;
	u16 head;
};

struct airoha_hw_stats {
	/* protect concurrent hw_stats accesses */
	spinlock_t lock;
	struct u64_stats_sync syncp;

	/* get_stats64 */
	u64 rx_ok_pkts;
	u64 tx_ok_pkts;
	u64 rx_ok_bytes;
	u64 tx_ok_bytes;
	u64 rx_multicast;
	u64 rx_errors;
	u64 rx_drops;
	u64 tx_drops;
	u64 rx_crc_error;
	u64 rx_over_errors;
	/* ethtool stats */
	u64 tx_broadcast;
	u64 tx_multicast;
	u64 tx_len[7];
	u64 rx_broadcast;
	u64 rx_fragment;
	u64 rx_jabber;
	u64 rx_len[7];
};

struct airoha_qdma {
	struct airoha_eth *eth;
	void __iomem *regs;

	/* protect concurrent irqmask accesses */
	spinlock_t irq_lock;
	u32 irqmask[QDMA_INT_REG_MAX];
	int irq;

	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];

	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];

	/* descriptor and packet buffers for qdma hw forward */
	struct {
		void *desc;
		void *q;
	} hfwd;
};

struct airoha_gdm_port {
	struct airoha_qdma *qdma;
	struct net_device *dev;
	int id;

	struct airoha_hw_stats stats;
};

struct airoha_eth {
	struct device *dev;

	unsigned long state;
	void __iomem *fe_regs;

	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];

	struct net_device *napi_dev;

	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};
831  
static u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

#define airoha_fe_rr(eth, offset)				\
	airoha_rr((eth)->fe_regs, (offset))
#define airoha_fe_wr(eth, offset, val)				\
	airoha_wr((eth)->fe_regs, (offset), (val))
#define airoha_fe_rmw(eth, offset, mask, val)			\
	airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
#define airoha_fe_set(eth, offset, val)				\
	airoha_rmw((eth)->fe_regs, (offset), 0, (val))
#define airoha_fe_clear(eth, offset, val)			\
	airoha_rmw((eth)->fe_regs, (offset), (val), 0)

#define airoha_qdma_rr(qdma, offset)				\
	airoha_rr((qdma)->regs, (offset))
#define airoha_qdma_wr(qdma, offset, val)			\
	airoha_wr((qdma)->regs, (offset), (val))
#define airoha_qdma_rmw(qdma, offset, mask, val)		\
	airoha_rmw((qdma)->regs, (offset), (mask), (val))
#define airoha_qdma_set(qdma, offset, val)			\
	airoha_rmw((qdma)->regs, (offset), 0, (val))
#define airoha_qdma_clear(qdma, offset, val)			\
870  	airoha_rmw((qdma)->regs, (offset), (val), 0)
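/* Usage example (illustrative): update a single register field while
 * preserving the other bits, e.g.
 *	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(1), GDM_LONG_LEN_MASK,
 *		      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
 * the *_set()/*_clear() variants are OR / AND-NOT shorthands built on
 * the same read-modify-write helper.
 */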
871  
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
				    u32 clear, u32 set)
{
	unsigned long flags;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
		return;

	spin_lock_irqsave(&qdma->irq_lock, flags);

	qdma->irqmask[index] &= ~clear;
	qdma->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
	/* Read irq_enable register in order to guarantee the update above
	 * completes in the spinlock critical section.
	 */
	airoha_qdma_rr(qdma, REG_INT_ENABLE(index));

	spin_unlock_irqrestore(&qdma->irq_lock, flags);
}

static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
				   u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
				    u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, mask, 0);
}
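/* E.g. the RX NAPI poll below re-arms its interrupts with
 * airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, RX_DONE_INT_MASK)
 * once the poll budget is not exhausted.
 */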
904  
static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
{
	/* The GDM1 port on the EN7581 SoC is connected to the LAN DSA
	 * switch. GDM{2,3,4} can be used as WAN ports connected to an
	 * external PHY module.
	 */
	return port->id == 1;
}

static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, reg;

	reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
					   : REG_FE_WAN_MAC_H;
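	/* The *_MAC_H register holds the three high octets; LMIN/LMAX
	 * (offsets +0x4/+0x8) appear to bound a low-address range, and
	 * writing the same low 24 bits to both pins a single address.
	 */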
	val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
	airoha_fe_wr(eth, reg, val);

	val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
	airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
	airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
}

static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
		      FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
		      FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
		      FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
		      FIELD_PREP(GDM_UCFQ_MASK, val));
}
941  
static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
{
	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
	u32 vip_port, cfg_addr;

	switch (port) {
	case XSI_PCIE0_PORT:
		vip_port = XSI_PCIE0_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(3);
		break;
	case XSI_PCIE1_PORT:
		vip_port = XSI_PCIE1_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(3);
		break;
	case XSI_USB_PORT:
		vip_port = XSI_USB_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(4);
		break;
	case XSI_ETH_PORT:
		vip_port = XSI_ETH_VIP_PORT_MASK;
		cfg_addr = REG_GDM_FWD_CFG(4);
		break;
	default:
		return -EINVAL;
	}

	if (enable) {
		airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
	} else {
		airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
	}

	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);

	return 0;
}

static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
{
	const int port_list[] = {
		XSI_PCIE0_PORT,
		XSI_PCIE1_PORT,
		XSI_USB_PORT,
		XSI_ETH_PORT
	};
	int i, err;

	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
		err = airoha_set_gdm_port(eth, port_list[i], enable);
		if (err)
			goto error;
	}

	return 0;

error:
	for (i--; i >= 0; i--)
		airoha_set_gdm_port(eth, port_list[i], false);

	return err;
}
1005  
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
			      GDM_DROP_CRC_ERR);
		airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
					    FE_PSE_PORT_CDM1);
		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
			      FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
	}

	airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
		      FIELD_PREP(CDM1_VLAN_MASK, 0x8100));

	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}
1027  
static void airoha_fe_vip_setup(struct airoha_eth *eth)
{
	airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
	airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(4),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(6),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(7),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* BOOTP (0x43) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
	airoha_fe_wr(eth, REG_FE_VIP_EN(8),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* BOOTP (0x44) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
	airoha_fe_wr(eth, REG_FE_VIP_EN(9),
		     PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	/* ISAKMP */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
	airoha_fe_wr(eth, REG_FE_VIP_EN(10),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(11),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* DHCPv6 */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
	airoha_fe_wr(eth, REG_FE_VIP_EN(12),
		     PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
		     FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(19),
		     PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
		     PATN_EN_MASK);

	/* ETH->ETH_P_1905 (0x893a) */
	airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
	airoha_fe_wr(eth, REG_FE_VIP_EN(20),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);

	airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
	airoha_fe_wr(eth, REG_FE_VIP_EN(21),
		     PATN_FCPU_EN_MASK | PATN_EN_MASK);
}
1091  
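/* PSE per-queue reservations are accessed indirectly: select the port
 * and queue via REG_FE_PSE_QUEUE_CFG_WR, then read or write the
 * reserved-pages value through REG_FE_PSE_QUEUE_CFG_VAL.
 */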
static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
					     u32 port, u32 queue)
{
	u32 val;

	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
	val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);

	return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
}

static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
					      u32 port, u32 queue, u32 val)
{
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
		      FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
	airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
		      PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
		      FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
		      FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
		      PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}
1118  
static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
				    u32 port, u32 queue, u32 val)
{
	u32 orig_val, tmp, all_rsv, fq_limit;

	airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);

	/* modify all rsv */
	orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
	tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
	all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
	all_rsv += (val - orig_val);
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
		      FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
1133  
	/* modify the high/mid/low shared-usage thresholds: all of them
	 * are derived from the free-queue page limit minus the total
	 * reservation (the low threshold is 3/4 of the mid one)
	 */
	tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
	fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
	tmp = fq_limit - all_rsv - 0x20;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_HTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));

	tmp = fq_limit - all_rsv - 0x100;
	airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
		      PSE_SHARE_USED_MTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
	tmp = (3 * tmp) >> 2;
	airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
		      PSE_SHARE_USED_LTHD_MASK,
		      FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));

	return 0;
}
1153  
static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
{
	const u32 pse_port_num_queues[] = {
		[FE_PSE_PORT_CDM1] = 6,
		[FE_PSE_PORT_GDM1] = 6,
		[FE_PSE_PORT_GDM2] = 32,
		[FE_PSE_PORT_GDM3] = 6,
		[FE_PSE_PORT_PPE1] = 4,
		[FE_PSE_PORT_CDM2] = 6,
		[FE_PSE_PORT_CDM3] = 8,
		[FE_PSE_PORT_CDM4] = 10,
		[FE_PSE_PORT_PPE2] = 4,
		[FE_PSE_PORT_GDM4] = 2,
		[FE_PSE_PORT_CDM5] = 2,
	};
	int q;

	/* hw misses PPE2 oq rsv */
	airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
		      PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);

	/* CDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
					 PSE_QUEUE_RSV_PAGES);
	/* GDM2 */
	for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
	/* GDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE1 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
	}
	/* CDM2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM3 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
	/* CDM4 */
	for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* PPE2 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
		if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
						 PSE_QUEUE_RSV_PAGES);
		else
			airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
	}
	/* GDM4 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
					 PSE_QUEUE_RSV_PAGES);
	/* CDM5 */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
		airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
					 PSE_QUEUE_RSV_PAGES);
}
1226  
static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
{
	int i;

	for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
		int err, j;
		u32 val;

		airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

		val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
		      MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
		airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
		err = read_poll_timeout(airoha_fe_rr, val,
					val & MC_VLAN_CFG_CMD_DONE_MASK,
					USEC_PER_MSEC, 5 * USEC_PER_MSEC,
					false, eth, REG_MC_VLAN_CFG);
		if (err)
			return err;

		for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
			airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);

			val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
			      FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
			      MC_VLAN_CFG_RW_MASK;
			airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
			err = read_poll_timeout(airoha_fe_rr, val,
						val & MC_VLAN_CFG_CMD_DONE_MASK,
						USEC_PER_MSEC,
						5 * USEC_PER_MSEC, false, eth,
						REG_MC_VLAN_CFG);
			if (err)
				return err;
		}
	}

	return 0;
}
1266  
static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
	/* CDM1_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
		      CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
	/* CDM2_CRSN_QSEL */
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
				 CDM_CRSN_QSEL_Q1));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
				 CDM_CRSN_QSEL_Q6));
	airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
		      CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
		      FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
				 CDM_CRSN_QSEL_Q1));
}
1312  
static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	/* PSE IQ reserve */
	airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
		      FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
	airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
		      PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
		      FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
		      FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));

	/* enable FE copy engine for MC/KA/DPI */
	airoha_fe_wr(eth, REG_FE_PCE_CFG,
		     PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
	/* set vip queue selection to ring 1 */
	airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
		      FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
		      FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
	/* set GDM4 source interface offset to 8 */
	airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
		      GDM4_SPORT_OFF2_MASK |
		      GDM4_SPORT_OFF1_MASK |
		      GDM4_SPORT_OFF0_MASK,
		      FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
		      FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
		      FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));

	/* set PSE Page as 128B */
	airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
		      FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
		      FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
		      FE_DMA_GLO_PG_SZ_MASK);
	airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
		     FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
		     FE_RST_GDM4_MBI_ARB_MASK);
	usleep_range(1000, 2000);

	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
	airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));

	airoha_fe_vip_setup(eth);
	airoha_fe_pse_ports_init(eth);

	airoha_fe_set(eth, REG_GDM_MISC_CFG,
		      GDM2_RDM_ACK_WAIT_PREF_MASK |
		      GDM2_CHN_VLD_MODE_MASK);
	airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
		      FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
1367  
	/* init fragment and assemble Force Port */
	/* NPU Core-3, NPU Bridge Channel-3 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
		      FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
		      FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
	/* QDMA LAN, RX Ring-22 */
	airoha_fe_rmw(eth, REG_IP_FRAG_FP,
		      IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
		      FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
		      FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));

	airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
	airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);

	airoha_fe_crsn_qsel_init(eth);

	airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
	airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);

	/* default aging mode for mbi unlock issue */
	airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
		      MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
		      FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
		      FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));

	/* disable IFC by default */
	airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);

	/* enable 1:N vlan action, init vlan table */
	airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);

	return airoha_fe_mc_vlan_clear(eth);
}
1402  
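/* Refill the RX ring with page_pool fragments: the CPU index register
 * is advanced to q->head after each descriptor is armed, handing the
 * new buffers to the hardware.
 */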
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int nframes = 0;

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		nframes++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
					   dir);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
				RX_RING_CPU_IDX_MASK,
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	}

	return nframes;
}
1451  
static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
				    struct airoha_qdma_desc *desc)
{
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);

	sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
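	/* SPORT values 0x10-0x13 are GDM1 channels (port index 0);
	 * 2, 3 and 4 map to GDM2, GDM3 and GDM4 (indices 1-3).
	 */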
	switch (sport) {
	case 0x10 ... 0x13:
		port = 0;
		break;
	case 0x2 ... 0x4:
		port = sport - 1;
		break;
	default:
		return -EINVAL;
	}

	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
}
1471  
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	int done = 0;

	while (done < budget) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		struct sk_buff *skb;
		int len, p;

		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
			break;

		if (!dma_addr)
			break;

		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
		if (!len)
			break;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p]) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			continue;
		}

		skb = napi_build_skb(e->buf, q->buf_size);
		if (!skb) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			break;
		}
1519  
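		/* 2-byte headroom: presumably pairs with the QDMA
		 * GLOBAL_CFG_RX_2B_OFFSET_MASK setting so that the IP
		 * header ends up 4-byte aligned.
		 */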
		skb_reserve(skb, 2);
		__skb_put(skb, len);
		skb_mark_for_recycle(skb);
		skb->dev = eth->ports[p]->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_record_rx_queue(skb, qid);
		napi_gro_receive(&q->napi, skb);

		done++;
	}
	airoha_qdma_fill_rx_queue(q);

	return done;
}
1535  
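/* Standard NAPI poll contract: keep reaping while forward progress is
 * made and budget remains; RX interrupts are re-enabled only once the
 * ring is drained below budget and napi_complete() accepts completion.
 */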
static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
	int cur, done = 0;

	do {
		cur = airoha_qdma_rx_process(q, budget - done);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
				       RX_DONE_INT_MASK);

	return done;
}

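/* With .order = 0 and buf_size = PAGE_SIZE / 2, page_pool_dev_alloc_frag()
 * carves two RX buffers out of every page; PP_FLAG_DMA_MAP and
 * PP_FLAG_DMA_SYNC_DEV delegate mapping and device-side syncs to the pool.
 */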
static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	const struct page_pool_params pp_params = {
		.order = 0,
		.pool_size = 256,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.dma_dir = DMA_FROM_DEVICE,
		.max_len = PAGE_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	thr = clamp(ndesc >> 3, 1, 32);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, thr));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));

	airoha_qdma_fill_rx_queue(q);

	return 0;
}

static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		if (!(RX_DONE_INT_MASK & BIT(i))) {
			/* rx-queue not bound to an irq */
			continue;
		}

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM(i));
		if (err)
			return err;
	}

	return 0;
}

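/* TX completion path: the hardware posts 32-bit entries to the per-IRQ
 * completion queue, each encoding a TX ring index and the index of a
 * completed descriptor. Consumed entries are overwritten with the 0xff
 * sentinel and the matching TX ring is reclaimed up to @last.
 */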
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
	struct airoha_tx_irq_queue *irq_q;
	struct airoha_qdma *qdma;
	struct airoha_eth *eth;
	int id, done = 0;

	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	while (irq_q->queued > 0 && done < budget) {
		u32 qid, last, val = irq_q->q[irq_q->head];
		struct airoha_queue *q;

		if (val == 0xff)
			break;

		irq_q->q[irq_q->head] = 0xff; /* mark as done */
		irq_q->head = (irq_q->head + 1) % irq_q->size;
		irq_q->queued--;
		done++;

		last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);

		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		while (q->queued > 0) {
			struct airoha_qdma_desc *desc = &q->desc[q->tail];
			struct airoha_queue_entry *e = &q->entry[q->tail];
			u32 desc_ctrl = le32_to_cpu(desc->ctrl);
			struct sk_buff *skb = e->skb;
			u16 index = q->tail;

			if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
			    !(desc_ctrl & QDMA_DESC_DROP_MASK))
				break;

			q->tail = (q->tail + 1) % q->ndesc;
			q->queued--;

			dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
					 DMA_TO_DEVICE);

			WRITE_ONCE(desc->msg0, 0);
			WRITE_ONCE(desc->msg1, 0);

			if (skb) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(skb->dev, qid);
				if (netif_tx_queue_stopped(txq) &&
				    q->ndesc - q->queued >= q->free_thr)
					netif_tx_wake_queue(txq);

				dev_kfree_skb_any(skb);
				e->skb = NULL;
			}

			if (index == last)
				break;
		}

		spin_unlock_bh(&q->lock);
	}

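	/* Report the number of consumed completion entries back to the
	 * hardware so the slots can be reused; the writes are chunked
	 * since IRQ_CLEAR_LEN appears to accept at most 0x80 at a time.
	 */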
	if (done) {
		int i, len = done >> 7;

		for (i = 0; i < len; i++)
			airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
					IRQ_CLEAR_LEN_MASK, 0x80);
		airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
				IRQ_CLEAR_LEN_MASK, (done & 0x7f));
	}

	if (done < budget && napi_complete(napi))
		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
				       TX_DONE_INT_MASK(id));

	return done;
}

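/* TX descriptors start out with QDMA_DESC_DONE set so a freshly
 * initialized ring looks fully reclaimed; free_thr reserves room for one
 * maximally fragmented skb (linear area plus MAX_SKB_FRAGS fragments).
 */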
static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->ndesc = size;
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++) {
		u32 val;

		val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));

	return 0;
}

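/* The completion queue is seeded with the same 0xff sentinel that
 * airoha_qdma_tx_napi_poll() uses to detect not-yet-posted entries.
 */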
static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);
	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
			FIELD_PREP(TX_IRQ_THR_MASK, 1));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN(i));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
	struct airoha_eth *eth = q->qdma->eth;

	spin_lock_bh(&q->lock);
	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->skb = NULL;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);
}

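/* Reserve coherent memory for the hardware forwarding engine: an array
 * of HW_DSCP_NUM forwarding descriptors plus AIROHA_MAX_PACKET_SIZE
 * bytes of packet buffer per descriptor, then kick the LMGR
 * initialization and wait for the start bit to self-clear.
 */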
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;
	u32 status;
	int size;

	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					      GFP_KERNEL);
	if (!qdma->hfwd.desc)
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					   GFP_KERNEL);
	if (!qdma->hfwd.q)
		return -ENOMEM;

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
	airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
			FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
			LMGR_INIT_START);

	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, true, qdma,
				 REG_LMGR_INIT_CFG);
}

static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

	airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
			  PSE_BUF_ESTIMATE_EN_MASK);

	airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_EN_MASK |
			EGRESS_RATE_METER_EQ_RATE_EN_MASK);
	/* 2047us x 31 = 63.457ms */
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_WINDOW_SZ_MASK,
			FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
	airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
			EGRESS_RATE_METER_TIMESLICE_MASK,
			FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

	/* ratelimit init */
	airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
	/* fast-tick 25us */
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
			FIELD_PREP(GLB_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
			FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
			EGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

	airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
	airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
			  INGRESS_TRTCM_MODE_MASK);
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
			FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
	airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
			INGRESS_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

	airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
			FIELD_PREP(SLA_FAST_TICK_MASK, 25));
	airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
			FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}

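/* One-shot QDMA bring-up: ack and unmask interrupts, set per-ring IRQ
 * blocking and program the global DMA configuration. Note that
 * GLOBAL_CFG_RX_2B_OFFSET makes the hardware skip two bytes at the start
 * of each RX buffer, matching the skb_reserve(skb, 2) in the RX path.
 */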
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	/* clear pending irqs */
	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

	/* setup irqs */
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
	airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);

	/* setup irq binding */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
			airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
					TX_RING_IRQ_BLOCKING_CFG_MASK);
		else
			airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
					  TX_RING_IRQ_BLOCKING_CFG_MASK);
	}

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       GLOBAL_CFG_RX_2B_OFFSET_MASK |
		       FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
		       GLOBAL_CFG_MULTICAST_EN_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

	airoha_qdma_init_qos(qdma);

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
			TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);

	return 0;
}

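/* Shared hard-irq handler for the QDMA blocks: latch and ack the masked
 * status words, then defer RX/TX work to NAPI with the corresponding
 * interrupt sources kept disabled until the poll loops re-enable them.
 */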
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
	struct airoha_qdma *qdma = dev_instance;
	u32 intr[ARRAY_SIZE(qdma->irqmask)];
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
		intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
		intr[i] &= qdma->irqmask[i];
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	}

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;

	if (intr[1] & RX_DONE_INT_MASK) {
		airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
					RX_DONE_INT_MASK);

		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
			if (!qdma->q_rx[i].ndesc)
				continue;

			if (intr[1] & BIT(i))
				napi_schedule(&qdma->q_rx[i].napi);
		}
	}

	if (intr[0] & INT_TX_MASK) {
		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
			struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
			u32 status, head;

			if (!(intr[0] & TX_DONE_INT_MASK(i)))
				continue;

			airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
						TX_DONE_INT_MASK(i));

			status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
			head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
			irq_q->head = head % irq_q->size;
			irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

			napi_schedule(&qdma->q_tx_irq[i].napi);
		}
	}

	return IRQ_HANDLED;
}

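/* Per-QDMA block setup: map the "qdma%d" register resource and request
 * the block's interrupt. The 4 * id stride suggests each block exposes
 * four interrupt lines in the DT, of which only the first is used here.
 */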
static int airoha_qdma_init(struct platform_device *pdev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	spin_lock_init(&qdma->irq_lock);
	qdma->eth = eth;

	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     "failed to iomap qdma%d regs\n", id);

	qdma->irq = platform_get_irq(pdev, 4 * id);
	if (qdma->irq < 0)
		return qdma->irq;

	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, qdma);
	if (err)
		return err;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static int airoha_hw_init(struct platform_device *pdev,
			  struct airoha_eth *eth)
{
	int err, i;

	/* disable xsi */
	err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
					eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;

	msleep(20);
	err = airoha_fe_init(eth);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

	return 0;
}

static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool)
			page_pool_destroy(qdma->q_rx[i].page_pool);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		napi_disable(&qdma->q_tx_irq[i].napi);
		netif_napi_del(&qdma->q_tx_irq[i].napi);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}
}

static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}
}

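/* Fold the GDM MIB counters (32-bit, or 64-bit split across _H/_L
 * registers) into the u64 software mirrors under stats.syncp, then clear
 * the hardware counters so each readout only adds the new delta.
 */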
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	/* TX */
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	/* RX */
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	/* reset mib counters */
	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
}

static int airoha_dev_open(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	int err;

	netif_tx_start_all_queues(dev);
	err = airoha_set_gdm_ports(qdma->eth, true);
	if (err)
		return err;

	if (netdev_uses_dsa(dev))
		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
			      GDM_STAG_EN_MASK);
	else
		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
				GDM_STAG_EN_MASK);

	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK |
			GLOBAL_CFG_RX_DMA_EN_MASK);

	return 0;
}

static int airoha_dev_stop(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_qdma *qdma = port->qdma;
	int err;

	netif_tx_disable(dev);
	err = airoha_set_gdm_ports(qdma->eth, false);
	if (err)
		return err;

	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
			  GLOBAL_CFG_TX_DMA_EN_MASK |
			  GLOBAL_CFG_RX_DMA_EN_MASK);

	return 0;
}

static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	int err;

	err = eth_mac_addr(dev, p);
	if (err)
		return err;

	airoha_set_macaddr(port, dev->dev_addr);

	return 0;
}

static int airoha_dev_init(struct net_device *dev)
{
	struct airoha_gdm_port *port = netdev_priv(dev);

	airoha_set_macaddr(port, dev->dev_addr);

	return 0;
}

static void airoha_dev_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *storage)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		storage->rx_packets = port->stats.rx_ok_pkts;
		storage->tx_packets = port->stats.tx_ok_pkts;
		storage->rx_bytes = port->stats.rx_ok_bytes;
		storage->tx_bytes = port->stats.tx_ok_bytes;
		storage->multicast = port->stats.rx_multicast;
		storage->rx_errors = port->stats.rx_errors;
		storage->rx_dropped = port->stats.rx_drops;
		storage->tx_dropped = port->stats.tx_drops;
		storage->rx_crc_errors = port->stats.rx_crc_error;
		storage->rx_over_errors = port->stats.rx_over_errors;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

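/* TX fast path: the linear area and every fragment get their own
 * descriptor, chained via QDMA_DESC_MORE; only the first entry keeps a
 * reference to the skb. The queue is stopped once fewer than free_thr
 * descriptors remain and is woken from the completion path.
 */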
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct skb_shared_info *sinfo = skb_shinfo(skb);
	struct airoha_gdm_port *port = netdev_priv(dev);
	u32 msg0 = 0, msg1, len = skb_headlen(skb);
	int i, qid = skb_get_queue_mapping(skb);
	struct airoha_qdma *qdma = port->qdma;
	u32 nr_frags = 1 + sinfo->nr_frags;
	struct netdev_queue *txq;
	struct airoha_queue *q;
	void *data = skb->data;
	u16 index;
	u8 fport;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
			FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0))
			goto error;

		if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			__be16 csum = cpu_to_be16(sinfo->gso_size);

			tcp_hdr(skb)->check = (__force __sum16)csum;
			msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
		}
	}

	fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

	q = &qdma->q_tx[qid];
	if (WARN_ON_ONCE(!q->ndesc))
		goto error;

	spin_lock_bh(&q->lock);

	txq = netdev_get_tx_queue(dev, qid);
	if (q->queued + nr_frags > q->ndesc) {
		/* not enough space in the queue */
		netif_tx_stop_queue(txq);
		spin_unlock_bh(&q->lock);
		return NETDEV_TX_BUSY;
	}

	index = q->head;
	for (i = 0; i < nr_frags; i++) {
		struct airoha_qdma_desc *desc = &q->desc[index];
		struct airoha_queue_entry *e = &q->entry[index];
		skb_frag_t *frag = &sinfo->frags[i];
		dma_addr_t addr;
		u32 val;

		addr = dma_map_single(dev->dev.parent, data, len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
			goto error_unmap;

		index = (index + 1) % q->ndesc;

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
		if (i < nr_frags - 1)
			val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));

		e->skb = i ? NULL : skb;
		e->dma_addr = addr;
		e->dma_len = len;

		data = skb_frag_address(frag);
		len = skb_frag_size(frag);
	}

	q->head = index;
	q->queued += i;

	skb_tx_timestamp(skb);
	if (!netdev_xmit_more())
		airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
				TX_RING_CPU_IDX_MASK,
				FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));

	if (q->ndesc - q->queued < q->free_thr)
		netif_tx_stop_queue(txq);

	spin_unlock_bh(&q->lock);

	return NETDEV_TX_OK;

error_unmap:
	for (i--; i >= 0; i--) {
		index = (q->head + i) % q->ndesc;
		dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
				 q->entry[index].dma_len, DMA_TO_DEVICE);
	}

	spin_unlock_bh(&q->lock);
error:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static void airoha_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *info)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_eth *eth = port->qdma->eth;

	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
}

static void airoha_ethtool_get_mac_stats(struct net_device *dev,
					 struct ethtool_eth_mac_stats *stats)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	unsigned int start;

	airoha_update_hw_stats(port);
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

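/* Histogram buckets matching the GDM frame-length counters accumulated
 * in airoha_update_hw_stats(); bucket 0 also absorbs the RUNT counter
 * and the last bucket maps to the oversized "LONG" counter.
 */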
static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519, 10239 },
	{},
};

static void
airoha_ethtool_get_rmon_stats(struct net_device *dev,
			      struct ethtool_rmon_stats *stats,
			      const struct ethtool_rmon_hist_range **ranges)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_hw_stats *hw_stats = &port->stats;
	unsigned int start;

	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->tx_len) + 1);
	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->rx_len) + 1);

	*ranges = airoha_ethtool_rmon_ranges;
	airoha_update_hw_stats(port);
	do {
		int i;

		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->fragments = hw_stats->rx_fragment;
		stats->jabbers = hw_stats->rx_jabber;
		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
		     i++) {
			stats->hist[i] = hw_stats->rx_len[i];
			stats->hist_tx[i] = hw_stats->tx_len[i];
		}
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}

static const struct net_device_ops airoha_netdev_ops = {
	.ndo_init		= airoha_dev_init,
	.ndo_open		= airoha_dev_open,
	.ndo_stop		= airoha_dev_stop,
	.ndo_start_xmit		= airoha_dev_xmit,
	.ndo_get_stats64	= airoha_dev_get_stats64,
	.ndo_set_mac_address	= airoha_dev_set_macaddr,
};

static const struct ethtool_ops airoha_ethtool_ops = {
	.get_drvinfo		= airoha_ethtool_get_drvinfo,
	.get_eth_mac_stats	= airoha_ethtool_get_mac_stats,
	.get_rmon_stats		= airoha_ethtool_get_rmon_stats,
};

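/* Instantiate a netdev for one "airoha,eth-mac" DT child: the "reg"
 * property holds the 1-based GDM port id, ports[] is indexed by id - 1
 * and ports are spread across the QDMA blocks by index.
 */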
static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
{
	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
	struct airoha_gdm_port *port;
	struct airoha_qdma *qdma;
	struct net_device *dev;
	int err, index;
	u32 id;

	if (!id_ptr) {
		dev_err(eth->dev, "missing gdm port id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(id_ptr);
	index = id - 1;

	if (!id || id > ARRAY_SIZE(eth->ports)) {
		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
		return -EINVAL;
	}

	if (eth->ports[index]) {
		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
		return -EINVAL;
	}

	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
				      AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
	if (!dev) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}

	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
	dev->netdev_ops = &airoha_netdev_ops;
	dev->ethtool_ops = &airoha_ethtool_ops;
	dev->max_mtu = AIROHA_MAX_MTU;
	dev->watchdog_timeo = 5 * HZ;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
			   NETIF_F_SG | NETIF_F_TSO;
	dev->features |= dev->hw_features;
	dev->dev.of_node = np;
	dev->irq = qdma->irq;
	SET_NETDEV_DEV(dev, eth->dev);

	err = of_get_ethdev_address(np, dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			return err;

		eth_hw_addr_random(dev);
		dev_info(eth->dev, "generated random MAC address %pM\n",
			 dev->dev_addr);
	}

	port = netdev_priv(dev);
	u64_stats_init(&port->stats.syncp);
	spin_lock_init(&port->stats.lock);
	port->qdma = qdma;
	port->dev = dev;
	port->id = id;
	eth->ports[index] = port;

	return register_netdev(dev);
}

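/* Note: QDMA descriptors store buffer addresses in 32-bit fields
 * (cpu_to_le32() in the datapath), hence the 32-bit DMA mask below.
 */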
static int airoha_probe(struct platform_device *pdev)
{
	struct device_node *np;
	struct airoha_eth *eth;
	int i, err;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;

	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(eth->dev, "failed configuring DMA mask\n");
		return err;
	}

	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
	if (IS_ERR(eth->fe_regs))
		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
				     "failed to iomap fe regs\n");

	eth->rsts[0].id = "fe";
	eth->rsts[1].id = "pdma";
	eth->rsts[2].id = "qdma";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->rsts),
						    eth->rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk reset lines\n");
		return err;
	}

	eth->xsi_rsts[0].id = "xsi-mac";
	eth->xsi_rsts[1].id = "hsi0-mac";
	eth->xsi_rsts[2].id = "hsi1-mac";
	eth->xsi_rsts[3].id = "hsi-mac";
	eth->xsi_rsts[4].id = "xfp-mac";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->xsi_rsts),
						    eth->xsi_rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
		return err;
	}

	eth->napi_dev = alloc_netdev_dummy(0);
	if (!eth->napi_dev)
		return -ENOMEM;

	/* Enable threaded NAPI by default */
	eth->napi_dev->threaded = true;
	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
	platform_set_drvdata(pdev, eth);

	err = airoha_hw_init(pdev, eth);
	if (err)
		goto error;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_start_napi(&eth->qdma[i]);

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_compatible(np, "airoha,eth-mac"))
			continue;

		if (!of_device_is_available(np))
			continue;

		err = airoha_alloc_gdm_port(eth, np);
		if (err) {
			of_node_put(np);
			goto error;
		}
	}

	return 0;

error:
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (port && port->dev->reg_state == NETREG_REGISTERED)
			unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);
	platform_set_drvdata(pdev, NULL);

	return err;
}

static void airoha_remove(struct platform_device *pdev)
{
	struct airoha_eth *eth = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (!port)
			continue;

		airoha_dev_stop(port->dev);
		unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);

	platform_set_drvdata(pdev, NULL);
}

static const struct of_device_id of_airoha_match[] = {
	{ .compatible = "airoha,en7581-eth" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_match);

static struct platform_driver airoha_driver = {
	.probe = airoha_probe,
	.remove_new = airoha_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_airoha_match,
	},
};
module_platform_driver(airoha_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");