1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
3 *
4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 */
7
8 #include <linux/bpf_trace.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/if_vlan.h>
12 #include <linux/interrupt.h>
13 #include <linux/irqdomain.h>
14 #include <linux/kernel.h>
15 #include <linux/kmemleak.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/net_tstamp.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/of_device.h>
23 #include <linux/of_platform.h>
24 #include <linux/phylink.h>
25 #include <linux/phy/phy.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/regmap.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/mfd/syscon.h>
31 #include <linux/sys_soc.h>
32 #include <linux/dma/ti-cppi5.h>
33 #include <linux/dma/k3-udma-glue.h>
34 #include <net/page_pool/helpers.h>
35 #include <net/switchdev.h>
36
37 #include "cpsw_ale.h"
38 #include "cpsw_sl.h"
39 #include "am65-cpsw-nuss.h"
40 #include "am65-cpsw-switchdev.h"
41 #include "k3-cppi-desc-pool.h"
42 #include "am65-cpts.h"
43
44 #define AM65_CPSW_SS_BASE 0x0
45 #define AM65_CPSW_SGMII_BASE 0x100
46 #define AM65_CPSW_XGMII_BASE 0x2100
47 #define AM65_CPSW_CPSW_NU_BASE 0x20000
48 #define AM65_CPSW_NU_PORTS_BASE 0x1000
49 #define AM65_CPSW_NU_FRAM_BASE 0x12000
50 #define AM65_CPSW_NU_STATS_BASE 0x1a000
51 #define AM65_CPSW_NU_ALE_BASE 0x1e000
52 #define AM65_CPSW_NU_CPTS_BASE 0x1d000
53
54 #define AM65_CPSW_NU_PORTS_OFFSET 0x1000
55 #define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
56 #define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
57
58 #define AM65_CPSW_MAX_PORTS 8
59
60 #define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
61 #define AM65_CPSW_MAX_PACKET_SIZE 2024
62
63 #define AM65_CPSW_REG_CTL 0x004
64 #define AM65_CPSW_REG_STAT_PORT_EN 0x014
65 #define AM65_CPSW_REG_PTYPE 0x018
66
67 #define AM65_CPSW_P0_REG_CTL 0x004
68 #define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008
69
70 #define AM65_CPSW_PORT_REG_PRI_CTL 0x01c
71 #define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
72 #define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
73
74 #define AM65_CPSW_PORTN_REG_SA_L 0x308
75 #define AM65_CPSW_PORTN_REG_SA_H 0x30c
76 #define AM65_CPSW_PORTN_REG_TS_CTL 0x310
77 #define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314
78 #define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
79 #define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
80
81 #define AM65_CPSW_SGMII_CONTROL_REG 0x010
82 #define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
83 #define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
84
85 #define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
86 #define AM65_CPSW_CTL_P0_ENABLE BIT(2)
87 #define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
88 #define AM65_CPSW_CTL_P0_RX_PAD BIT(14)
89
90 /* AM65_CPSW_P0_REG_CTL */
91 #define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
92 #define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN BIT(16)
93
94 /* AM65_CPSW_PORT_REG_PRI_CTL */
95 #define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
96
97 /* AM65_CPSW_PN_TS_CTL register fields */
98 #define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
99 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
100 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6)
101 #define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7)
102 #define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10)
103 #define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
104 #define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
105
106 #define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0)
107 #define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1)
108 #define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2)
109 #define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3)
110 #define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9)
111
112 /* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
113 #define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
114
115 /* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
116 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16)
117 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17)
118 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18)
119 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19)
120 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20)
121 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21)
122 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22)
123 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
124
125 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
126 #define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
127
128 #define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
129
130 #define AM65_CPSW_TS_TX_ANX_ALL_EN \
131 (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \
132 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
133 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
134
135 #define AM65_CPSW_TS_RX_ANX_ALL_EN \
136 (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \
137 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \
138 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
139
140 #define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
141 /* Number of TX/RX descriptors per channel/flow */
142 #define AM65_CPSW_MAX_TX_DESC 500
143 #define AM65_CPSW_MAX_RX_DESC 500
144
145 #define AM65_CPSW_NAV_PS_DATA_SIZE 16
146 #define AM65_CPSW_NAV_SW_DATA_SIZE 16
147
148 #define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
149 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
150 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
151
152 #define AM65_CPSW_DEFAULT_TX_CHNS 8
153 #define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1
154
155 /* CPPI streaming packet interface */
156 #define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
157 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
158
159 /* XDP */
160 #define AM65_CPSW_XDP_CONSUMED BIT(1)
161 #define AM65_CPSW_XDP_REDIRECT BIT(0)
162 #define AM65_CPSW_XDP_PASS 0
163
164 /* Include headroom compatible with both skb and xdpf */
165 #define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
166 #define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
167
168 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
169 const u8 *dev_addr)
170 {
171 u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
172 (dev_addr[2] << 16) | (dev_addr[3] << 24);
173 u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
174
175 writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
176 writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
177 }
178
179 static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
180 {
181 cpsw_sl_reset(port->slave.mac_sl, 100);
182 /* Max length register has to be restored after MAC SL reset */
183 writel(AM65_CPSW_MAX_PACKET_SIZE,
184 port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
185 }
186
187 static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
188 {
189 common->nuss_ver = readl(common->ss_base);
190 common->cpsw_ver = readl(common->cpsw_base);
191 dev_info(common->dev,
192 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
193 common->nuss_ver,
194 common->cpsw_ver,
195 common->port_num + 1,
196 common->pdata.quirks);
197 }
198
199 static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
200 __be16 proto, u16 vid)
201 {
202 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
203 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
204 u32 port_mask, unreg_mcast = 0;
205 int ret;
206
207 if (!common->is_emac_mode)
208 return 0;
209
210 if (!netif_running(ndev) || !vid)
211 return 0;
212
213 ret = pm_runtime_resume_and_get(common->dev);
214 if (ret < 0)
215 return ret;
216
217 port_mask = BIT(port->port_id) | ALE_PORT_HOST;
218 if (!vid)
219 unreg_mcast = port_mask;
220 dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
221 ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
222 unreg_mcast, port_mask, 0);
223
224 pm_runtime_put(common->dev);
225 return ret;
226 }
227
228 static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
229 __be16 proto, u16 vid)
230 {
231 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
232 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
233 int ret;
234
235 if (!common->is_emac_mode)
236 return 0;
237
238 if (!netif_running(ndev) || !vid)
239 return 0;
240
241 ret = pm_runtime_resume_and_get(common->dev);
242 if (ret < 0)
243 return ret;
244
245 dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
246 ret = cpsw_ale_del_vlan(common->ale, vid,
247 BIT(port->port_id) | ALE_PORT_HOST);
248
249 pm_runtime_put(common->dev);
250 return ret;
251 }
252
253 static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
254 bool promisc)
255 {
256 struct am65_cpsw_common *common = port->common;
257
258 if (promisc && !common->is_emac_mode) {
259 dev_dbg(common->dev, "promisc mode requested in switch mode");
260 return;
261 }
262
263 if (promisc) {
264 /* Enable promiscuous mode */
265 cpsw_ale_control_set(common->ale, port->port_id,
266 ALE_PORT_MACONLY_CAF, 1);
267 dev_dbg(common->dev, "promisc enabled\n");
268 } else {
269 /* Disable promiscuous mode */
270 cpsw_ale_control_set(common->ale, port->port_id,
271 ALE_PORT_MACONLY_CAF, 0);
272 dev_dbg(common->dev, "promisc disabled\n");
273 }
274 }
275
276 static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
277 {
278 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
279 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
280 u32 port_mask;
281 bool promisc;
282
283 promisc = !!(ndev->flags & IFF_PROMISC);
284 am65_cpsw_slave_set_promisc(port, promisc);
285
286 if (promisc)
287 return;
288
289 /* Restore allmulti on vlans if necessary */
290 cpsw_ale_set_allmulti(common->ale,
291 ndev->flags & IFF_ALLMULTI, port->port_id);
292
293 port_mask = ALE_PORT_HOST;
294 /* Clear all mcast from ALE */
295 cpsw_ale_flush_multicast(common->ale, port_mask, -1);
296
297 if (!netdev_mc_empty(ndev)) {
298 struct netdev_hw_addr *ha;
299
300 /* program multicast address list into ALE register */
301 netdev_for_each_mc_addr(ha, ndev) {
302 cpsw_ale_add_mcast(common->ale, ha->addr,
303 port_mask, 0, 0, 0);
304 }
305 }
306 }
307
308 static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
309 unsigned int txqueue)
310 {
311 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
312 struct am65_cpsw_tx_chn *tx_chn;
313 struct netdev_queue *netif_txq;
314 unsigned long trans_start;
315
316 netif_txq = netdev_get_tx_queue(ndev, txqueue);
317 tx_chn = &common->tx_chns[txqueue];
318 trans_start = READ_ONCE(netif_txq->trans_start);
319
320 netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
321 txqueue,
322 netif_tx_queue_stopped(netif_txq),
323 jiffies_to_msecs(jiffies - trans_start),
324 netdev_queue_dql_avail(netif_txq),
325 k3_cppi_desc_pool_avail(tx_chn->desc_pool));
326
327 if (netif_tx_queue_stopped(netif_txq)) {
328 /* try recover if stopped by us */
329 txq_trans_update(netif_txq);
330 netif_tx_wake_queue(netif_txq);
331 }
332 }
333
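/* Refill one RX buffer for the given flow: allocate a host descriptor,
 * DMA-map the page (past the reserved headroom), remember the page and
 * flow id in the descriptor's swdata and push it to the RX free queue.
 */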
334 static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
335 struct page *page, u32 flow_idx)
336 {
337 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
338 struct cppi5_host_desc_t *desc_rx;
339 struct device *dev = common->dev;
340 struct am65_cpsw_swdata *swdata;
341 dma_addr_t desc_dma;
342 dma_addr_t buf_dma;
343
344 desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
345 if (!desc_rx) {
346 dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
347 return -ENOMEM;
348 }
349 desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
350
351 buf_dma = dma_map_single(rx_chn->dma_dev,
352 page_address(page) + AM65_CPSW_HEADROOM,
353 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
354 if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
355 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
356 dev_err(dev, "Failed to map rx buffer\n");
357 return -EINVAL;
358 }
359
360 cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
361 AM65_CPSW_NAV_PS_DATA_SIZE);
362 k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
363 cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
364 buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
365 swdata = cppi5_hdesc_get_swdata(desc_rx);
366 swdata->page = page;
367 swdata->flow_id = flow_idx;
368
369 return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
370 desc_rx, desc_dma);
371 }
372
373 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
374 {
375 struct am65_cpsw_host *host_p = am65_common_get_host(common);
376 u32 val, pri_map;
377
378 /* P0 set Receive Priority Type */
379 val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
380
381 if (common->pf_p0_rx_ptype_rrobin) {
382 val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
383 /* Enet port FIFOs work in fixed priority mode only, so
384 * reset P0_Rx_Pri_Map so all packets go to Enet FIFO 0
385 */
386 pri_map = 0x0;
387 } else {
388 val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
389 /* restore P0_Rx_Pri_Map */
390 pri_map = 0x76543210;
391 }
392
393 writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
394 writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
395 }
396
397 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
398 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
399 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
400 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
401
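/* Unregister the per-port/per-flow xdp_rxq_info structures and destroy
 * the per-flow page pools.
 */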
402 static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
403 {
404 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
405 struct am65_cpsw_rx_flow *flow;
406 struct xdp_rxq_info *rxq;
407 int id, port;
408
409 for (id = 0; id < common->rx_ch_num_flows; id++) {
410 flow = &rx_chn->flows[id];
411
412 for (port = 0; port < common->port_num; port++) {
413 if (!common->ports[port].ndev)
414 continue;
415
416 rxq = &common->ports[port].xdp_rxq[id];
417
418 if (xdp_rxq_info_is_reg(rxq))
419 xdp_rxq_info_unreg(rxq);
420 }
421
422 if (flow->page_pool) {
423 page_pool_destroy(flow->page_pool);
424 flow->page_pool = NULL;
425 }
426 }
427 }
428
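/* Create one page pool per RX flow and register an xdp_rxq_info with the
 * page_pool memory model for every port/flow pair; on any failure the
 * already created queues and pools are torn down again.
 */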
429 static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
430 {
431 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
432 struct page_pool_params pp_params = {
433 .flags = PP_FLAG_DMA_MAP,
434 .order = 0,
435 .pool_size = AM65_CPSW_MAX_RX_DESC,
436 .nid = dev_to_node(common->dev),
437 .dev = common->dev,
438 .dma_dir = DMA_BIDIRECTIONAL,
439 /* .napi set dynamically */
440 };
441 struct am65_cpsw_rx_flow *flow;
442 struct xdp_rxq_info *rxq;
443 struct page_pool *pool;
444 int id, port, ret;
445
446 for (id = 0; id < common->rx_ch_num_flows; id++) {
447 flow = &rx_chn->flows[id];
448 pp_params.napi = &flow->napi_rx;
449 pool = page_pool_create(&pp_params);
450 if (IS_ERR(pool)) {
451 ret = PTR_ERR(pool);
452 goto err;
453 }
454
455 flow->page_pool = pool;
456
457 /* Sharing the same page pool is allowed because the RX handlers
458 * never run simultaneously for both ndevs
459 */
460 for (port = 0; port < common->port_num; port++) {
461 if (!common->ports[port].ndev)
462 continue;
463
464 rxq = &common->ports[port].xdp_rxq[id];
465
466 ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
467 id, flow->napi_rx.napi_id);
468 if (ret)
469 goto err;
470
471 ret = xdp_rxq_info_reg_mem_model(rxq,
472 MEM_TYPE_PAGE_POOL,
473 pool);
474 if (ret)
475 goto err;
476 }
477 }
478
479 return 0;
480
481 err:
482 am65_cpsw_destroy_xdp_rxqs(common);
483 return ret;
484 }
485
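/* Convert a descriptor virtual address into its index within the pool */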
486 static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
487 void *desc,
488 unsigned char dsize_log2)
489 {
490 void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
491
492 return (desc - pool_addr) >> dsize_log2;
493 }
494
495 static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
496 struct cppi5_host_desc_t *desc,
497 enum am65_cpsw_tx_buf_type buf_type)
498 {
499 int desc_idx;
500
501 desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
502 tx_chn->dsize_log2);
503 k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
504 (void *)buf_type);
505 }
506
507 static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
508 dma_addr_t desc_dma)
509 {
510 struct cppi5_host_desc_t *desc_tx;
511 int desc_idx;
512
513 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
514 desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
515 tx_chn->dsize_log2);
516
517 return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
518 desc_idx);
519 }
520
521 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
522 struct page *page,
523 bool allow_direct)
524 {
525 page_pool_put_full_page(flow->page_pool, page, allow_direct);
526 }
527
528 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
529 {
530 struct am65_cpsw_rx_chn *rx_chn = data;
531 struct cppi5_host_desc_t *desc_rx;
532 struct am65_cpsw_swdata *swdata;
533 dma_addr_t buf_dma;
534 struct page *page;
535 u32 buf_dma_len;
536 u32 flow_id;
537
538 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
539 swdata = cppi5_hdesc_get_swdata(desc_rx);
540 page = swdata->page;
541 flow_id = swdata->flow_id;
542 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
543 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
544 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
545 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
546
547 am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
548 }
549
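/* Unmap the first (linear) buffer and walk the chain of linked fragment
 * descriptors, unmapping their buffers and returning every descriptor to
 * the pool.
 */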
550 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
551 struct cppi5_host_desc_t *desc)
552 {
553 struct cppi5_host_desc_t *first_desc, *next_desc;
554 dma_addr_t buf_dma, next_desc_dma;
555 u32 buf_dma_len;
556
557 first_desc = desc;
558 next_desc = first_desc;
559
560 cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
561 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
562
563 dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
564
565 next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
566 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
567 while (next_desc_dma) {
568 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
569 next_desc_dma);
570 cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
571 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
572
573 dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
574 DMA_TO_DEVICE);
575
576 next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
577 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
578
579 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
580 }
581
582 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
583 }
584
585 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
586 {
587 struct am65_cpsw_tx_chn *tx_chn = data;
588 struct cppi5_host_desc_t *desc_tx;
589 struct sk_buff *skb;
590 void **swdata;
591
592 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
593 swdata = cppi5_hdesc_get_swdata(desc_tx);
594 skb = *(swdata);
595 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
596
597 dev_kfree_skb_any(skb);
598 }
599
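/* Wrap a page pool page into an skb and reserve the common skb/XDP headroom */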
600 static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
601 struct net_device *ndev,
602 unsigned int len)
603 {
604 struct sk_buff *skb;
605
606 len += AM65_CPSW_HEADROOM;
607
608 skb = build_skb(page_addr, len);
609 if (unlikely(!skb))
610 return NULL;
611
612 skb_reserve(skb, AM65_CPSW_HEADROOM);
613 skb->dev = ndev;
614
615 return skb;
616 }
617
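/* Switch-wide bring-up done when the first port is opened: program the
 * control, max-length and RX flow id registers, start the ALE, create the
 * XDP RX queues, fill each RX flow with page pool buffers and finally
 * enable the RX/TX DMA channels together with their NAPI contexts.
 */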
618 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
619 {
620 struct am65_cpsw_host *host_p = am65_common_get_host(common);
621 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
622 struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
623 int port_idx, i, ret, tx, flow_idx;
624 struct am65_cpsw_rx_flow *flow;
625 u32 val, port_mask;
626 struct page *page;
627
628 if (common->usage_count)
629 return 0;
630
631 /* Control register */
632 writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
633 AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
634 common->cpsw_base + AM65_CPSW_REG_CTL);
635 /* Max length register */
636 writel(AM65_CPSW_MAX_PACKET_SIZE,
637 host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
638 /* set base flow_id */
639 writel(common->rx_flow_id_base,
640 host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
641 writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
642 host_p->port_base + AM65_CPSW_P0_REG_CTL);
643
644 am65_cpsw_nuss_set_p0_ptype(common);
645
646 /* enable statistic */
647 val = BIT(HOST_PORT_NUM);
648 for (port_idx = 0; port_idx < common->port_num; port_idx++) {
649 struct am65_cpsw_port *port = &common->ports[port_idx];
650
651 if (!port->disabled)
652 val |= BIT(port->port_id);
653 }
654 writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
655
656 /* disable priority elevation */
657 writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
658
659 cpsw_ale_start(common->ale);
660
661 /* limit to one RX flow only */
662 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
663 ALE_DEFAULT_THREAD_ID, 0);
664 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
665 ALE_DEFAULT_THREAD_ENABLE, 1);
666 /* switch to vlan unaware mode */
667 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
668 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
669 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
670
671 /* default vlan cfg: create mask based on enabled ports */
672 port_mask = GENMASK(common->port_num, 0) &
673 ~common->disabled_ports_mask;
674
675 cpsw_ale_add_vlan(common->ale, 0, port_mask,
676 port_mask, port_mask,
677 port_mask & ~ALE_PORT_HOST);
678
679 if (common->is_emac_mode)
680 am65_cpsw_init_host_port_emac(common);
681 else
682 am65_cpsw_init_host_port_switch(common);
683
684 am65_cpsw_qos_tx_p0_rate_init(common);
685
686 ret = am65_cpsw_create_xdp_rxqs(common);
687 if (ret) {
688 dev_err(common->dev, "Failed to create XDP rx queues\n");
689 return ret;
690 }
691
692 for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
693 flow = &rx_chn->flows[flow_idx];
694 for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
695 page = page_pool_dev_alloc_pages(flow->page_pool);
696 if (!page) {
697 dev_err(common->dev, "cannot allocate page in flow %d\n",
698 flow_idx);
699 ret = -ENOMEM;
700 goto fail_rx;
701 }
702
703 ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
704 if (ret < 0) {
705 dev_err(common->dev,
706 "cannot submit page to rx channel flow %d, error %d\n",
707 flow_idx, ret);
708 am65_cpsw_put_page(flow, page, false);
709 goto fail_rx;
710 }
711 }
712 }
713
714 ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
715 if (ret) {
716 dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
717 goto fail_rx;
718 }
719
720 for (i = 0; i < common->rx_ch_num_flows ; i++) {
721 napi_enable(&rx_chn->flows[i].napi_rx);
722 if (rx_chn->flows[i].irq_disabled) {
723 rx_chn->flows[i].irq_disabled = false;
724 enable_irq(rx_chn->flows[i].irq);
725 }
726 }
727
728 for (tx = 0; tx < common->tx_ch_num; tx++) {
729 ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
730 if (ret) {
731 dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
732 tx, ret);
733 tx--;
734 goto fail_tx;
735 }
736 napi_enable(&tx_chn[tx].napi_tx);
737 }
738
739 dev_dbg(common->dev, "cpsw_nuss started\n");
740 return 0;
741
742 fail_tx:
743 while (tx >= 0) {
744 napi_disable(&tx_chn[tx].napi_tx);
745 k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
746 tx--;
747 }
748
749 for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
750 flow = &rx_chn->flows[flow_idx];
751 if (!flow->irq_disabled) {
752 disable_irq(flow->irq);
753 flow->irq_disabled = true;
754 }
755 napi_disable(&flow->napi_rx);
756 }
757
758 k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
759
760 fail_rx:
761 for (i = 0; i < common->rx_ch_num_flows; i++)
762 k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
763 am65_cpsw_nuss_rx_cleanup, !!i);
764
765 am65_cpsw_destroy_xdp_rxqs(common);
766
767 return ret;
768 }
769
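/* Switch-wide teardown done when the last open port is stopped: tear down
 * and reset the TX/RX DMA channels (waiting for teardown completion),
 * disable NAPI, stop the ALE and destroy the XDP RX queues.
 */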
770 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
771 {
772 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
773 struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
774 int i;
775
776 if (common->usage_count != 1)
777 return 0;
778
779 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
780 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
781
782 /* shutdown tx channels */
783 atomic_set(&common->tdown_cnt, common->tx_ch_num);
784 /* ensure new tdown_cnt value is visible */
785 smp_mb__after_atomic();
786 reinit_completion(&common->tdown_complete);
787
788 for (i = 0; i < common->tx_ch_num; i++)
789 k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
790
791 i = wait_for_completion_timeout(&common->tdown_complete,
792 msecs_to_jiffies(1000));
793 if (!i)
794 dev_err(common->dev, "tx timeout\n");
795 for (i = 0; i < common->tx_ch_num; i++) {
796 napi_disable(&tx_chn[i].napi_tx);
797 hrtimer_cancel(&tx_chn[i].tx_hrtimer);
798 }
799
800 for (i = 0; i < common->tx_ch_num; i++) {
801 k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
802 am65_cpsw_nuss_tx_cleanup);
803 k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
804 }
805
806 reinit_completion(&common->tdown_complete);
807 k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
808
809 if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
810 i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
811 if (!i)
812 dev_err(common->dev, "rx teardown timeout\n");
813 }
814
815 for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
816 napi_disable(&rx_chn->flows[i].napi_rx);
817 hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
818 k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
819 am65_cpsw_nuss_rx_cleanup, !!i);
820 }
821
822 k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
823
824 cpsw_ale_stop(common->ale);
825
826 writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
827 writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
828
829 am65_cpsw_destroy_xdp_rxqs(common);
830
831 dev_dbg(common->dev, "cpsw_nuss stopped\n");
832 return 0;
833 }
834
835 static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
836 {
837 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
838 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
839 int ret;
840
841 phylink_stop(port->slave.phylink);
842
843 netif_tx_stop_all_queues(ndev);
844
845 phylink_disconnect_phy(port->slave.phylink);
846
847 ret = am65_cpsw_nuss_common_stop(common);
848 if (ret)
849 return ret;
850
851 common->usage_count--;
852 pm_runtime_put(common->dev);
853 return 0;
854 }
855
856 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
857 {
858 struct am65_cpsw_port *port = arg;
859
860 if (!vdev)
861 return 0;
862
863 return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
864 }
865
866 static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
867 {
868 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
869 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
870 int ret, i;
871 u32 reg;
872
873 ret = pm_runtime_resume_and_get(common->dev);
874 if (ret < 0)
875 return ret;
876
877 /* Idle MAC port */
878 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
879 cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
880 cpsw_sl_ctl_reset(port->slave.mac_sl);
881
882 /* soft reset MAC */
883 cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
884 mdelay(1);
885 reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
886 if (reg) {
887 dev_err(common->dev, "soft RESET didn't complete\n");
888 ret = -ETIMEDOUT;
889 goto runtime_put;
890 }
891
892 /* Notify the stack of the actual queue counts. */
893 ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
894 if (ret) {
895 dev_err(common->dev, "cannot set real number of tx queues\n");
896 goto runtime_put;
897 }
898
899 ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
900 if (ret) {
901 dev_err(common->dev, "cannot set real number of rx queues\n");
902 goto runtime_put;
903 }
904
905 for (i = 0; i < common->tx_ch_num; i++) {
906 struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
907
908 netdev_tx_reset_queue(txq);
909 txq->tx_maxrate = common->tx_chns[i].rate_mbps;
910 }
911
912 ret = am65_cpsw_nuss_common_open(common);
913 if (ret)
914 goto runtime_put;
915
916 common->usage_count++;
917
918 am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
919
920 if (common->is_emac_mode)
921 am65_cpsw_init_port_emac_ale(port);
922 else
923 am65_cpsw_init_port_switch_ale(port);
924
925 /* mac_sl should be configured via phy-link interface */
926 am65_cpsw_sl_ctl_reset(port);
927
928 ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
929 if (ret)
930 goto error_cleanup;
931
932 /* restore vlan configurations */
933 vlan_for_each(ndev, cpsw_restore_vlans, port);
934
935 phylink_start(port->slave.phylink);
936
937 return 0;
938
939 error_cleanup:
940 am65_cpsw_nuss_ndo_slave_stop(ndev);
941 return ret;
942
943 runtime_put:
944 pm_runtime_put(common->dev);
945 return ret;
946 }
947
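/* Transmit one XDP frame on the given TX channel: map the frame, build a
 * host descriptor, account the bytes via BQL and push the descriptor to
 * the DMA channel. Returns 0 on success or AM65_CPSW_XDP_CONSUMED if the
 * frame had to be dropped.
 */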
948 static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
949 struct am65_cpsw_tx_chn *tx_chn,
950 struct xdp_frame *xdpf,
951 enum am65_cpsw_tx_buf_type buf_type)
952 {
953 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
954 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
955 struct cppi5_host_desc_t *host_desc;
956 struct netdev_queue *netif_txq;
957 dma_addr_t dma_desc, dma_buf;
958 u32 pkt_len = xdpf->len;
959 void **swdata;
960 int ret;
961
962 host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
963 if (unlikely(!host_desc)) {
964 ndev->stats.tx_dropped++;
965 return AM65_CPSW_XDP_CONSUMED; /* drop */
966 }
967
968 am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
969
970 dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
971 pkt_len, DMA_TO_DEVICE);
972 if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
973 ndev->stats.tx_dropped++;
974 ret = AM65_CPSW_XDP_CONSUMED; /* drop */
975 goto pool_free;
976 }
977
978 cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
979 AM65_CPSW_NAV_PS_DATA_SIZE);
980 cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
981 cppi5_hdesc_set_pktlen(host_desc, pkt_len);
982 cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
983 cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
984
985 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
986 cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
987
988 swdata = cppi5_hdesc_get_swdata(host_desc);
989 *(swdata) = xdpf;
990
991 /* Report BQL before sending the packet */
992 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
993 netdev_tx_sent_queue(netif_txq, pkt_len);
994
995 dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
996 if (AM65_CPSW_IS_CPSW2G(common)) {
997 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
998 dma_desc);
999 } else {
1000 spin_lock_bh(&tx_chn->lock);
1001 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
1002 dma_desc);
1003 spin_unlock_bh(&tx_chn->lock);
1004 }
1005 if (ret) {
1006 /* Inform BQL */
1007 netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1008 ndev->stats.tx_errors++;
1009 ret = AM65_CPSW_XDP_CONSUMED; /* drop */
1010 goto dma_unmap;
1011 }
1012
1013 return 0;
1014
1015 dma_unmap:
1016 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
1017 dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
1018 pool_free:
1019 k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
1020 return ret;
1021 }
1022
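/* Run the attached XDP program on a received buffer and act on its
 * verdict (PASS, TX, REDIRECT, ABORTED/DROP). *len is updated because the
 * program may have changed the packet boundaries; the returned
 * AM65_CPSW_XDP_* state tells the caller how the buffer was consumed.
 */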
1023 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
1024 struct am65_cpsw_port *port,
1025 struct xdp_buff *xdp,
1026 int cpu, int *len)
1027 {
1028 struct am65_cpsw_common *common = flow->common;
1029 struct am65_cpsw_ndev_priv *ndev_priv;
1030 struct net_device *ndev = port->ndev;
1031 struct am65_cpsw_ndev_stats *stats;
1032 int ret = AM65_CPSW_XDP_CONSUMED;
1033 struct am65_cpsw_tx_chn *tx_chn;
1034 struct netdev_queue *netif_txq;
1035 struct xdp_frame *xdpf;
1036 struct bpf_prog *prog;
1037 struct page *page;
1038 u32 act;
1039 int err;
1040
1041 prog = READ_ONCE(port->xdp_prog);
1042 if (!prog)
1043 return AM65_CPSW_XDP_PASS;
1044
1045 act = bpf_prog_run_xdp(prog, xdp);
1046 /* XDP prog might have changed packet data and boundaries */
1047 *len = xdp->data_end - xdp->data;
1048
1049 ndev_priv = netdev_priv(ndev);
1050 stats = this_cpu_ptr(ndev_priv->stats);
1051
1052 switch (act) {
1053 case XDP_PASS:
1054 ret = AM65_CPSW_XDP_PASS;
1055 goto out;
1056 case XDP_TX:
1057 tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
1058 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1059
1060 xdpf = xdp_convert_buff_to_frame(xdp);
1061 if (unlikely(!xdpf))
1062 goto drop;
1063
1064 __netif_tx_lock(netif_txq, cpu);
1065 err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
1066 AM65_CPSW_TX_BUF_TYPE_XDP_TX);
1067 __netif_tx_unlock(netif_txq);
1068 if (err)
1069 goto drop;
1070
1071 u64_stats_update_begin(&stats->syncp);
1072 stats->rx_bytes += *len;
1073 stats->rx_packets++;
1074 u64_stats_update_end(&stats->syncp);
1075 ret = AM65_CPSW_XDP_CONSUMED;
1076 goto out;
1077 case XDP_REDIRECT:
1078 if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
1079 goto drop;
1080
1081 u64_stats_update_begin(&stats->syncp);
1082 stats->rx_bytes += *len;
1083 stats->rx_packets++;
1084 u64_stats_update_end(&stats->syncp);
1085 ret = AM65_CPSW_XDP_REDIRECT;
1086 goto out;
1087 default:
1088 bpf_warn_invalid_xdp_action(ndev, prog, act);
1089 fallthrough;
1090 case XDP_ABORTED:
1091 drop:
1092 trace_xdp_exception(ndev, prog, act);
1093 fallthrough;
1094 case XDP_DROP:
1095 ndev->stats.rx_dropped++;
1096 }
1097
1098 page = virt_to_head_page(xdp->data);
1099 am65_cpsw_put_page(flow, page, true);
1100
1101 out:
1102 return ret;
1103 }
1104
1105 /* RX psdata[2] word format - checksum information */
1106 #define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
1107 #define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
1108 #define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
1109 #define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
1110 #define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
1111 #define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
1112
1113 static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
1114 {
1115 /* HW can verify IPv4/IPv6 TCP/UDP packets checksum
1116 * csum information is provided in the psdata[2] word:
1117 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
1118 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
1119 * bits - indicates IPv4/IPv6 packet
1120 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
1121 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
1122 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
1123 */
1124 skb_checksum_none_assert(skb);
1125
1126 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
1127 return;
1128
1129 if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
1130 AM65_CPSW_RX_PSD_IPV4_VALID)) &&
1131 !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
1132 /* csum for fragmented packets is unsupported */
1133 if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
1134 skb->ip_summed = CHECKSUM_UNNECESSARY;
1135 }
1136 }
1137
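/* Process one received packet on the given flow: pop the descriptor,
 * unmap the buffer, run XDP if a program is attached, otherwise build an
 * skb, apply timestamp/checksum info and pass it to GRO; a fresh page is
 * then pushed back to the RX ring.
 */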
1138 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
1139 int cpu, int *xdp_state)
1140 {
1141 struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
1142 u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
1143 struct am65_cpsw_common *common = flow->common;
1144 struct am65_cpsw_ndev_priv *ndev_priv;
1145 struct am65_cpsw_ndev_stats *stats;
1146 struct cppi5_host_desc_t *desc_rx;
1147 struct device *dev = common->dev;
1148 struct am65_cpsw_swdata *swdata;
1149 struct page *page, *new_page;
1150 dma_addr_t desc_dma, buf_dma;
1151 struct am65_cpsw_port *port;
1152 struct net_device *ndev;
1153 u32 flow_idx = flow->id;
1154 struct sk_buff *skb;
1155 struct xdp_buff xdp;
1156 int headroom, ret;
1157 void *page_addr;
1158 u32 *psdata;
1159
1160 *xdp_state = AM65_CPSW_XDP_PASS;
1161 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
1162 if (ret) {
1163 if (ret != -ENODATA)
1164 dev_err(dev, "RX: pop chn fail %d\n", ret);
1165 return ret;
1166 }
1167
1168 if (cppi5_desc_is_tdcm(desc_dma)) {
1169 dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
1170 if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
1171 complete(&common->tdown_complete);
1172 return 0;
1173 }
1174
1175 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
1176 dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
1177 __func__, flow_idx, &desc_dma);
1178
1179 swdata = cppi5_hdesc_get_swdata(desc_rx);
1180 page = swdata->page;
1181 page_addr = page_address(page);
1182 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
1183 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
1184 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
1185 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
1186 dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
1187 port = am65_common_get_port(common, port_id);
1188 ndev = port->ndev;
1189 psdata = cppi5_hdesc_get_psdata(desc_rx);
1190 csum_info = psdata[2];
1191 dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
1192
1193 dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
1194
1195 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
1196
1197 skb = am65_cpsw_build_skb(page_addr, ndev,
1198 AM65_CPSW_MAX_PACKET_SIZE);
1199 if (unlikely(!skb)) {
1200 new_page = page;
1201 goto requeue;
1202 }
1203
1204 if (port->xdp_prog) {
1205 xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
1206 xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
1207 pkt_len, false);
1208 *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
1209 cpu, &pkt_len);
1210 if (*xdp_state != AM65_CPSW_XDP_PASS)
1211 goto allocate;
1212
1213 /* Compute additional headroom to be reserved */
1214 headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
1215 skb_reserve(skb, headroom);
1216 }
1217
1218 ndev_priv = netdev_priv(ndev);
1219 am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
1220 skb_put(skb, pkt_len);
1221 if (port->rx_ts_enabled)
1222 am65_cpts_rx_timestamp(common->cpts, skb);
1223 skb_mark_for_recycle(skb);
1224 skb->protocol = eth_type_trans(skb, ndev);
1225 am65_cpsw_nuss_rx_csum(skb, csum_info);
1226 napi_gro_receive(&flow->napi_rx, skb);
1227
1228 stats = this_cpu_ptr(ndev_priv->stats);
1229
1230 u64_stats_update_begin(&stats->syncp);
1231 stats->rx_packets++;
1232 stats->rx_bytes += pkt_len;
1233 u64_stats_update_end(&stats->syncp);
1234
1235 allocate:
1236 new_page = page_pool_dev_alloc_pages(flow->page_pool);
1237 if (unlikely(!new_page)) {
1238 dev_err(dev, "page alloc failed\n");
1239 return -ENOMEM;
1240 }
1241
1242 if (netif_dormant(ndev)) {
1243 am65_cpsw_put_page(flow, new_page, true);
1244 ndev->stats.rx_dropped++;
1245 return 0;
1246 }
1247
1248 requeue:
1249 ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
1250 if (WARN_ON(ret < 0)) {
1251 am65_cpsw_put_page(flow, new_page, true);
1252 ndev->stats.rx_errors++;
1253 ndev->stats.rx_dropped++;
1254 }
1255
1256 return ret;
1257 }
1258
1259 static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
1260 {
1261 struct am65_cpsw_rx_flow *flow = container_of(timer,
1262 struct am65_cpsw_rx_flow,
1263 rx_hrtimer);
1264
1265 enable_irq(flow->irq);
1266 return HRTIMER_NORESTART;
1267 }
1268
1269 static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
1270 {
1271 struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
1272 struct am65_cpsw_common *common = flow->common;
1273 int cpu = smp_processor_id();
1274 int xdp_state_or = 0;
1275 int cur_budget, ret;
1276 int xdp_state;
1277 int num_rx = 0;
1278
1279 /* process only this flow */
1280 cur_budget = budget;
1281 while (cur_budget--) {
1282 ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
1283 xdp_state_or |= xdp_state;
1284 if (ret)
1285 break;
1286 num_rx++;
1287 }
1288
1289 if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
1290 xdp_do_flush();
1291
1292 dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
1293
1294 if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
1295 if (flow->irq_disabled) {
1296 flow->irq_disabled = false;
1297 if (unlikely(flow->rx_pace_timeout)) {
1298 hrtimer_start(&flow->rx_hrtimer,
1299 ns_to_ktime(flow->rx_pace_timeout),
1300 HRTIMER_MODE_REL_PINNED);
1301 } else {
1302 enable_irq(flow->irq);
1303 }
1304 }
1305 }
1306
1307 return num_rx;
1308 }
1309
1310 static struct sk_buff *
1311 am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
1312 dma_addr_t desc_dma)
1313 {
1314 struct am65_cpsw_ndev_priv *ndev_priv;
1315 struct am65_cpsw_ndev_stats *stats;
1316 struct cppi5_host_desc_t *desc_tx;
1317 struct net_device *ndev;
1318 struct sk_buff *skb;
1319 void **swdata;
1320
1321 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
1322 desc_dma);
1323 swdata = cppi5_hdesc_get_swdata(desc_tx);
1324 skb = *(swdata);
1325 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1326
1327 ndev = skb->dev;
1328
1329 am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
1330
1331 ndev_priv = netdev_priv(ndev);
1332 stats = this_cpu_ptr(ndev_priv->stats);
1333 u64_stats_update_begin(&stats->syncp);
1334 stats->tx_packets++;
1335 stats->tx_bytes += skb->len;
1336 u64_stats_update_end(&stats->syncp);
1337
1338 return skb;
1339 }
1340
1341 static struct xdp_frame *
1342 am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
1343 struct am65_cpsw_tx_chn *tx_chn,
1344 dma_addr_t desc_dma,
1345 struct net_device **ndev)
1346 {
1347 struct am65_cpsw_ndev_priv *ndev_priv;
1348 struct am65_cpsw_ndev_stats *stats;
1349 struct cppi5_host_desc_t *desc_tx;
1350 struct am65_cpsw_port *port;
1351 struct xdp_frame *xdpf;
1352 u32 port_id = 0;
1353 void **swdata;
1354
1355 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
1356 cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
1357 swdata = cppi5_hdesc_get_swdata(desc_tx);
1358 xdpf = *(swdata);
1359 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
1360
1361 port = am65_common_get_port(common, port_id);
1362 *ndev = port->ndev;
1363
1364 ndev_priv = netdev_priv(*ndev);
1365 stats = this_cpu_ptr(ndev_priv->stats);
1366 u64_stats_update_begin(&stats->syncp);
1367 stats->tx_packets++;
1368 stats->tx_bytes += xdpf->len;
1369 u64_stats_update_end(&stats->syncp);
1370
1371 return xdpf;
1372 }
1373
1374 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
1375 struct netdev_queue *netif_txq)
1376 {
1377 if (netif_tx_queue_stopped(netif_txq)) {
1378 /* Check whether the queue is stopped due to stalled
1379 * TX DMA; if so, wake the queue since free TX
1380 * descriptors are available again
1381 */
1382 __netif_tx_lock(netif_txq, smp_processor_id());
1383 if (netif_running(ndev) &&
1384 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
1385 netif_tx_wake_queue(netif_txq);
1386
1387 __netif_tx_unlock(netif_txq);
1388 }
1389 }
1390
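/* TX completion handling for the non-CPSW2G (multi-port) case: completed
 * descriptors may belong to different ndevs, so BQL accounting and queue
 * wake-up are performed per completed packet and the channel lock
 * protects the descriptor pop.
 */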
1391 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
1392 int chn, unsigned int budget, bool *tdown)
1393 {
1394 enum am65_cpsw_tx_buf_type buf_type;
1395 struct device *dev = common->dev;
1396 struct am65_cpsw_tx_chn *tx_chn;
1397 struct netdev_queue *netif_txq;
1398 unsigned int total_bytes = 0;
1399 struct net_device *ndev;
1400 struct xdp_frame *xdpf;
1401 struct sk_buff *skb;
1402 dma_addr_t desc_dma;
1403 int res, num_tx = 0;
1404
1405 tx_chn = &common->tx_chns[chn];
1406
1407 while (true) {
1408 spin_lock(&tx_chn->lock);
1409 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1410 spin_unlock(&tx_chn->lock);
1411 if (res == -ENODATA)
1412 break;
1413
1414 if (cppi5_desc_is_tdcm(desc_dma)) {
1415 if (atomic_dec_and_test(&common->tdown_cnt))
1416 complete(&common->tdown_complete);
1417 *tdown = true;
1418 break;
1419 }
1420
1421 buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1422 if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1423 skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1424 ndev = skb->dev;
1425 total_bytes = skb->len;
1426 napi_consume_skb(skb, budget);
1427 } else {
1428 xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1429 desc_dma, &ndev);
1430 total_bytes = xdpf->len;
1431 if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1432 xdp_return_frame_rx_napi(xdpf);
1433 else
1434 xdp_return_frame(xdpf);
1435 }
1436 num_tx++;
1437
1438 netif_txq = netdev_get_tx_queue(ndev, chn);
1439
1440 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1441
1442 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1443 }
1444
1445 dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1446
1447 return num_tx;
1448 }
1449
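/* TX completion fast path for CPSW2G (single external port): the channel
 * is used by one ndev only, so no lock is taken around the descriptor pop
 * and BQL completion is reported once for the whole batch.
 */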
1450 static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
1451 int chn, unsigned int budget, bool *tdown)
1452 {
1453 enum am65_cpsw_tx_buf_type buf_type;
1454 struct device *dev = common->dev;
1455 struct am65_cpsw_tx_chn *tx_chn;
1456 struct netdev_queue *netif_txq;
1457 unsigned int total_bytes = 0;
1458 struct net_device *ndev;
1459 struct xdp_frame *xdpf;
1460 struct sk_buff *skb;
1461 dma_addr_t desc_dma;
1462 int res, num_tx = 0;
1463
1464 tx_chn = &common->tx_chns[chn];
1465
1466 while (true) {
1467 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1468 if (res == -ENODATA)
1469 break;
1470
1471 if (cppi5_desc_is_tdcm(desc_dma)) {
1472 if (atomic_dec_and_test(&common->tdown_cnt))
1473 complete(&common->tdown_complete);
1474 *tdown = true;
1475 break;
1476 }
1477
1478 buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
1479 if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
1480 skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
1481 ndev = skb->dev;
1482 total_bytes += skb->len;
1483 napi_consume_skb(skb, budget);
1484 } else {
1485 xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
1486 desc_dma, &ndev);
1487 total_bytes += xdpf->len;
1488 if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
1489 xdp_return_frame_rx_napi(xdpf);
1490 else
1491 xdp_return_frame(xdpf);
1492 }
1493 num_tx++;
1494 }
1495
1496 if (!num_tx)
1497 return 0;
1498
1499 netif_txq = netdev_get_tx_queue(ndev, chn);
1500
1501 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1502
1503 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1504
1505 dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1506
1507 return num_tx;
1508 }
1509
1510 static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
1511 {
1512 struct am65_cpsw_tx_chn *tx_chns =
1513 container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
1514
1515 enable_irq(tx_chns->irq);
1516 return HRTIMER_NORESTART;
1517 }
1518
1519 static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
1520 {
1521 struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1522 bool tdown = false;
1523 int num_tx;
1524
1525 if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1526 num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
1527 budget, &tdown);
1528 else
1529 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
1530 tx_chn->id, budget, &tdown);
1531
1532 if (num_tx >= budget)
1533 return budget;
1534
1535 if (napi_complete_done(napi_tx, num_tx)) {
1536 if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
1537 hrtimer_start(&tx_chn->tx_hrtimer,
1538 ns_to_ktime(tx_chn->tx_pace_timeout),
1539 HRTIMER_MODE_REL_PINNED);
1540 } else {
1541 enable_irq(tx_chn->irq);
1542 }
1543 }
1544
1545 return 0;
1546 }
1547
1548 static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
1549 {
1550 struct am65_cpsw_rx_flow *flow = dev_id;
1551
1552 flow->irq_disabled = true;
1553 disable_irq_nosync(irq);
1554 napi_schedule(&flow->napi_rx);
1555
1556 return IRQ_HANDLED;
1557 }
1558
1559 static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
1560 {
1561 struct am65_cpsw_tx_chn *tx_chn = dev_id;
1562
1563 disable_irq_nosync(irq);
1564 napi_schedule(&tx_chn->napi_tx);
1565
1566 return IRQ_HANDLED;
1567 }
1568
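/* ndo_start_xmit handler: map the linear part and any page fragments into
 * a chain of host descriptors, fill in checksum offload info through
 * psdata[2], push the chain to the TX DMA channel and stop the queue when
 * the descriptor pool runs low.
 */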
1569 static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
1570 struct net_device *ndev)
1571 {
1572 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1573 struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
1574 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1575 struct device *dev = common->dev;
1576 struct am65_cpsw_tx_chn *tx_chn;
1577 struct netdev_queue *netif_txq;
1578 dma_addr_t desc_dma, buf_dma;
1579 int ret, q_idx, i;
1580 void **swdata;
1581 u32 *psdata;
1582 u32 pkt_len;
1583
1584 /* padding enabled in hw */
1585 pkt_len = skb_headlen(skb);
1586
1587 /* SKB TX timestamp */
1588 if (port->tx_ts_enabled)
1589 am65_cpts_prep_tx_timestamp(common->cpts, skb);
1590
1591 q_idx = skb_get_queue_mapping(skb);
1592 dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
1593
1594 tx_chn = &common->tx_chns[q_idx];
1595 netif_txq = netdev_get_tx_queue(ndev, q_idx);
1596
1597 /* Map the linear buffer */
1598 buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
1599 DMA_TO_DEVICE);
1600 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1601 dev_err(dev, "Failed to map tx skb buffer\n");
1602 ndev->stats.tx_errors++;
1603 goto err_free_skb;
1604 }
1605
1606 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1607 if (!first_desc) {
1608 dev_dbg(dev, "Failed to allocate descriptor\n");
1609 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
1610 DMA_TO_DEVICE);
1611 goto busy_stop_q;
1612 }
1613
1614 am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
1615 AM65_CPSW_TX_BUF_TYPE_SKB);
1616
1617 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1618 AM65_CPSW_NAV_PS_DATA_SIZE);
1619 cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
1620 cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
1621 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
1622
1623 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1624 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
1625 swdata = cppi5_hdesc_get_swdata(first_desc);
1626 *(swdata) = skb;
1627 psdata = cppi5_hdesc_get_psdata(first_desc);
1628
1629 /* HW csum offload if enabled */
1630 psdata[2] = 0;
1631 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1632 unsigned int cs_start, cs_offset;
1633
1634 cs_start = skb_transport_offset(skb);
1635 cs_offset = cs_start + skb->csum_offset;
1636 /* HW counts bytes starting from 1 */
1637 psdata[2] = ((cs_offset + 1) << 24) |
1638 ((cs_start + 1) << 16) | (skb->len - cs_start);
1639 dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
1640 }
1641
1642 if (!skb_is_nonlinear(skb))
1643 goto done_tx;
1644
1645 dev_dbg(dev, "fragmented SKB\n");
1646
1647 /* Handle the case where skb is fragmented in pages */
1648 cur_desc = first_desc;
1649 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1650 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1651 u32 frag_size = skb_frag_size(frag);
1652
1653 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1654 if (!next_desc) {
1655 dev_err(dev, "Failed to allocate descriptor\n");
1656 goto busy_free_descs;
1657 }
1658
1659 am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
1660 AM65_CPSW_TX_BUF_TYPE_SKB);
1661
1662 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
1663 DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1665 dev_err(dev, "Failed to map tx skb page\n");
1666 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1667 ndev->stats.tx_errors++;
1668 goto err_free_descs;
1669 }
1670
1671 cppi5_hdesc_reset_hbdesc(next_desc);
1672 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1673 cppi5_hdesc_attach_buf(next_desc,
1674 buf_dma, frag_size, buf_dma, frag_size);
1675
1676 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1677 next_desc);
1678 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
1679 cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
1680
1681 pkt_len += frag_size;
1682 cur_desc = next_desc;
1683 }
1684 WARN_ON(pkt_len != skb->len);
1685
1686 done_tx:
1687 skb_tx_timestamp(skb);
1688
1689 /* report bql before sending packet */
1690 netdev_tx_sent_queue(netif_txq, pkt_len);
1691
1692 cppi5_hdesc_set_pktlen(first_desc, pkt_len);
1693 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1694 if (AM65_CPSW_IS_CPSW2G(common)) {
1695 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1696 } else {
1697 spin_lock_bh(&tx_chn->lock);
1698 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1699 spin_unlock_bh(&tx_chn->lock);
1700 }
1701 if (ret) {
1702 dev_err(dev, "can't push desc %d\n", ret);
1703 /* inform bql */
1704 netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1705 ndev->stats.tx_errors++;
1706 goto err_free_descs;
1707 }
1708
1709 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1710 netif_tx_stop_queue(netif_txq);
1711 /* Barrier, so that stop_queue visible to other cpus */
1712 smp_mb__after_atomic();
1713 dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
1714
1715 /* re-check for smp */
1716 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1717 MAX_SKB_FRAGS) {
1718 netif_tx_wake_queue(netif_txq);
1719 dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
1720 }
1721 }
1722
1723 return NETDEV_TX_OK;
1724
1725 err_free_descs:
1726 am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1727 err_free_skb:
1728 ndev->stats.tx_dropped++;
1729 dev_kfree_skb_any(skb);
1730 return NETDEV_TX_OK;
1731
1732 busy_free_descs:
1733 am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1734 busy_stop_q:
1735 netif_tx_stop_queue(netif_txq);
1736 return NETDEV_TX_BUSY;
1737 }
1738
1739 static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
1740 void *addr)
1741 {
1742 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1743 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1744 struct sockaddr *sockaddr = (struct sockaddr *)addr;
1745 int ret;
1746
1747 ret = eth_prepare_mac_addr_change(ndev, addr);
1748 if (ret < 0)
1749 return ret;
1750
1751 ret = pm_runtime_resume_and_get(common->dev);
1752 if (ret < 0)
1753 return ret;
1754
1755 cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
1756 HOST_PORT_NUM, 0, 0);
1757 cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
1758 HOST_PORT_NUM, ALE_SECURE, 0);
1759
1760 am65_cpsw_port_set_sl_mac(port, addr);
1761 eth_commit_mac_addr_change(ndev, sockaddr);
1762
1763 pm_runtime_put(common->dev);
1764
1765 return 0;
1766 }
1767
1768 static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
1769 struct ifreq *ifr)
1770 {
1771 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1772 u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
1773 struct hwtstamp_config cfg;
1774
1775 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1776 return -EOPNOTSUPP;
1777
1778 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1779 return -EFAULT;
1780
1781 /* TX HW timestamp */
1782 switch (cfg.tx_type) {
1783 case HWTSTAMP_TX_OFF:
1784 case HWTSTAMP_TX_ON:
1785 break;
1786 default:
1787 return -ERANGE;
1788 }
1789
1790 switch (cfg.rx_filter) {
1791 case HWTSTAMP_FILTER_NONE:
1792 port->rx_ts_enabled = false;
1793 break;
1794 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1795 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1796 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1797 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1798 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1799 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1800 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1801 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1802 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1803 port->rx_ts_enabled = true;
1804 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1805 break;
1806 case HWTSTAMP_FILTER_ALL:
1807 case HWTSTAMP_FILTER_SOME:
1808 case HWTSTAMP_FILTER_NTP_ALL:
1809 return -EOPNOTSUPP;
1810 default:
1811 return -ERANGE;
1812 }
1813
1814 port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
1815
1816 /* cfg TX timestamp */
1817 seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
1818 AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
1819
1820 ts_vlan_ltype = ETH_P_8021Q;
1821
1822 ts_ctrl_ltype2 = ETH_P_1588 |
1823 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
1824 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
1825 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
1826 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
1827 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
1828 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
1829 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
1830 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
1831
1832 ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
1833 AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
1834
1835 if (port->tx_ts_enabled)
1836 ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
1837 AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
1838
1839 if (port->rx_ts_enabled)
1840 ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
1841 AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
1842
1843 writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
1844 writel(ts_vlan_ltype, port->port_base +
1845 AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
1846 writel(ts_ctrl_ltype2, port->port_base +
1847 AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
1848 writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
1849
1850 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1851 }
1852
1853 static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
1854 struct ifreq *ifr)
1855 {
1856 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1857 struct hwtstamp_config cfg;
1858
1859 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1860 return -EOPNOTSUPP;
1861
1862 cfg.flags = 0;
1863 cfg.tx_type = port->tx_ts_enabled ?
1864 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1865 cfg.rx_filter = port->rx_ts_enabled ?
1866 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
1867
1868 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1869 }
1870
1871 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
1872 struct ifreq *req, int cmd)
1873 {
1874 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1875
1876 if (!netif_running(ndev))
1877 return -EINVAL;
1878
1879 switch (cmd) {
1880 case SIOCSHWTSTAMP:
1881 return am65_cpsw_nuss_hwtstamp_set(ndev, req);
1882 case SIOCGHWTSTAMP:
1883 return am65_cpsw_nuss_hwtstamp_get(ndev, req);
1884 }
1885
1886 return phylink_mii_ioctl(port->slave.phylink, req, cmd);
1887 }
1888
1889 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
1890 struct rtnl_link_stats64 *stats)
1891 {
1892 struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
1893 unsigned int start;
1894 int cpu;
1895
1896 for_each_possible_cpu(cpu) {
1897 struct am65_cpsw_ndev_stats *cpu_stats;
1898 u64 rx_packets;
1899 u64 rx_bytes;
1900 u64 tx_packets;
1901 u64 tx_bytes;
1902
1903 cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
1904 do {
1905 start = u64_stats_fetch_begin(&cpu_stats->syncp);
1906 rx_packets = cpu_stats->rx_packets;
1907 rx_bytes = cpu_stats->rx_bytes;
1908 tx_packets = cpu_stats->tx_packets;
1909 tx_bytes = cpu_stats->tx_bytes;
1910 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
1911
1912 stats->rx_packets += rx_packets;
1913 stats->rx_bytes += rx_bytes;
1914 stats->tx_packets += tx_packets;
1915 stats->tx_bytes += tx_bytes;
1916 }
1917
1918 stats->rx_errors = dev->stats.rx_errors;
1919 stats->rx_dropped = dev->stats.rx_dropped;
1920 stats->tx_dropped = dev->stats.tx_dropped;
1921 }
1922
1923 static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
1924 struct bpf_prog *prog)
1925 {
1926 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1927 bool running = netif_running(ndev);
1928 struct bpf_prog *old_prog;
1929
1930 if (running)
1931 am65_cpsw_nuss_ndo_slave_stop(ndev);
1932
1933 old_prog = xchg(&port->xdp_prog, prog);
1934 if (old_prog)
1935 bpf_prog_put(old_prog);
1936
1937 if (running)
1938 return am65_cpsw_nuss_ndo_slave_open(ndev);
1939
1940 return 0;
1941 }
1942
1943 static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
1944 {
1945 switch (bpf->command) {
1946 case XDP_SETUP_PROG:
1947 return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
1948 default:
1949 return -EINVAL;
1950 }
1951 }
1952
1953 static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1954 struct xdp_frame **frames, u32 flags)
1955 {
1956 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1957 struct am65_cpsw_tx_chn *tx_chn;
1958 struct netdev_queue *netif_txq;
1959 int cpu = smp_processor_id();
1960 int i, nxmit = 0;
1961
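/* Pick a TX channel based on the current CPU to spread XDP transmit load */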
1962 tx_chn = &common->tx_chns[cpu % common->tx_ch_num];
1963 netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
1964
1965 __netif_tx_lock(netif_txq, cpu);
1966 for (i = 0; i < n; i++) {
1967 if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
1968 AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
1969 break;
1970 nxmit++;
1971 }
1972 __netif_tx_unlock(netif_txq);
1973
1974 return nxmit;
1975 }
1976
1977 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
1978 .ndo_open = am65_cpsw_nuss_ndo_slave_open,
1979 .ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
1980 .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
1981 .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode,
1982 .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats,
1983 .ndo_validate_addr = eth_validate_addr,
1984 .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address,
1985 .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
1986 .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
1987 .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
1988 .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
1989 .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
1990 .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
1991 .ndo_bpf = am65_cpsw_ndo_bpf,
1992 .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
1993 };
1994
1995 static void am65_cpsw_disable_phy(struct phy *phy)
1996 {
1997 phy_power_off(phy);
1998 phy_exit(phy);
1999 }
2000
2001 static int am65_cpsw_enable_phy(struct phy *phy)
2002 {
2003 int ret;
2004
2005 ret = phy_init(phy);
2006 if (ret < 0)
2007 return ret;
2008
2009 ret = phy_power_on(phy);
2010 if (ret < 0) {
2011 phy_exit(phy);
2012 return ret;
2013 }
2014
2015 return 0;
2016 }
2017
2018 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
2019 {
2020 struct am65_cpsw_port *port;
2021 struct phy *phy;
2022 int i;
2023
2024 for (i = 0; i < common->port_num; i++) {
2025 port = &common->ports[i];
2026 phy = port->slave.serdes_phy;
2027 if (phy)
2028 am65_cpsw_disable_phy(phy);
2029 }
2030 }
2031
2032 static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
2033 struct am65_cpsw_port *port)
2034 {
2035 const char *name = "serdes";
2036 struct phy *phy;
2037 int ret;
2038
2039 phy = devm_of_phy_optional_get(dev, port_np, name);
2040 if (IS_ERR_OR_NULL(phy))
2041 return PTR_ERR_OR_ZERO(phy);
2042
2043 /* Serdes PHY exists. Store it. */
2044 port->slave.serdes_phy = phy;
2045
2046 ret = am65_cpsw_enable_phy(phy);
2047 if (ret < 0)
2048 goto err_phy;
2049
2050 return 0;
2051
2052 err_phy:
2053 devm_phy_put(dev, phy);
2054 return ret;
2055 }
2056
2057 static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
2058 const struct phylink_link_state *state)
2059 {
2060 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2061 phylink_config);
2062 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2063 struct am65_cpsw_common *common = port->common;
2064
2065 if (common->pdata.extra_modes & BIT(state->interface)) {
2066 if (state->interface == PHY_INTERFACE_MODE_SGMII) {
2067 writel(ADVERTISE_SGMII,
2068 port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
2069 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2070 } else {
2071 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_EXT_EN);
2072 }
2073
2074 if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
2075 cpsw_sl_ctl_set(port->slave.mac_sl,
2076 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2077 } else {
2078 cpsw_sl_ctl_clr(port->slave.mac_sl,
2079 CPSW_SL_CTL_XGIG | CPSW_SL_CTL_XGMII_EN);
2080 }
2081
2082 writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
2083 port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
2084 }
2085 }
2086
2087 static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
2088 phy_interface_t interface)
2089 {
2090 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2091 phylink_config);
2092 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2093 struct am65_cpsw_common *common = port->common;
2094 struct net_device *ndev = port->ndev;
2095 u32 mac_control;
2096 int tmo;
2097
2098 /* disable forwarding */
2099 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2100
2101 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2102
2103 tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
2104 dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
2105 cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
2106
2107 /* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
2108 mac_control = CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_GIG | CPSW_SL_CTL_IFCTL_A |
2109 CPSW_SL_CTL_FULLDUPLEX | CPSW_SL_CTL_RX_FLOW_EN | CPSW_SL_CTL_TX_FLOW_EN;
2110 /* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
2111 if (phy_interface_mode_is_rgmii(interface))
2112 mac_control |= CPSW_SL_CTL_EXT_EN;
2113 /* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
2114 cpsw_sl_ctl_clr(port->slave.mac_sl, mac_control);
2115
2116 am65_cpsw_qos_link_down(ndev);
2117 netif_tx_stop_all_queues(ndev);
2118 }
2119
2120 static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
2121 unsigned int mode, phy_interface_t interface, int speed,
2122 int duplex, bool tx_pause, bool rx_pause)
2123 {
2124 struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
2125 phylink_config);
2126 struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
2127 struct am65_cpsw_common *common = port->common;
2128 u32 mac_control = CPSW_SL_CTL_GMII_EN;
2129 struct net_device *ndev = port->ndev;
2130
2131 /* Bring the port out of idle state */
2132 cpsw_sl_ctl_clr(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
2133
2134 if (speed == SPEED_1000)
2135 mac_control |= CPSW_SL_CTL_GIG;
2136 /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
2137 if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
2138 /* Can be used with in-band mode only */
2139 mac_control |= CPSW_SL_CTL_EXT_EN;
2140 if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
2141 mac_control |= CPSW_SL_CTL_IFCTL_A;
2142 if (duplex)
2143 mac_control |= CPSW_SL_CTL_FULLDUPLEX;
2144
2145 /* rx_pause/tx_pause */
2146 if (rx_pause)
2147 mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
2148
2149 if (tx_pause)
2150 mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
2151
2152 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
2153
2154 /* enable forwarding */
2155 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2156
2157 am65_cpsw_qos_link_up(ndev, speed);
2158 netif_tx_wake_all_queues(ndev);
2159 }
2160
2161 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
2162 .mac_config = am65_cpsw_nuss_mac_config,
2163 .mac_link_down = am65_cpsw_nuss_mac_link_down,
2164 .mac_link_up = am65_cpsw_nuss_mac_link_up,
2165 };
2166
2167 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
2168 {
2169 struct am65_cpsw_common *common = port->common;
2170
2171 if (!port->disabled)
2172 return;
2173
2174 cpsw_ale_control_set(common->ale, port->port_id,
2175 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2176
2177 cpsw_sl_reset(port->slave.mac_sl, 100);
2178 cpsw_sl_ctl_reset(port->slave.mac_sl);
2179 }
2180
2181 static void am65_cpsw_nuss_free_tx_chns(void *data)
2182 {
2183 struct am65_cpsw_common *common = data;
2184 int i;
2185
2186 for (i = 0; i < common->tx_ch_num; i++) {
2187 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2188
2189 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
2190 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
2191
2192 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
2193 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
2194
2195 memset(tx_chn, 0, sizeof(*tx_chn));
2196 }
2197 }
2198
2199 static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
2200 {
2201 struct device *dev = common->dev;
2202 int i;
2203
2204 devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
2205
2206 common->tx_ch_rate_msk = 0;
2207 for (i = 0; i < common->tx_ch_num; i++) {
2208 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2209
2210 if (tx_chn->irq)
2211 devm_free_irq(dev, tx_chn->irq, tx_chn);
2212
2213 netif_napi_del(&tx_chn->napi_tx);
2214 }
2215
2216 am65_cpsw_nuss_free_tx_chns(common);
2217 }
2218
2219 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
2220 {
2221 struct device *dev = common->dev;
2222 int i, ret = 0;
2223
2224 for (i = 0; i < common->tx_ch_num; i++) {
2225 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2226
2227 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
2228 am65_cpsw_nuss_tx_poll);
2229 hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
2230 tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
2231
2232 ret = devm_request_irq(dev, tx_chn->irq,
2233 am65_cpsw_nuss_tx_irq,
2234 IRQF_TRIGGER_HIGH,
2235 tx_chn->tx_chn_name, tx_chn);
2236 if (ret) {
2237 dev_err(dev, "failure requesting tx%u irq %u, %d\n",
2238 tx_chn->id, tx_chn->irq, ret);
2239 goto err;
2240 }
2241 }
2242
2243 err:
2244 return ret;
2245 }
2246
2247 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
2248 {
2249 u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
2250 struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
2251 struct device *dev = common->dev;
2252 struct k3_ring_cfg ring_cfg = {
2253 .elm_size = K3_RINGACC_RING_ELSIZE_8,
2254 .mode = K3_RINGACC_RING_MODE_RING,
2255 .flags = 0
2256 };
2257 u32 hdesc_size, hdesc_size_out;
2258 int i, ret = 0;
2259
2260 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2261 AM65_CPSW_NAV_SW_DATA_SIZE);
2262
2263 tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2264 tx_cfg.tx_cfg = ring_cfg;
2265 tx_cfg.txcq_cfg = ring_cfg;
2266 tx_cfg.tx_cfg.size = max_desc_num;
2267 tx_cfg.txcq_cfg.size = max_desc_num;
2268
2269 for (i = 0; i < common->tx_ch_num; i++) {
2270 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
2271
2272 snprintf(tx_chn->tx_chn_name,
2273 sizeof(tx_chn->tx_chn_name), "tx%d", i);
2274
2275 spin_lock_init(&tx_chn->lock);
2276 tx_chn->common = common;
2277 tx_chn->id = i;
2278 tx_chn->descs_num = max_desc_num;
2279
2280 tx_chn->tx_chn =
2281 k3_udma_glue_request_tx_chn(dev,
2282 tx_chn->tx_chn_name,
2283 &tx_cfg);
2284 if (IS_ERR(tx_chn->tx_chn)) {
2285 ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
2286 "Failed to request tx dma channel\n");
2287 goto err;
2288 }
2289 tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
2290
2291 tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
2292 tx_chn->descs_num,
2293 hdesc_size,
2294 tx_chn->tx_chn_name);
2295 if (IS_ERR(tx_chn->desc_pool)) {
2296 ret = PTR_ERR(tx_chn->desc_pool);
2297 dev_err(dev, "Failed to create poll %d\n", ret);
2298 goto err;
2299 }
2300
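/* The descriptor size is expected to be a power of two (checked below),
 * so descriptor indexes can be derived from addresses by shifting with
 * dsize_log2.
 */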
2301 hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
2302 tx_chn->dsize_log2 = __fls(hdesc_size_out);
2303 WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
2304
2305 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
2306 if (tx_chn->irq < 0) {
2307 dev_err(dev, "Failed to get tx dma irq %d\n",
2308 tx_chn->irq);
2309 ret = tx_chn->irq;
2310 goto err;
2311 }
2312
2313 snprintf(tx_chn->tx_chn_name,
2314 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
2315 dev_name(dev), tx_chn->id);
2316 }
2317
2318 ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
2319 if (ret) {
2320 dev_err(dev, "Failed to add tx NAPI %d\n", ret);
2321 goto err;
2322 }
2323
2324 err:
2325 i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
2326 if (i) {
2327 dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
2328 return i;
2329 }
2330
2331 return ret;
2332 }
2333
2334 static void am65_cpsw_nuss_free_rx_chns(void *data)
2335 {
2336 struct am65_cpsw_common *common = data;
2337 struct am65_cpsw_rx_chn *rx_chn;
2338
2339 rx_chn = &common->rx_chns;
2340
2341 if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
2342 k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
2343
2344 if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
2345 k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
2346 }
2347
2348 static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
2349 {
2350 struct device *dev = common->dev;
2351 struct am65_cpsw_rx_chn *rx_chn;
2352 struct am65_cpsw_rx_flow *flows;
2353 int i;
2354
2355 rx_chn = &common->rx_chns;
2356 flows = rx_chn->flows;
2357 devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
2358
2359 for (i = 0; i < common->rx_ch_num_flows; i++) {
2360 if (flows[i].irq >= 0)
2361 devm_free_irq(dev, flows[i].irq, &flows[i]);
2362 netif_napi_del(&flows[i].napi_rx);
2363 }
2364
2365 am65_cpsw_nuss_free_rx_chns(common);
2366
2367 common->rx_flow_id_base = -1;
2368 }
2369
2370 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
2371 {
2372 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
2373 struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
2374 u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
2375 struct device *dev = common->dev;
2376 struct am65_cpsw_rx_flow *flow;
2377 u32 hdesc_size, hdesc_size_out;
2378 u32 fdqring_id;
2379 int i, ret = 0;
2380
2381 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
2382 AM65_CPSW_NAV_SW_DATA_SIZE);
2383
2384 rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
2385 rx_cfg.flow_id_num = common->rx_ch_num_flows;
2386 rx_cfg.flow_id_base = common->rx_flow_id_base;
2387
2388 /* init all flows */
2389 rx_chn->dev = dev;
2390 rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
2391
2392 for (i = 0; i < common->rx_ch_num_flows; i++) {
2393 flow = &rx_chn->flows[i];
2394 flow->page_pool = NULL;
2395 }
2396
2397 rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
2398 if (IS_ERR(rx_chn->rx_chn)) {
2399 ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
2400 "Failed to request rx dma channel\n");
2401 goto err;
2402 }
2403 rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
2404
2405 rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
2406 rx_chn->descs_num,
2407 hdesc_size, "rx");
2408 if (IS_ERR(rx_chn->desc_pool)) {
2409 ret = PTR_ERR(rx_chn->desc_pool);
2410 dev_err(dev, "Failed to create rx poll %d\n", ret);
2411 goto err;
2412 }
2413
2414 hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
2415 rx_chn->dsize_log2 = __fls(hdesc_size_out);
2416 WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
2417
2418 common->rx_flow_id_base =
2419 k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
2420 dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
2421
2422 fdqring_id = K3_RINGACC_RING_ID_ANY;
2423 for (i = 0; i < rx_cfg.flow_id_num; i++) {
2424 struct k3_ring_cfg rxring_cfg = {
2425 .elm_size = K3_RINGACC_RING_ELSIZE_8,
2426 .mode = K3_RINGACC_RING_MODE_RING,
2427 .flags = 0,
2428 };
2429 struct k3_ring_cfg fdqring_cfg = {
2430 .elm_size = K3_RINGACC_RING_ELSIZE_8,
2431 .flags = K3_RINGACC_RING_SHARED,
2432 };
2433 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
2434 .rx_cfg = rxring_cfg,
2435 .rxfdq_cfg = fdqring_cfg,
2436 .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
2437 .src_tag_lo_sel =
2438 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
2439 };
2440
2441 flow = &rx_chn->flows[i];
2442 flow->id = i;
2443 flow->common = common;
2444 flow->irq = -EINVAL;
2445
2446 rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
2447 rx_flow_cfg.rx_cfg.size = max_desc_num;
2448 /* share same FDQ for all flows */
2449 rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
2450 rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
2451
2452 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
2453 i, &rx_flow_cfg);
2454 if (ret) {
2455 dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
2456 goto err;
2457 }
2458 if (!i)
2459 fdqring_id =
2460 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
2461 i);
2462
2463 flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
2464 if (flow->irq <= 0) {
2465 dev_err(dev, "Failed to get rx dma irq %d\n",
2466 flow->irq);
2467 ret = flow->irq;
2468 goto err;
2469 }
2470
2471 snprintf(flow->name,
2472 sizeof(flow->name), "%s-rx%d",
2473 dev_name(dev), i);
2474 netif_napi_add(common->dma_ndev, &flow->napi_rx,
2475 am65_cpsw_nuss_rx_poll);
2476 hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
2477 HRTIMER_MODE_REL_PINNED);
2478 flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
2479
2480 ret = devm_request_irq(dev, flow->irq,
2481 am65_cpsw_nuss_rx_irq,
2482 IRQF_TRIGGER_HIGH,
2483 flow->name, flow);
2484 if (ret) {
2485 dev_err(dev, "failure requesting rx %d irq %u, %d\n",
2486 i, flow->irq, ret);
2487 flow->irq = -EINVAL;
2488 goto err;
2489 }
2490 }
2491
2492 /* setup classifier to route priorities to flows */
2493 cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
2494
2495 err:
2496 i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
2497 if (i) {
2498 dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
2499 return i;
2500 }
2501
2502 return ret;
2503 }
2504
2505 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
2506 {
2507 struct am65_cpsw_host *host_p = am65_common_get_host(common);
2508
2509 host_p->common = common;
2510 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
2511 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
2512
2513 return 0;
2514 }
2515
2516 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
2517 int slave, u8 *mac_addr)
2518 {
2519 u32 mac_lo, mac_hi, offset;
2520 struct regmap *syscon;
2521 int ret;
2522
2523 syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
2524 if (IS_ERR(syscon)) {
2525 if (PTR_ERR(syscon) == -ENODEV)
2526 return 0;
2527 return PTR_ERR(syscon);
2528 }
2529
2530 ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
2531 &offset);
2532 if (ret)
2533 return ret;
2534
2535 regmap_read(syscon, offset, &mac_lo);
2536 regmap_read(syscon, offset + 4, &mac_hi);
2537
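/* The efuse holds the MAC address as two 32-bit words: mac_hi carries
 * bytes 0-1, mac_lo carries bytes 2-5.
 */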
2538 mac_addr[0] = (mac_hi >> 8) & 0xff;
2539 mac_addr[1] = mac_hi & 0xff;
2540 mac_addr[2] = (mac_lo >> 24) & 0xff;
2541 mac_addr[3] = (mac_lo >> 16) & 0xff;
2542 mac_addr[4] = (mac_lo >> 8) & 0xff;
2543 mac_addr[5] = mac_lo & 0xff;
2544
2545 return 0;
2546 }
2547
2548 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
2549 {
2550 struct device *dev = common->dev;
2551 struct device_node *node;
2552 struct am65_cpts *cpts;
2553 void __iomem *reg_base;
2554
2555 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
2556 return 0;
2557
2558 node = of_get_child_by_name(dev->of_node, "cpts");
2559 if (!node) {
2560 dev_err(dev, "%s cpts not found\n", __func__);
2561 return -ENOENT;
2562 }
2563
2564 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
2565 cpts = am65_cpts_create(dev, reg_base, node);
2566 if (IS_ERR(cpts)) {
2567 int ret = PTR_ERR(cpts);
2568
2569 of_node_put(node);
2570 dev_err(dev, "cpts create err %d\n", ret);
2571 return ret;
2572 }
2573 common->cpts = cpts;
2574 /* Forbid PM runtime if CPTS is running.
2575 * K3 CPSWxG modules may completely lose context during ON->OFF
2576 * transitions depending on integration.
2577 * AM65x/J721E MCU CPSW2G: context is retained
2578 * J721E MAIN_CPSW9G: context is lost
2579 */
2580 pm_runtime_forbid(dev);
2581
2582 return 0;
2583 }
2584
2585 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
2586 {
2587 struct device_node *node, *port_np;
2588 struct device *dev = common->dev;
2589 int ret;
2590
2591 node = of_get_child_by_name(dev->of_node, "ethernet-ports");
2592 if (!node)
2593 return -ENOENT;
2594
2595 for_each_child_of_node(node, port_np) {
2596 struct am65_cpsw_port *port;
2597 u32 port_id;
2598
2599 /* skip nodes that are not slave port nodes */
2600 if (strcmp(port_np->name, "port"))
2601 continue;
2602
2603 ret = of_property_read_u32(port_np, "reg", &port_id);
2604 if (ret < 0) {
2605 dev_err(dev, "%pOF error reading port_id %d\n",
2606 port_np, ret);
2607 goto of_node_put;
2608 }
2609
2610 if (!port_id || port_id > common->port_num) {
2611 dev_err(dev, "%pOF has invalid port_id %u %s\n",
2612 port_np, port_id, port_np->name);
2613 ret = -EINVAL;
2614 goto of_node_put;
2615 }
2616
2617 port = am65_common_get_port(common, port_id);
2618 port->port_id = port_id;
2619 port->common = common;
2620 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
2621 AM65_CPSW_NU_PORTS_OFFSET * (port_id);
2622 if (common->pdata.extra_modes)
2623 port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
2624 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
2625 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
2626 port->name = of_get_property(port_np, "label", NULL);
2627 port->fetch_ram_base =
2628 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
2629 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
2630
2631 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
2632 if (IS_ERR(port->slave.mac_sl)) {
2633 ret = PTR_ERR(port->slave.mac_sl);
2634 goto of_node_put;
2635 }
2636
2637 port->disabled = !of_device_is_available(port_np);
2638 if (port->disabled) {
2639 common->disabled_ports_mask |= BIT(port->port_id);
2640 continue;
2641 }
2642
2643 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
2644 if (IS_ERR(port->slave.ifphy)) {
2645 ret = PTR_ERR(port->slave.ifphy);
2646 dev_err(dev, "%pOF error retrieving port phy: %d\n",
2647 port_np, ret);
2648 goto of_node_put;
2649 }
2650
2651 /* Initialize the Serdes PHY for the port */
2652 ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
2653 if (ret)
2654 goto of_node_put;
2655
2656 port->slave.mac_only =
2657 of_property_read_bool(port_np, "ti,mac-only");
2658
2659 /* get phy/link info */
2660 port->slave.port_np = port_np;
2661 ret = of_get_phy_mode(port_np, &port->slave.phy_if);
2662 if (ret) {
2663 dev_err(dev, "%pOF read phy-mode err %d\n",
2664 port_np, ret);
2665 goto of_node_put;
2666 }
2667
2668 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
2669 if (ret)
2670 goto of_node_put;
2671
2672 ret = of_get_mac_address(port_np, port->slave.mac_addr);
2673 if (ret) {
2674 am65_cpsw_am654_get_efuse_macid(port_np,
2675 port->port_id,
2676 port->slave.mac_addr);
2677 if (!is_valid_ether_addr(port->slave.mac_addr)) {
2678 eth_random_addr(port->slave.mac_addr);
2679 dev_err(dev, "Use random MAC address\n");
2680 }
2681 }
2682
2683 /* Reset all Queue priorities to 0 */
2684 writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
2685 }
2686 of_node_put(node);
2687
2688 /* make sure at least one external port is available */
2689 if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
2690 dev_err(dev, "No Ext. port are available\n");
2691 return -ENODEV;
2692 }
2693
2694 return 0;
2695
2696 of_node_put:
2697 of_node_put(port_np);
2698 of_node_put(node);
2699 return ret;
2700 }
2701
2702 static void am65_cpsw_pcpu_stats_free(void *data)
2703 {
2704 struct am65_cpsw_ndev_stats __percpu *stats = data;
2705
2706 free_percpu(stats);
2707 }
2708
2709 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
2710 {
2711 struct am65_cpsw_port *port;
2712 int i;
2713
2714 for (i = 0; i < common->port_num; i++) {
2715 port = &common->ports[i];
2716 if (port->slave.phylink)
2717 phylink_destroy(port->slave.phylink);
2718 }
2719 }
2720
2721 static int
2722 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
2723 {
2724 struct am65_cpsw_ndev_priv *ndev_priv;
2725 struct device *dev = common->dev;
2726 struct am65_cpsw_port *port;
2727 struct phylink *phylink;
2728 int ret;
2729
2730 port = &common->ports[port_idx];
2731
2732 if (port->disabled)
2733 return 0;
2734
2735 /* alloc netdev */
2736 port->ndev = alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv),
2737 AM65_CPSW_MAX_QUEUES,
2738 AM65_CPSW_MAX_QUEUES);
2739 if (!port->ndev) {
2740 dev_err(dev, "error allocating slave net_device %u\n",
2741 port->port_id);
2742 return -ENOMEM;
2743 }
2744
2745 ndev_priv = netdev_priv(port->ndev);
2746 ndev_priv->port = port;
2747 ndev_priv->msg_enable = AM65_CPSW_DEBUG;
2748 mutex_init(&ndev_priv->mm_lock);
2749 port->qos.link_speed = SPEED_UNKNOWN;
2750 SET_NETDEV_DEV(port->ndev, dev);
2751 port->ndev->dev.of_node = port->slave.port_np;
2752
2753 eth_hw_addr_set(port->ndev, port->slave.mac_addr);
2754
2755 port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
2756 port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
2757 (VLAN_ETH_HLEN + ETH_FCS_LEN);
2758 port->ndev->hw_features = NETIF_F_SG |
2759 NETIF_F_RXCSUM |
2760 NETIF_F_HW_CSUM |
2761 NETIF_F_HW_TC;
2762 port->ndev->features = port->ndev->hw_features |
2763 NETIF_F_HW_VLAN_CTAG_FILTER;
2764 port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
2765 NETDEV_XDP_ACT_REDIRECT |
2766 NETDEV_XDP_ACT_NDO_XMIT;
2767 port->ndev->vlan_features |= NETIF_F_SG;
2768 port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
2769 port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
2770
2771 /* Configuring Phylink */
2772 port->slave.phylink_config.dev = &port->ndev->dev;
2773 port->slave.phylink_config.type = PHYLINK_NETDEV;
2774 port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
2775 MAC_1000FD | MAC_5000FD;
2776 port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
2777
2778 switch (port->slave.phy_if) {
2779 case PHY_INTERFACE_MODE_RGMII:
2780 case PHY_INTERFACE_MODE_RGMII_ID:
2781 case PHY_INTERFACE_MODE_RGMII_RXID:
2782 case PHY_INTERFACE_MODE_RGMII_TXID:
2783 phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
2784 break;
2785
2786 case PHY_INTERFACE_MODE_RMII:
2787 __set_bit(PHY_INTERFACE_MODE_RMII,
2788 port->slave.phylink_config.supported_interfaces);
2789 break;
2790
2791 case PHY_INTERFACE_MODE_QSGMII:
2792 case PHY_INTERFACE_MODE_SGMII:
2793 case PHY_INTERFACE_MODE_USXGMII:
2794 if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
2795 __set_bit(port->slave.phy_if,
2796 port->slave.phylink_config.supported_interfaces);
2797 } else {
2798 dev_err(dev, "selected phy-mode is not supported\n");
2799 return -EOPNOTSUPP;
2800 }
2801 break;
2802
2803 default:
2804 dev_err(dev, "selected phy-mode is not supported\n");
2805 return -EOPNOTSUPP;
2806 }
2807
2808 phylink = phylink_create(&port->slave.phylink_config,
2809 of_fwnode_handle(port->slave.port_np),
2810 port->slave.phy_if,
2811 &am65_cpsw_phylink_mac_ops);
2812 if (IS_ERR(phylink))
2813 return PTR_ERR(phylink);
2814
2815 port->slave.phylink = phylink;
2816
2817 /* Disable TX checksum offload by default due to HW bug */
2818 if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
2819 port->ndev->features &= ~NETIF_F_HW_CSUM;
2820
2821 ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
2822 if (!ndev_priv->stats)
2823 return -ENOMEM;
2824
2825 ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
2826 ndev_priv->stats);
2827 if (ret)
2828 dev_err(dev, "failed to add percpu stat free action %d\n", ret);
2829
2830 port->xdp_prog = NULL;
2831
2832 if (!common->dma_ndev)
2833 common->dma_ndev = port->ndev;
2834
2835 return ret;
2836 }
2837
2838 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
2839 {
2840 int ret;
2841 int i;
2842
2843 for (i = 0; i < common->port_num; i++) {
2844 ret = am65_cpsw_nuss_init_port_ndev(common, i);
2845 if (ret)
2846 return ret;
2847 }
2848
2849 return ret;
2850 }
2851
2852 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
2853 {
2854 struct am65_cpsw_port *port;
2855 int i;
2856
2857 for (i = 0; i < common->port_num; i++) {
2858 port = &common->ports[i];
2859 if (!port->ndev)
2860 continue;
2861 if (port->ndev->reg_state == NETREG_REGISTERED)
2862 unregister_netdev(port->ndev);
2863 free_netdev(port->ndev);
2864 port->ndev = NULL;
2865 }
2866 }
2867
2868 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
2869 {
2870 int set_val = 0;
2871 int i;
2872
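/* Enable offload_fwd_mark only when every enabled external port is a
 * member of the (single) HW bridge.
 */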
2873 if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
2874 set_val = 1;
2875
2876 dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
2877
2878 for (i = 1; i <= common->port_num; i++) {
2879 struct am65_cpsw_port *port = am65_common_get_port(common, i);
2880 struct am65_cpsw_ndev_priv *priv;
2881
2882 if (!port->ndev)
2883 continue;
2884
2885 priv = am65_ndev_to_priv(port->ndev);
2886 priv->offload_fwd_mark = set_val;
2887 }
2888 }
2889
2890 bool am65_cpsw_port_dev_check(const struct net_device *ndev)
2891 {
2892 if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
2893 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2894
2895 return !common->is_emac_mode;
2896 }
2897
2898 return false;
2899 }
2900
2901 static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
2902 struct net_device *br_ndev,
2903 struct netlink_ext_ack *extack)
2904 {
2905 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2906 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2907 int err;
2908
2909 if (!common->br_members) {
2910 common->hw_bridge_dev = br_ndev;
2911 } else {
2912 /* Adding the port to a second bridge is not
2913 * supported
2914 */
2915 if (common->hw_bridge_dev != br_ndev)
2916 return -EOPNOTSUPP;
2917 }
2918
2919 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
2920 false, extack);
2921 if (err)
2922 return err;
2923
2924 common->br_members |= BIT(priv->port->port_id);
2925
2926 am65_cpsw_port_offload_fwd_mark_update(common);
2927
2928 return NOTIFY_DONE;
2929 }
2930
2931 static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
2932 {
2933 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
2934 struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
2935
2936 switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
2937
2938 common->br_members &= ~BIT(priv->port->port_id);
2939
2940 am65_cpsw_port_offload_fwd_mark_update(common);
2941
2942 if (!common->br_members)
2943 common->hw_bridge_dev = NULL;
2944 }
2945
2946 /* netdev notifier */
2947 static int am65_cpsw_netdevice_event(struct notifier_block *unused,
2948 unsigned long event, void *ptr)
2949 {
2950 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
2951 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2952 struct netdev_notifier_changeupper_info *info;
2953 int ret = NOTIFY_DONE;
2954
2955 if (!am65_cpsw_port_dev_check(ndev))
2956 return NOTIFY_DONE;
2957
2958 switch (event) {
2959 case NETDEV_CHANGEUPPER:
2960 info = ptr;
2961
2962 if (netif_is_bridge_master(info->upper_dev)) {
2963 if (info->linking)
2964 ret = am65_cpsw_netdevice_port_link(ndev,
2965 info->upper_dev,
2966 extack);
2967 else
2968 am65_cpsw_netdevice_port_unlink(ndev);
2969 }
2970 break;
2971 default:
2972 return NOTIFY_DONE;
2973 }
2974
2975 return notifier_from_errno(ret);
2976 }
2977
2978 static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
2979 {
2980 int ret = 0;
2981
2982 if (AM65_CPSW_IS_CPSW2G(cpsw) ||
2983 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
2984 return 0;
2985
2986 cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
2987 ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2988 if (ret) {
2989 dev_err(cpsw->dev, "can't register netdevice notifier\n");
2990 return ret;
2991 }
2992
2993 ret = am65_cpsw_switchdev_register_notifiers(cpsw);
2994 if (ret)
2995 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
2996
2997 return ret;
2998 }
2999
3000 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
3001 {
3002 if (AM65_CPSW_IS_CPSW2G(cpsw) ||
3003 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3004 return;
3005
3006 am65_cpsw_switchdev_unregister_notifiers(cpsw);
3007 unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
3008 }
3009
3010 static const struct devlink_ops am65_cpsw_devlink_ops = {};
3011
3012 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
3013 {
3014 cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
3015 ALE_MCAST_BLOCK_LEARN_FWD);
3016 }
3017
3018 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
3019 {
3020 struct am65_cpsw_host *host = am65_common_get_host(common);
3021
3022 writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3023
3024 am65_cpsw_init_stp_ale_entry(common);
3025
3026 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
3027 dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
3028 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
3029 }
3030
3031 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
3032 {
3033 struct am65_cpsw_host *host = am65_common_get_host(common);
3034
3035 writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3036
3037 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
3038 dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
3039
3040 /* learning makes no sense in multi-mac mode */
3041 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
3042 }
3043
3044 static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
3045 struct devlink_param_gset_ctx *ctx)
3046 {
3047 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
3048 struct am65_cpsw_common *common = dl_priv->common;
3049
3050 dev_dbg(common->dev, "%s id:%u\n", __func__, id);
3051
3052 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3053 return -EOPNOTSUPP;
3054
3055 ctx->val.vbool = !common->is_emac_mode;
3056
3057 return 0;
3058 }
3059
3060 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
3061 {
3062 struct am65_cpsw_slave_data *slave = &port->slave;
3063 struct am65_cpsw_common *common = port->common;
3064 u32 port_mask;
3065
3066 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3067
3068 if (slave->mac_only)
3069 /* enable mac-only mode on port */
3070 cpsw_ale_control_set(common->ale, port->port_id,
3071 ALE_PORT_MACONLY, 1);
3072
3073 cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
3074
3075 port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3076
3077 cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
3078 HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
3079 cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
3080 port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
3081 }
3082
3083 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
3084 {
3085 struct am65_cpsw_slave_data *slave = &port->slave;
3086 struct am65_cpsw_common *cpsw = port->common;
3087 u32 port_mask;
3088
3089 cpsw_ale_control_set(cpsw->ale, port->port_id,
3090 ALE_PORT_NOLEARN, 0);
3091
3092 cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
3093 HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
3094 slave->port_vlan);
3095
3096 port_mask = BIT(port->port_id) | ALE_PORT_HOST;
3097
3098 cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
3099 port_mask, ALE_VLAN, slave->port_vlan,
3100 ALE_MCAST_FWD_2);
3101
3102 writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3103
3104 cpsw_ale_control_set(cpsw->ale, port->port_id,
3105 ALE_PORT_MACONLY, 0);
3106 }
3107
3108 static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
3109 struct devlink_param_gset_ctx *ctx,
3110 struct netlink_ext_ack *extack)
3111 {
3112 struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
3113 struct am65_cpsw_common *cpsw = dl_priv->common;
3114 bool switch_en = ctx->val.vbool;
3115 bool if_running = false;
3116 int i;
3117
3118 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
3119
3120 if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
3121 return -EOPNOTSUPP;
3122
3123 if (switch_en == !cpsw->is_emac_mode)
3124 return 0;
3125
3126 if (!switch_en && cpsw->br_members) {
3127 dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
3128 return -EINVAL;
3129 }
3130
3131 rtnl_lock();
3132
3133 cpsw->is_emac_mode = !switch_en;
3134
3135 for (i = 0; i < cpsw->port_num; i++) {
3136 struct net_device *sl_ndev = cpsw->ports[i].ndev;
3137
3138 if (!sl_ndev || !netif_running(sl_ndev))
3139 continue;
3140
3141 if_running = true;
3142 }
3143
3144 if (!if_running) {
3145 /* all ndevs are down */
3146 for (i = 0; i < cpsw->port_num; i++) {
3147 struct net_device *sl_ndev = cpsw->ports[i].ndev;
3148 struct am65_cpsw_slave_data *slave;
3149
3150 if (!sl_ndev)
3151 continue;
3152
3153 slave = am65_ndev_to_slave(sl_ndev);
3154 if (switch_en)
3155 slave->port_vlan = cpsw->default_vlan;
3156 else
3157 slave->port_vlan = 0;
3158 }
3159
3160 goto exit;
3161 }
3162
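/* Bypass the ALE while its table is cleared and re-populated for the new mode */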
3163 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
3164 /* clean up ALE table */
3165 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
3166 cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
3167
3168 if (switch_en) {
3169 dev_info(cpsw->dev, "Enable switch mode\n");
3170
3171 am65_cpsw_init_host_port_switch(cpsw);
3172
3173 for (i = 0; i < cpsw->port_num; i++) {
3174 struct net_device *sl_ndev = cpsw->ports[i].ndev;
3175 struct am65_cpsw_slave_data *slave;
3176 struct am65_cpsw_port *port;
3177
3178 if (!sl_ndev)
3179 continue;
3180
3181 port = am65_ndev_to_port(sl_ndev);
3182 slave = am65_ndev_to_slave(sl_ndev);
3183 slave->port_vlan = cpsw->default_vlan;
3184
3185 if (netif_running(sl_ndev))
3186 am65_cpsw_init_port_switch_ale(port);
3187 }
3188
3189 } else {
3190 dev_info(cpsw->dev, "Disable switch mode\n");
3191
3192 am65_cpsw_init_host_port_emac(cpsw);
3193
3194 for (i = 0; i < cpsw->port_num; i++) {
3195 struct net_device *sl_ndev = cpsw->ports[i].ndev;
3196 struct am65_cpsw_port *port;
3197
3198 if (!sl_ndev)
3199 continue;
3200
3201 port = am65_ndev_to_port(sl_ndev);
3202 port->slave.port_vlan = 0;
3203 if (netif_running(sl_ndev))
3204 am65_cpsw_init_port_emac_ale(port);
3205 }
3206 }
3207 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
3208 exit:
3209 rtnl_unlock();
3210
3211 return 0;
3212 }
3213
3214 static const struct devlink_param am65_cpsw_devlink_params[] = {
3215 DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
3216 DEVLINK_PARAM_TYPE_BOOL,
3217 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3218 am65_cpsw_dl_switch_mode_get,
3219 am65_cpsw_dl_switch_mode_set, NULL),
3220 };
3221
3222 static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
3223 {
3224 struct devlink_port_attrs attrs = {};
3225 struct am65_cpsw_devlink *dl_priv;
3226 struct device *dev = common->dev;
3227 struct devlink_port *dl_port;
3228 struct am65_cpsw_port *port;
3229 int ret = 0;
3230 int i;
3231
3232 common->devlink =
3233 devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
3234 if (!common->devlink)
3235 return -ENOMEM;
3236
3237 dl_priv = devlink_priv(common->devlink);
3238 dl_priv->common = common;
3239
3240 /* Provide devlink hook to switch mode when multiple external ports
3241 * are present and the NUSS switchdev driver is enabled.
3242 */
3243 if (!AM65_CPSW_IS_CPSW2G(common) &&
3244 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
3245 ret = devlink_params_register(common->devlink,
3246 am65_cpsw_devlink_params,
3247 ARRAY_SIZE(am65_cpsw_devlink_params));
3248 if (ret) {
3249 dev_err(dev, "devlink params reg fail ret:%d\n", ret);
3250 goto dl_unreg;
3251 }
3252 }
3253
3254 for (i = 1; i <= common->port_num; i++) {
3255 port = am65_common_get_port(common, i);
3256 dl_port = &port->devlink_port;
3257
3258 if (port->ndev)
3259 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
3260 else
3261 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
3262 attrs.phys.port_number = port->port_id;
3263 attrs.switch_id.id_len = sizeof(resource_size_t);
3264 memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
3265 devlink_port_attrs_set(dl_port, &attrs);
3266
3267 ret = devlink_port_register(common->devlink, dl_port, port->port_id);
3268 if (ret) {
3269 dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
3270 port->port_id, ret);
3271 goto dl_port_unreg;
3272 }
3273 }
3274 devlink_register(common->devlink);
3275 return ret;
3276
3277 dl_port_unreg:
3278 for (i = i - 1; i >= 1; i--) {
3279 port = am65_common_get_port(common, i);
3280 dl_port = &port->devlink_port;
3281
3282 devlink_port_unregister(dl_port);
3283 }
3284 dl_unreg:
3285 devlink_free(common->devlink);
3286 return ret;
3287 }
3288
3289 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
3290 {
3291 struct devlink_port *dl_port;
3292 struct am65_cpsw_port *port;
3293 int i;
3294
3295 devlink_unregister(common->devlink);
3296
3297 for (i = 1; i <= common->port_num; i++) {
3298 port = am65_common_get_port(common, i);
3299 dl_port = &port->devlink_port;
3300
3301 devlink_port_unregister(dl_port);
3302 }
3303
3304 if (!AM65_CPSW_IS_CPSW2G(common) &&
3305 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
3306 devlink_params_unregister(common->devlink,
3307 am65_cpsw_devlink_params,
3308 ARRAY_SIZE(am65_cpsw_devlink_params));
3309
3310 devlink_free(common->devlink);
3311 }
3312
3313 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
3314 {
3315 struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
3316 struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
3317 struct device *dev = common->dev;
3318 struct am65_cpsw_port *port;
3319 int ret = 0, i;
3320
3321 /* init tx channels */
3322 ret = am65_cpsw_nuss_init_tx_chns(common);
3323 if (ret)
3324 return ret;
3325 ret = am65_cpsw_nuss_init_rx_chns(common);
3326 if (ret)
3327 return ret;
3328
3329 /* The DMA Channels are not guaranteed to be in a clean state.
3330 * Reset and disable them to ensure that they are back to the
3331 * clean state and ready to be used.
3332 */
3333 for (i = 0; i < common->tx_ch_num; i++) {
3334 k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
3335 am65_cpsw_nuss_tx_cleanup);
3336 k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
3337 }
3338
3339 for (i = 0; i < common->rx_ch_num_flows; i++)
3340 k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
3341 rx_chan,
3342 am65_cpsw_nuss_rx_cleanup, !!i);
3343
3344 k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
3345
3346 ret = am65_cpsw_nuss_register_devlink(common);
3347 if (ret)
3348 return ret;
3349
3350 for (i = 0; i < common->port_num; i++) {
3351 port = &common->ports[i];
3352
3353 if (!port->ndev)
3354 continue;
3355
3356 SET_NETDEV_DEVLINK_PORT(port->ndev, &port->devlink_port);
3357
3358 ret = register_netdev(port->ndev);
3359 if (ret) {
3360 dev_err(dev, "error registering slave net device%i %d\n",
3361 i, ret);
3362 goto err_cleanup_ndev;
3363 }
3364 }
3365
3366 ret = am65_cpsw_register_notifiers(common);
3367 if (ret)
3368 goto err_cleanup_ndev;
3369
3370 /* can't auto-unregister ndevs using devm_add_action() due to the
3371 * devres release ordering in the driver core for DMA resources
3372 */
3373
3374 return 0;
3375
3376 err_cleanup_ndev:
3377 am65_cpsw_nuss_cleanup_ndev(common);
3378 am65_cpsw_unregister_devlink(common);
3379
3380 return ret;
3381 }
3382
3383 int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
3384 int num_tx, int num_rx)
3385 {
3386 int ret;
3387
3388 am65_cpsw_nuss_remove_tx_chns(common);
3389 am65_cpsw_nuss_remove_rx_chns(common);
3390
3391 common->tx_ch_num = num_tx;
3392 common->rx_ch_num_flows = num_rx;
3393 ret = am65_cpsw_nuss_init_tx_chns(common);
3394 if (ret)
3395 return ret;
3396
3397 ret = am65_cpsw_nuss_init_rx_chns(common);
3398
3399 return ret;
3400 }
3401
3402 struct am65_cpsw_soc_pdata {
3403 u32 quirks_dis;
3404 };
3405
3406 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
3407 .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3408 };
3409
3410 static const struct soc_device_attribute am65_cpsw_socinfo[] = {
3411 { .family = "AM65X",
3412 .revision = "SR2.0",
3413 .data = &am65x_soc_sr2_0
3414 },
3415 {/* sentinel */}
3416 };
3417
3418 static const struct am65_cpsw_pdata am65x_sr1_0 = {
3419 .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
3420 .ale_dev_id = "am65x-cpsw2g",
3421 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3422 };
3423
3424 static const struct am65_cpsw_pdata j721e_pdata = {
3425 .quirks = 0,
3426 .ale_dev_id = "am65x-cpsw2g",
3427 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3428 };
3429
3430 static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
3431 .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
3432 .ale_dev_id = "am64-cpswxg",
3433 .fdqring_mode = K3_RINGACC_RING_MODE_RING,
3434 };
3435
3436 static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
3437 .quirks = 0,
3438 .ale_dev_id = "am64-cpswxg",
3439 .fdqring_mode = K3_RINGACC_RING_MODE_RING,
3440 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
3441 };
3442
3443 static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
3444 .quirks = 0,
3445 .ale_dev_id = "am64-cpswxg",
3446 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3447 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
3448 };
3449
3450 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
3451 .quirks = 0,
3452 .ale_dev_id = "am64-cpswxg",
3453 .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
3454 .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
3455 BIT(PHY_INTERFACE_MODE_USXGMII),
3456 };
3457
3458 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
3459 { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
3460 { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
3461 { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
3462 { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
3463 { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
3464 { .compatible = "ti,j784s4-cpswxg-nuss", .data = &j784s4_cpswxg_pdata},
3465 { /* sentinel */ },
3466 };
3467 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
3468
3469 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
3470 {
3471 const struct soc_device_attribute *soc;
3472
3473 soc = soc_device_match(am65_cpsw_socinfo);
3474 if (soc && soc->data) {
3475 const struct am65_cpsw_soc_pdata *socdata = soc->data;
3476
3477 /* disable quirks */
3478 common->pdata.quirks &= ~socdata->quirks_dis;
3479 }
3480 }
3481
3482 static int am65_cpsw_nuss_probe(struct platform_device *pdev)
3483 {
3484 struct cpsw_ale_params ale_params = { 0 };
3485 const struct of_device_id *of_id;
3486 struct device *dev = &pdev->dev;
3487 struct am65_cpsw_common *common;
3488 struct device_node *node;
3489 struct resource *res;
3490 struct clk *clk;
3491 int ale_entries;
3492 u64 id_temp;
3493 int ret, i;
3494
3495 common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
3496 if (!common)
3497 return -ENOMEM;
3498 common->dev = dev;
3499
3500 of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
3501 if (!of_id)
3502 return -EINVAL;
3503 common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
3504
3505 am65_cpsw_nuss_apply_socinfo(common);
3506
3507 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
3508 common->ss_base = devm_ioremap_resource(&pdev->dev, res);
3509 if (IS_ERR(common->ss_base))
3510 return PTR_ERR(common->ss_base);
3511 common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
3512 /* Use device's physical base address as switch id */
3513 id_temp = cpu_to_be64(res->start);
3514 memcpy(common->switch_id, &id_temp, sizeof(res->start));
3515
3516 node = of_get_child_by_name(dev->of_node, "ethernet-ports");
3517 if (!node)
3518 return -ENOENT;
3519 common->port_num = of_get_child_count(node);
3520 of_node_put(node);
3521 if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
3522 return -ENOENT;
3523
3524 common->rx_flow_id_base = -1;
3525 init_completion(&common->tdown_complete);
3526 common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
3527 common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
3528 common->pf_p0_rx_ptype_rrobin = false;
3529 common->default_vlan = 1;
3530
3531 common->ports = devm_kcalloc(dev, common->port_num,
3532 sizeof(*common->ports),
3533 GFP_KERNEL);
3534 if (!common->ports)
3535 return -ENOMEM;
3536
3537 clk = devm_clk_get(dev, "fck");
3538 if (IS_ERR(clk))
3539 return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
3540 common->bus_freq = clk_get_rate(clk);
3541
3542 pm_runtime_enable(dev);
3543 ret = pm_runtime_resume_and_get(dev);
3544 if (ret < 0) {
3545 pm_runtime_disable(dev);
3546 return ret;
3547 }
3548
3549 node = of_get_child_by_name(dev->of_node, "mdio");
3550 if (!node) {
3551 dev_warn(dev, "MDIO node not found\n");
3552 } else if (of_device_is_available(node)) {
3553 struct platform_device *mdio_pdev;
3554
3555 mdio_pdev = of_platform_device_create(node, NULL, dev);
3556 if (!mdio_pdev) {
3557 ret = -ENODEV;
3558 goto err_pm_clear;
3559 }
3560
3561 common->mdio_dev = &mdio_pdev->dev;
3562 }
3563 of_node_put(node);
3564
3565 am65_cpsw_nuss_get_ver(common);
3566
3567 ret = am65_cpsw_nuss_init_host_p(common);
3568 if (ret)
3569 goto err_of_clear;
3570
3571 ret = am65_cpsw_nuss_init_slave_ports(common);
3572 if (ret)
3573 goto err_of_clear;
3574
3575 /* init common data */
3576 ale_params.dev = dev;
3577 ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
3578 ale_params.ale_ports = common->port_num + 1;
3579 ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
3580 ale_params.dev_id = common->pdata.ale_dev_id;
3581 ale_params.bus_freq = common->bus_freq;
3582
3583 common->ale = cpsw_ale_create(&ale_params);
3584 if (IS_ERR(common->ale)) {
3585 dev_err(dev, "error initializing ale engine\n");
3586 ret = PTR_ERR(common->ale);
3587 goto err_of_clear;
3588 }
3589
3590 ale_entries = common->ale->params.ale_entries;
3591 common->ale_context = devm_kzalloc(dev,
3592 ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
3593 GFP_KERNEL);
if (!common->ale_context) {
	ret = -ENOMEM;
	goto err_of_clear;
}
3594 ret = am65_cpsw_init_cpts(common);
3595 if (ret)
3596 goto err_of_clear;
3597
3598 /* init ports */
3599 for (i = 0; i < common->port_num; i++)
3600 am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
3601
3602 dev_set_drvdata(dev, common);
3603
3604 common->is_emac_mode = true;
3605
3606 ret = am65_cpsw_nuss_init_ndevs(common);
3607 if (ret)
3608 goto err_ndevs_clear;
3609
3610 ret = am65_cpsw_nuss_register_ndevs(common);
3611 if (ret)
3612 goto err_ndevs_clear;
3613
3614 pm_runtime_put(dev);
3615 return 0;
3616
3617 err_ndevs_clear:
3618 am65_cpsw_nuss_cleanup_ndev(common);
3619 am65_cpsw_nuss_phylink_cleanup(common);
3620 am65_cpts_release(common->cpts);
3621 err_of_clear:
3622 if (common->mdio_dev)
3623 of_platform_device_destroy(common->mdio_dev, NULL);
3624 err_pm_clear:
3625 pm_runtime_put_sync(dev);
3626 pm_runtime_disable(dev);
3627 return ret;
3628 }
3629
3630 static void am65_cpsw_nuss_remove(struct platform_device *pdev)
3631 {
3632 struct device *dev = &pdev->dev;
3633 struct am65_cpsw_common *common;
3634 int ret;
3635
3636 common = dev_get_drvdata(dev);
3637
3638 ret = pm_runtime_resume_and_get(&pdev->dev);
3639 if (ret < 0) {
3640 /* Note, if this error path is taken, we're leaking some
3641 * resources.
3642 */
3643 dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
3644 ERR_PTR(ret));
3645 return;
3646 }
3647
3648 am65_cpsw_unregister_notifiers(common);
3649
3650 /* Must unregister the ndevs here: the driver core's release_driver path
3651 * calls dma_deconfigure(dev) before devres_release_all(dev).
3652 */
3653 am65_cpsw_nuss_cleanup_ndev(common);
3654 am65_cpsw_unregister_devlink(common);
3655 am65_cpsw_nuss_phylink_cleanup(common);
3656 am65_cpts_release(common->cpts);
3657 am65_cpsw_disable_serdes_phy(common);
3658
3659 if (common->mdio_dev)
3660 of_platform_device_destroy(common->mdio_dev, NULL);
3661
3662 pm_runtime_put_sync(&pdev->dev);
3663 pm_runtime_disable(&pdev->dev);
3664 }
3665
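/* System suspend: save the ALE table and the host/slave port VLAN registers,
 * detach and stop any running net devices, suspend CPTS and finally tear down
 * the DMA channels so they can be rebuilt on resume.
 */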
3666 static int am65_cpsw_nuss_suspend(struct device *dev)
3667 {
3668 struct am65_cpsw_common *common = dev_get_drvdata(dev);
3669 struct am65_cpsw_host *host_p = am65_common_get_host(common);
3670 struct am65_cpsw_port *port;
3671 struct net_device *ndev;
3672 int i, ret;
3673
3674 cpsw_ale_dump(common->ale, common->ale_context);
3675 host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3676 for (i = 0; i < common->port_num; i++) {
3677 port = &common->ports[i];
3678 ndev = port->ndev;
3679
3680 if (!ndev)
3681 continue;
3682
3683 port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3684 netif_device_detach(ndev);
3685 if (netif_running(ndev)) {
3686 rtnl_lock();
3687 ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
3688 rtnl_unlock();
3689 if (ret < 0) {
3690 netdev_err(ndev, "failed to stop: %d\n", ret);
3691 return ret;
3692 }
3693 }
3694 }
3695
3696 am65_cpts_suspend(common->cpts);
3697
3698 am65_cpsw_nuss_remove_rx_chns(common);
3699 am65_cpsw_nuss_remove_tx_chns(common);
3700
3701 return 0;
3702 }
3703
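/* System resume: rebuild the DMA channels (re-applying any "IRQ disabled"
 * state recorded before suspend), resume CPTS, reopen the ports that were
 * running, then restore the per-port VLAN registers and the ALE table.
 */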
3704 static int am65_cpsw_nuss_resume(struct device *dev)
3705 {
3706 struct am65_cpsw_common *common = dev_get_drvdata(dev);
3707 struct am65_cpsw_host *host_p = am65_common_get_host(common);
3708 struct am65_cpsw_port *port;
3709 struct net_device *ndev;
3710 int i, ret;
3711
3712 ret = am65_cpsw_nuss_init_tx_chns(common);
3713 if (ret)
3714 return ret;
3715 ret = am65_cpsw_nuss_init_rx_chns(common);
3716 if (ret)
3717 return ret;
3718
3719 /* If RX IRQ was disabled before suspend, keep it disabled */
3720 for (i = 0; i < common->rx_ch_num_flows; i++) {
3721 if (common->rx_chns.flows[i].irq_disabled)
3722 disable_irq(common->rx_chns.flows[i].irq);
3723 }
3724
3725 am65_cpts_resume(common->cpts);
3726
3727 for (i = 0; i < common->port_num; i++) {
3728 port = &common->ports[i];
3729 ndev = port->ndev;
3730
3731 if (!ndev)
3732 continue;
3733
3734 if (netif_running(ndev)) {
3735 rtnl_lock();
3736 ret = am65_cpsw_nuss_ndo_slave_open(ndev);
3737 rtnl_unlock();
3738 if (ret < 0) {
3739 netdev_err(ndev, "failed to start: %d\n", ret);
3740 return ret;
3741 }
3742 }
3743
3744 netif_device_attach(ndev);
3745 writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3746 }
3747
3748 writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
3749 cpsw_ale_restore(common->ale, common->ale_context);
3750
3751 return 0;
3752 }
3753
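/* Only system-sleep callbacks are wired up here; runtime PM is driven
 * explicitly through the pm_runtime_*() calls in probe/remove.
 */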
3754 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
3755 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
3756 };
3757
3758 static struct platform_driver am65_cpsw_nuss_driver = {
3759 .driver = {
3760 .name = AM65_CPSW_DRV_NAME,
3761 .of_match_table = am65_cpsw_nuss_of_mtable,
3762 .pm = &am65_cpsw_nuss_dev_pm_ops,
3763 },
3764 .probe = am65_cpsw_nuss_probe,
3765 .remove_new = am65_cpsw_nuss_remove,
3766 };
3767
3768 module_platform_driver(am65_cpsw_nuss_driver);
3769
3770 MODULE_LICENSE("GPL v2");
3771 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
3772 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
3773