1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
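/* Verdict flags returned by the driver's XDP RX path: a frame is either
 * passed to the stack, consumed (dropped), transmitted back on a TX queue,
 * or redirected to another interface.
 */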
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
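/* Convert an LPI timer value (in microseconds) into an absolute jiffies
 * deadline for the SW EEE timer.
 */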
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force use of chain mode instead of ring mode.
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
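/* Convert a TX coalesce timer value expressed in microseconds into the
 * ktime_t used when arming the TX coalescing hrtimer.
 */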
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
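/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down or
 * the task has already been scheduled.
 */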
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* The platform-provided default clk_csr is assumed valid
308 * for all cases except the ones handled below.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider because the frequency
311 * of clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
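/* For example, a 75 MHz csr clock falls into the 60-100 MHz range below
 * and selects STMMAC_CSR_60_100M.
 */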
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
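/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: return the number of free slots in the TX ring; one slot is
 * always kept unused so that a full ring can be told apart from an empty one.
 */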
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
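/**
 * stmmac_lpi_entry_timer_config - select HW or SW LPI entry timer
 * @priv: driver private structure
 * @en: true to program the HW LPI entry timer, false to fall back to the
 * SW EEE timer (the HW timer is then cleared).
 */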
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter LPI mode
405 * @priv: driver private structure
406 * Description: this function verifies that all TX queues have finished their
407 * work and, if so, enters LPI mode when EEE is enabled.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have the work finished */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter in LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the LPI state
433 * is active. It is called from the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and we are not already in LPI state,
452 * then the MAC transmitter can be moved to LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* Check if MAC core supports the EEE feature. */
475 if (!priv->dma_cap.eee)
476 return false;
477
478 mutex_lock(&priv->lock);
479
480 /* Check if it needs to be deactivated */
481 if (!priv->eee_active) {
482 if (priv->eee_enabled) {
483 netdev_dbg(priv->dev, "disable EEE\n");
484 stmmac_lpi_entry_timer_config(priv, 0);
485 del_timer_sync(&priv->eee_ctrl_timer);
486 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 if (priv->hw->xpcs)
488 xpcs_config_eee(priv->hw->xpcs,
489 priv->plat->mult_fact_100ns,
490 false);
491 }
492 mutex_unlock(&priv->lock);
493 return false;
494 }
495
496 if (priv->eee_active && !priv->eee_enabled) {
497 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 eee_tw_timer);
500 if (priv->hw->xpcs)
501 xpcs_config_eee(priv->hw->xpcs,
502 priv->plat->mult_fact_100ns,
503 true);
504 }
505
506 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 del_timer_sync(&priv->eee_ctrl_timer);
508 priv->tx_path_in_lpi_mode = false;
509 stmmac_lpi_entry_timer_config(priv, 1);
510 } else {
511 stmmac_lpi_entry_timer_config(priv, 0);
512 mod_timer(&priv->eee_ctrl_timer,
513 STMMAC_LPI_T(priv->tx_lpi_timer));
514 }
515
516 mutex_unlock(&priv->lock);
517 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 return true;
519 }
520
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522 * @priv: driver private structure
523 * @p : descriptor pointer
524 * @skb : the socket buffer
525 * Description :
526 * This function reads the timestamp from the descriptor, performs some
527 * sanity checks and passes it to the stack.
528 */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 struct dma_desc *p, struct sk_buff *skb)
531 {
532 struct skb_shared_hwtstamps shhwtstamp;
533 bool found = false;
534 u64 ns = 0;
535
536 if (!priv->hwts_tx_en)
537 return;
538
539 /* exit if skb doesn't support hw tstamp */
540 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 return;
542
543 /* check tx tstamp status */
544 if (stmmac_get_tx_timestamp_status(priv, p)) {
545 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 found = true;
547 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 found = true;
549 }
550
551 if (found) {
552 ns -= priv->plat->cdc_error_adj;
553
554 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 shhwtstamp.hwtstamp = ns_to_ktime(ns);
556
557 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 /* pass tstamp to stack */
559 skb_tstamp_tx(skb, &shhwtstamp);
560 }
561 }
562
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564 * @priv: driver private structure
565 * @p : descriptor pointer
566 * @np : next descriptor pointer
567 * @skb : the socket buffer
568 * Description :
569 * This function reads the received packet's timestamp from the descriptor,
570 * performs some sanity checks and passes it to the stack.
571 */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 struct dma_desc *np, struct sk_buff *skb)
574 {
575 struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 struct dma_desc *desc = p;
577 u64 ns = 0;
578
579 if (!priv->hwts_rx_en)
580 return;
581 /* For GMAC4, the valid timestamp is from CTX next desc. */
582 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 desc = np;
584
585 /* Check if timestamp is available */
586 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588
589 ns -= priv->plat->cdc_error_adj;
590
591 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 shhwtstamp = skb_hwtstamps(skb);
593 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 } else {
596 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 }
598 }
599
600 /**
601 * stmmac_hwtstamp_set - control hardware timestamping.
602 * @dev: device pointer.
603 * @ifr: An IOCTL-specific structure that can contain a pointer to
604 * a proprietary structure used to pass information to the driver.
605 * Description:
606 * This function configures the MAC to enable/disable both outgoing (TX)
607 * and incoming (RX) packet timestamping based on user input.
608 * Return Value:
609 * 0 on success and an appropriate -ve integer on failure.
610 */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 struct stmmac_priv *priv = netdev_priv(dev);
614 struct hwtstamp_config config;
615 u32 ptp_v2 = 0;
616 u32 tstamp_all = 0;
617 u32 ptp_over_ipv4_udp = 0;
618 u32 ptp_over_ipv6_udp = 0;
619 u32 ptp_over_ethernet = 0;
620 u32 snap_type_sel = 0;
621 u32 ts_master_en = 0;
622 u32 ts_event_en = 0;
623
624 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 netdev_alert(priv->dev, "No support for HW time stamping\n");
626 priv->hwts_tx_en = 0;
627 priv->hwts_rx_en = 0;
628
629 return -EOPNOTSUPP;
630 }
631
632 if (copy_from_user(&config, ifr->ifr_data,
633 sizeof(config)))
634 return -EFAULT;
635
636 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 __func__, config.flags, config.tx_type, config.rx_filter);
638
639 if (config.tx_type != HWTSTAMP_TX_OFF &&
640 config.tx_type != HWTSTAMP_TX_ON)
641 return -ERANGE;
642
643 if (priv->adv_ts) {
644 switch (config.rx_filter) {
645 case HWTSTAMP_FILTER_NONE:
646 /* time stamp no incoming packet at all */
647 config.rx_filter = HWTSTAMP_FILTER_NONE;
648 break;
649
650 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 /* PTP v1, UDP, any kind of event packet */
652 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 /* 'xmac' hardware can support Sync, Pdelay_Req and
654 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 * This leaves Delay_Req timestamps out.
656 * Enable all events *and* general purpose message
657 * timestamping
658 */
659 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 break;
663
664 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 /* PTP v1, UDP, Sync packet */
666 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 /* take time stamp for SYNC messages only */
668 ts_event_en = PTP_TCR_TSEVNTENA;
669
670 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 break;
673
674 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 /* PTP v1, UDP, Delay_req packet */
676 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 /* take time stamp for Delay_Req messages only */
678 ts_master_en = PTP_TCR_TSMSTRENA;
679 ts_event_en = PTP_TCR_TSEVNTENA;
680
681 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 break;
684
685 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 /* PTP v2, UDP, any kind of event packet */
687 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 ptp_v2 = PTP_TCR_TSVER2ENA;
689 /* take time stamp for all event messages */
690 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691
692 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 break;
695
696 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 /* PTP v2, UDP, Sync packet */
698 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 ptp_v2 = PTP_TCR_TSVER2ENA;
700 /* take time stamp for SYNC messages only */
701 ts_event_en = PTP_TCR_TSEVNTENA;
702
703 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 break;
706
707 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 /* PTP v2, UDP, Delay_req packet */
709 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 ptp_v2 = PTP_TCR_TSVER2ENA;
711 /* take time stamp for Delay_Req messages only */
712 ts_master_en = PTP_TCR_TSMSTRENA;
713 ts_event_en = PTP_TCR_TSEVNTENA;
714
715 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 break;
718
719 case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 /* PTP v2/802.AS1 any layer, any kind of event packet */
721 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 ptp_v2 = PTP_TCR_TSVER2ENA;
723 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 if (priv->synopsys_id < DWMAC_CORE_4_10)
725 ts_event_en = PTP_TCR_TSEVNTENA;
726 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 ptp_over_ethernet = PTP_TCR_TSIPENA;
729 break;
730
731 case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 /* PTP v2/802.AS1, any layer, Sync packet */
733 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 ptp_v2 = PTP_TCR_TSVER2ENA;
735 /* take time stamp for SYNC messages only */
736 ts_event_en = PTP_TCR_TSEVNTENA;
737
738 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 ptp_over_ethernet = PTP_TCR_TSIPENA;
741 break;
742
743 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 /* PTP v2/802.AS1, any layer, Delay_req packet */
745 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 ptp_v2 = PTP_TCR_TSVER2ENA;
747 /* take time stamp for Delay_Req messages only */
748 ts_master_en = PTP_TCR_TSMSTRENA;
749 ts_event_en = PTP_TCR_TSEVNTENA;
750
751 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 ptp_over_ethernet = PTP_TCR_TSIPENA;
754 break;
755
756 case HWTSTAMP_FILTER_NTP_ALL:
757 case HWTSTAMP_FILTER_ALL:
758 /* time stamp any incoming packet */
759 config.rx_filter = HWTSTAMP_FILTER_ALL;
760 tstamp_all = PTP_TCR_TSENALL;
761 break;
762
763 default:
764 return -ERANGE;
765 }
766 } else {
767 switch (config.rx_filter) {
768 case HWTSTAMP_FILTER_NONE:
769 config.rx_filter = HWTSTAMP_FILTER_NONE;
770 break;
771 default:
772 /* PTP v1, UDP, any kind of event packet */
773 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 break;
775 }
776 }
777 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779
780 priv->systime_flags = STMMAC_HWTS_ACTIVE;
781
782 if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 priv->systime_flags |= tstamp_all | ptp_v2 |
784 ptp_over_ethernet | ptp_over_ipv6_udp |
785 ptp_over_ipv4_udp | ts_event_en |
786 ts_master_en | snap_type_sel;
787 }
788
789 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790
791 memcpy(&priv->tstamp_config, &config, sizeof(config));
792
793 return copy_to_user(ifr->ifr_data, &config,
794 sizeof(config)) ? -EFAULT : 0;
795 }
796
797 /**
798 * stmmac_hwtstamp_get - read hardware timestamping.
799 * @dev: device pointer.
800 * @ifr: An IOCTL-specific structure that can contain a pointer to
801 * a proprietary structure used to pass information to the driver.
802 * Description:
803 * This function obtains the current hardware timestamping settings
804 * as requested.
805 */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 struct stmmac_priv *priv = netdev_priv(dev);
809 struct hwtstamp_config *config = &priv->tstamp_config;
810
811 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 return -EOPNOTSUPP;
813
814 return copy_to_user(ifr->ifr_data, config,
815 sizeof(*config)) ? -EFAULT : 0;
816 }
817
818 /**
819 * stmmac_init_tstamp_counter - init hardware timestamping counter
820 * @priv: driver private structure
821 * @systime_flags: timestamping flags
822 * Description:
823 * Initialize hardware counter for packet timestamping.
824 * This is valid as long as the interface is open and not suspended.
825 * It is rerun after resuming from suspend, in which case the timestamping
826 * flags updated by stmmac_hwtstamp_set() also need to be restored.
827 */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 struct timespec64 now;
832 u32 sec_inc = 0;
833 u64 temp = 0;
834
835 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 return -EOPNOTSUPP;
837
838 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 priv->systime_flags = systime_flags;
840
841 /* program Sub Second Increment reg */
842 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 priv->plat->clk_ptp_rate,
844 xmac, &sec_inc);
845 temp = div_u64(1000000000ULL, sec_inc);
846
847 /* Store sub second increment for later use */
848 priv->sub_second_inc = sec_inc;
849
850 /* calculate default added value:
851 * formula is :
852 * addend = (2^32)/freq_div_ratio;
853 * where, freq_div_ratio = 1e9ns/sec_inc
854 */
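/* Worked example with illustrative values: for clk_ptp_rate = 50 MHz and
 * sec_inc = 40 ns, the code below computes
 * addend = ((10^9 / 40) << 32) / 50000000 = 0x80000000.
 */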
855 temp = (u64)(temp << 32);
856 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858
859 /* initialize system time */
860 ktime_get_real_ts64(&now);
861
862 /* lower 32 bits of tv_sec are safe until y2106 */
863 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864
865 return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868
869 /**
870 * stmmac_init_ptp - init PTP
871 * @priv: driver private structure
872 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
873 * This is done by looking at the HW cap. register.
874 * This function also registers the ptp driver.
875 */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 int ret;
880
881 if (priv->plat->ptp_clk_freq_config)
882 priv->plat->ptp_clk_freq_config(priv);
883
884 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 if (ret)
886 return ret;
887
888 priv->adv_ts = 0;
889 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 if (xmac && priv->dma_cap.atime_stamp)
891 priv->adv_ts = 1;
892 /* Dwmac 3.x core with extend_desc can support adv_ts */
893 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 priv->adv_ts = 1;
895
896 if (priv->dma_cap.time_stamp)
897 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898
899 if (priv->adv_ts)
900 netdev_info(priv->dev,
901 "IEEE 1588-2008 Advanced Timestamp supported\n");
902
903 priv->hwts_tx_en = 0;
904 priv->hwts_rx_en = 0;
905
906 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 stmmac_hwtstamp_correct_latency(priv, priv);
908
909 return 0;
910 }
911
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 stmmac_ptp_unregister(priv);
916 }
917
918 /**
919 * stmmac_mac_flow_ctrl - Configure flow control in all queues
920 * @priv: driver private structure
921 * @duplex: duplex passed to the next function
922 * Description: It is used for configuring the flow control in all queues
923 */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 u32 tx_cnt = priv->plat->tx_queues_to_use;
927
928 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 priv->pause, tx_cnt);
930 }
931
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 phy_interface_t interface)
934 {
935 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936
937 /* Refresh the MAC-specific capabilities */
938 stmmac_mac_update_caps(priv);
939
940 config->mac_capabilities = priv->hw->link.caps;
941
942 if (priv->plat->max_speed)
943 phylink_limit_mac_speed(config, priv->plat->max_speed);
944
945 return config->mac_capabilities;
946 }
947
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 phy_interface_t interface)
950 {
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 struct phylink_pcs *pcs;
953
954 if (priv->plat->select_pcs) {
955 pcs = priv->plat->select_pcs(priv, interface);
956 if (!IS_ERR(pcs))
957 return pcs;
958 }
959
960 return NULL;
961 }
962
963 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
964 const struct phylink_link_state *state)
965 {
966 /* Nothing to do, xpcs_config() handles everything */
967 }
968
969 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
970 {
971 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
972 unsigned long flags;
973
974 timer_shutdown_sync(&fpe_cfg->verify_timer);
975
976 spin_lock_irqsave(&fpe_cfg->lock, flags);
977
978 if (is_up && fpe_cfg->pmac_enabled) {
979 /* VERIFY process requires pmac enabled when NIC comes up */
980 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
981 priv->plat->tx_queues_to_use,
982 priv->plat->rx_queues_to_use,
983 false, true);
984
985 /* New link => maybe new partner => new verification process */
986 stmmac_fpe_apply(priv);
987 } else {
988 /* No link => turn off EFPE */
989 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
990 priv->plat->tx_queues_to_use,
991 priv->plat->rx_queues_to_use,
992 false, false);
993 }
994
995 spin_unlock_irqrestore(&fpe_cfg->lock, flags);
996 }
997
998 static void stmmac_mac_link_down(struct phylink_config *config,
999 unsigned int mode, phy_interface_t interface)
1000 {
1001 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1002
1003 stmmac_mac_set(priv, priv->ioaddr, false);
1004 priv->eee_active = false;
1005 priv->tx_lpi_enabled = false;
1006 priv->eee_enabled = stmmac_eee_init(priv);
1007 stmmac_set_eee_pls(priv, priv->hw, false);
1008
1009 if (priv->dma_cap.fpesel)
1010 stmmac_fpe_link_state_handle(priv, false);
1011 }
1012
1013 static void stmmac_mac_link_up(struct phylink_config *config,
1014 struct phy_device *phy,
1015 unsigned int mode, phy_interface_t interface,
1016 int speed, int duplex,
1017 bool tx_pause, bool rx_pause)
1018 {
1019 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 u32 old_ctrl, ctrl;
1021
1022 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1023 priv->plat->serdes_powerup)
1024 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1025
1026 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1027 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1028
1029 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1030 switch (speed) {
1031 case SPEED_10000:
1032 ctrl |= priv->hw->link.xgmii.speed10000;
1033 break;
1034 case SPEED_5000:
1035 ctrl |= priv->hw->link.xgmii.speed5000;
1036 break;
1037 case SPEED_2500:
1038 ctrl |= priv->hw->link.xgmii.speed2500;
1039 break;
1040 default:
1041 return;
1042 }
1043 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1044 switch (speed) {
1045 case SPEED_100000:
1046 ctrl |= priv->hw->link.xlgmii.speed100000;
1047 break;
1048 case SPEED_50000:
1049 ctrl |= priv->hw->link.xlgmii.speed50000;
1050 break;
1051 case SPEED_40000:
1052 ctrl |= priv->hw->link.xlgmii.speed40000;
1053 break;
1054 case SPEED_25000:
1055 ctrl |= priv->hw->link.xlgmii.speed25000;
1056 break;
1057 case SPEED_10000:
1058 ctrl |= priv->hw->link.xgmii.speed10000;
1059 break;
1060 case SPEED_2500:
1061 ctrl |= priv->hw->link.speed2500;
1062 break;
1063 case SPEED_1000:
1064 ctrl |= priv->hw->link.speed1000;
1065 break;
1066 default:
1067 return;
1068 }
1069 } else {
1070 switch (speed) {
1071 case SPEED_2500:
1072 ctrl |= priv->hw->link.speed2500;
1073 break;
1074 case SPEED_1000:
1075 ctrl |= priv->hw->link.speed1000;
1076 break;
1077 case SPEED_100:
1078 ctrl |= priv->hw->link.speed100;
1079 break;
1080 case SPEED_10:
1081 ctrl |= priv->hw->link.speed10;
1082 break;
1083 default:
1084 return;
1085 }
1086 }
1087
1088 priv->speed = speed;
1089
1090 if (priv->plat->fix_mac_speed)
1091 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1092
1093 if (!duplex)
1094 ctrl &= ~priv->hw->link.duplex;
1095 else
1096 ctrl |= priv->hw->link.duplex;
1097
1098 /* Flow Control operation */
1099 if (rx_pause && tx_pause)
1100 priv->flow_ctrl = FLOW_AUTO;
1101 else if (rx_pause && !tx_pause)
1102 priv->flow_ctrl = FLOW_RX;
1103 else if (!rx_pause && tx_pause)
1104 priv->flow_ctrl = FLOW_TX;
1105 else
1106 priv->flow_ctrl = FLOW_OFF;
1107
1108 stmmac_mac_flow_ctrl(priv, duplex);
1109
1110 if (ctrl != old_ctrl)
1111 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1112
1113 stmmac_mac_set(priv, priv->ioaddr, true);
1114 if (phy && priv->dma_cap.eee) {
1115 priv->eee_active =
1116 phy_init_eee(phy, !(priv->plat->flags &
1117 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1118 priv->eee_enabled = stmmac_eee_init(priv);
1119 priv->tx_lpi_enabled = priv->eee_enabled;
1120 stmmac_set_eee_pls(priv, priv->hw, true);
1121 }
1122
1123 if (priv->dma_cap.fpesel)
1124 stmmac_fpe_link_state_handle(priv, true);
1125
1126 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1127 stmmac_hwtstamp_correct_latency(priv, priv);
1128 }
1129
1130 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1131 .mac_get_caps = stmmac_mac_get_caps,
1132 .mac_select_pcs = stmmac_mac_select_pcs,
1133 .mac_config = stmmac_mac_config,
1134 .mac_link_down = stmmac_mac_link_down,
1135 .mac_link_up = stmmac_mac_link_up,
1136 };
1137
1138 /**
1139 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1140 * @priv: driver private structure
1141 * Description: this is to verify if the HW supports the Physical Coding
1142 * Sublayer (PCS) interface, which can be used when the MAC is
1143 * configured for the TBI, RTBI, or SGMII PHY interface.
1144 */
1145 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1146 {
1147 int interface = priv->plat->mac_interface;
1148
1149 if (priv->dma_cap.pcs) {
1150 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1151 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1152 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1153 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1154 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1155 priv->hw->pcs = STMMAC_PCS_RGMII;
1156 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1157 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1158 priv->hw->pcs = STMMAC_PCS_SGMII;
1159 }
1160 }
1161 }
1162
1163 /**
1164 * stmmac_init_phy - PHY initialization
1165 * @dev: net device structure
1166 * Description: it initializes the driver's PHY state, and attaches the PHY
1167 * to the MAC driver.
1168 * Return value:
1169 * 0 on success
1170 */
1171 static int stmmac_init_phy(struct net_device *dev)
1172 {
1173 struct stmmac_priv *priv = netdev_priv(dev);
1174 struct fwnode_handle *phy_fwnode;
1175 struct fwnode_handle *fwnode;
1176 int ret;
1177
1178 if (!phylink_expects_phy(priv->phylink))
1179 return 0;
1180
1181 fwnode = priv->plat->port_node;
1182 if (!fwnode)
1183 fwnode = dev_fwnode(priv->device);
1184
1185 if (fwnode)
1186 phy_fwnode = fwnode_get_phy_node(fwnode);
1187 else
1188 phy_fwnode = NULL;
1189
1190 /* Some DT bindings do not set up the PHY handle. Let's try to
1191 * parse it manually.
1192 */
1193 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1194 int addr = priv->plat->phy_addr;
1195 struct phy_device *phydev;
1196
1197 if (addr < 0) {
1198 netdev_err(priv->dev, "no phy found\n");
1199 return -ENODEV;
1200 }
1201
1202 phydev = mdiobus_get_phy(priv->mii, addr);
1203 if (!phydev) {
1204 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1205 return -ENODEV;
1206 }
1207
1208 ret = phylink_connect_phy(priv->phylink, phydev);
1209 } else {
1210 fwnode_handle_put(phy_fwnode);
1211 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1212 }
1213
1214 if (!priv->plat->pmt) {
1215 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1216
1217 phylink_ethtool_get_wol(priv->phylink, &wol);
1218 device_set_wakeup_capable(priv->device, !!wol.supported);
1219 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1220 }
1221
1222 return ret;
1223 }
1224
1225 static int stmmac_phy_setup(struct stmmac_priv *priv)
1226 {
1227 struct stmmac_mdio_bus_data *mdio_bus_data;
1228 int mode = priv->plat->phy_interface;
1229 struct fwnode_handle *fwnode;
1230 struct phylink *phylink;
1231
1232 priv->phylink_config.dev = &priv->dev->dev;
1233 priv->phylink_config.type = PHYLINK_NETDEV;
1234 priv->phylink_config.mac_managed_pm = true;
1235
1236 /* Stmmac always requires an RX clock for hardware initialization */
1237 priv->phylink_config.mac_requires_rxc = true;
1238
1239 mdio_bus_data = priv->plat->mdio_bus_data;
1240 if (mdio_bus_data)
1241 priv->phylink_config.default_an_inband =
1242 mdio_bus_data->default_an_inband;
1243
1244 /* Set the platform/firmware specified interface mode. Note, phylink
1245 * deals with the PHY interface mode, not the MAC interface mode.
1246 */
1247 __set_bit(mode, priv->phylink_config.supported_interfaces);
1248
1249 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1250 if (priv->hw->xpcs)
1251 xpcs_get_interfaces(priv->hw->xpcs,
1252 priv->phylink_config.supported_interfaces);
1253
1254 fwnode = priv->plat->port_node;
1255 if (!fwnode)
1256 fwnode = dev_fwnode(priv->device);
1257
1258 phylink = phylink_create(&priv->phylink_config, fwnode,
1259 mode, &stmmac_phylink_mac_ops);
1260 if (IS_ERR(phylink))
1261 return PTR_ERR(phylink);
1262
1263 priv->phylink = phylink;
1264 return 0;
1265 }
1266
1267 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1268 struct stmmac_dma_conf *dma_conf)
1269 {
1270 u32 rx_cnt = priv->plat->rx_queues_to_use;
1271 unsigned int desc_size;
1272 void *head_rx;
1273 u32 queue;
1274
1275 /* Display RX rings */
1276 for (queue = 0; queue < rx_cnt; queue++) {
1277 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1278
1279 pr_info("\tRX Queue %u rings\n", queue);
1280
1281 if (priv->extend_desc) {
1282 head_rx = (void *)rx_q->dma_erx;
1283 desc_size = sizeof(struct dma_extended_desc);
1284 } else {
1285 head_rx = (void *)rx_q->dma_rx;
1286 desc_size = sizeof(struct dma_desc);
1287 }
1288
1289 /* Display RX ring */
1290 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1291 rx_q->dma_rx_phy, desc_size);
1292 }
1293 }
1294
1295 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1296 struct stmmac_dma_conf *dma_conf)
1297 {
1298 u32 tx_cnt = priv->plat->tx_queues_to_use;
1299 unsigned int desc_size;
1300 void *head_tx;
1301 u32 queue;
1302
1303 /* Display TX rings */
1304 for (queue = 0; queue < tx_cnt; queue++) {
1305 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1306
1307 pr_info("\tTX Queue %d rings\n", queue);
1308
1309 if (priv->extend_desc) {
1310 head_tx = (void *)tx_q->dma_etx;
1311 desc_size = sizeof(struct dma_extended_desc);
1312 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1313 head_tx = (void *)tx_q->dma_entx;
1314 desc_size = sizeof(struct dma_edesc);
1315 } else {
1316 head_tx = (void *)tx_q->dma_tx;
1317 desc_size = sizeof(struct dma_desc);
1318 }
1319
1320 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1321 tx_q->dma_tx_phy, desc_size);
1322 }
1323 }
1324
1325 static void stmmac_display_rings(struct stmmac_priv *priv,
1326 struct stmmac_dma_conf *dma_conf)
1327 {
1328 /* Display RX ring */
1329 stmmac_display_rx_rings(priv, dma_conf);
1330
1331 /* Display TX ring */
1332 stmmac_display_tx_rings(priv, dma_conf);
1333 }
1334
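/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: maximum transfer unit
 * @bufsize: current buffer size
 * Description: return the buffer size to use so that a frame of the given
 * MTU fits into a single buffer; small MTUs fall back to DEFAULT_BUFSIZE.
 */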
1335 static int stmmac_set_bfsize(int mtu, int bufsize)
1336 {
1337 int ret = bufsize;
1338
1339 if (mtu >= BUF_SIZE_8KiB)
1340 ret = BUF_SIZE_16KiB;
1341 else if (mtu >= BUF_SIZE_4KiB)
1342 ret = BUF_SIZE_8KiB;
1343 else if (mtu >= BUF_SIZE_2KiB)
1344 ret = BUF_SIZE_4KiB;
1345 else if (mtu > DEFAULT_BUFSIZE)
1346 ret = BUF_SIZE_2KiB;
1347 else
1348 ret = DEFAULT_BUFSIZE;
1349
1350 return ret;
1351 }
1352
1353 /**
1354 * stmmac_clear_rx_descriptors - clear RX descriptors
1355 * @priv: driver private structure
1356 * @dma_conf: structure to take the dma data
1357 * @queue: RX queue index
1358 * Description: this function is called to clear the RX descriptors,
1359 * whether basic or extended descriptors are in use.
1360 */
1361 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1362 struct stmmac_dma_conf *dma_conf,
1363 u32 queue)
1364 {
1365 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1366 int i;
1367
1368 /* Clear the RX descriptors */
1369 for (i = 0; i < dma_conf->dma_rx_size; i++)
1370 if (priv->extend_desc)
1371 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1372 priv->use_riwt, priv->mode,
1373 (i == dma_conf->dma_rx_size - 1),
1374 dma_conf->dma_buf_sz);
1375 else
1376 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1377 priv->use_riwt, priv->mode,
1378 (i == dma_conf->dma_rx_size - 1),
1379 dma_conf->dma_buf_sz);
1380 }
1381
1382 /**
1383 * stmmac_clear_tx_descriptors - clear tx descriptors
1384 * @priv: driver private structure
1385 * @dma_conf: structure to take the dma data
1386 * @queue: TX queue index.
1387 * Description: this function is called to clear the TX descriptors,
1388 * whether basic or extended descriptors are in use.
1389 */
1390 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1391 struct stmmac_dma_conf *dma_conf,
1392 u32 queue)
1393 {
1394 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1395 int i;
1396
1397 /* Clear the TX descriptors */
1398 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1399 int last = (i == (dma_conf->dma_tx_size - 1));
1400 struct dma_desc *p;
1401
1402 if (priv->extend_desc)
1403 p = &tx_q->dma_etx[i].basic;
1404 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1405 p = &tx_q->dma_entx[i].basic;
1406 else
1407 p = &tx_q->dma_tx[i];
1408
1409 stmmac_init_tx_desc(priv, p, priv->mode, last);
1410 }
1411 }
1412
1413 /**
1414 * stmmac_clear_descriptors - clear descriptors
1415 * @priv: driver private structure
1416 * @dma_conf: structure to take the dma data
1417 * Description: this function is called to clear the TX and RX descriptors,
1418 * whether basic or extended descriptors are in use.
1419 */
1420 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1421 struct stmmac_dma_conf *dma_conf)
1422 {
1423 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1424 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1425 u32 queue;
1426
1427 /* Clear the RX descriptors */
1428 for (queue = 0; queue < rx_queue_cnt; queue++)
1429 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1430
1431 /* Clear the TX descriptors */
1432 for (queue = 0; queue < tx_queue_cnt; queue++)
1433 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1434 }
1435
1436 /**
1437 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1438 * @priv: driver private structure
1439 * @dma_conf: structure to take the dma data
1440 * @p: descriptor pointer
1441 * @i: descriptor index
1442 * @flags: gfp flag
1443 * @queue: RX queue index
1444 * Description: this function is called to allocate a receive buffer, perform
1445 * the DMA mapping and init the descriptor.
1446 */
1447 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1448 struct stmmac_dma_conf *dma_conf,
1449 struct dma_desc *p,
1450 int i, gfp_t flags, u32 queue)
1451 {
1452 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1453 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1454 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1455
1456 if (priv->dma_cap.host_dma_width <= 32)
1457 gfp |= GFP_DMA32;
1458
1459 if (!buf->page) {
1460 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1461 if (!buf->page)
1462 return -ENOMEM;
1463 buf->page_offset = stmmac_rx_offset(priv);
1464 }
1465
1466 if (priv->sph && !buf->sec_page) {
1467 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 if (!buf->sec_page)
1469 return -ENOMEM;
1470
1471 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1472 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1473 } else {
1474 buf->sec_page = NULL;
1475 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1476 }
1477
1478 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1479
1480 stmmac_set_desc_addr(priv, p, buf->addr);
1481 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1482 stmmac_init_desc3(priv, p);
1483
1484 return 0;
1485 }
1486
1487 /**
1488 * stmmac_free_rx_buffer - free RX dma buffers
1489 * @priv: private structure
1490 * @rx_q: RX queue
1491 * @i: buffer index.
1492 */
1493 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1494 struct stmmac_rx_queue *rx_q,
1495 int i)
1496 {
1497 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1498
1499 if (buf->page)
1500 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1501 buf->page = NULL;
1502
1503 if (buf->sec_page)
1504 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1505 buf->sec_page = NULL;
1506 }
1507
1508 /**
1509 * stmmac_free_tx_buffer - free TX dma buffers
1510 * @priv: private structure
1511 * @dma_conf: structure to take the dma data
1512 * @queue: TX queue index
1513 * @i: buffer index.
1514 */
1515 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1516 struct stmmac_dma_conf *dma_conf,
1517 u32 queue, int i)
1518 {
1519 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1520
1521 if (tx_q->tx_skbuff_dma[i].buf &&
1522 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1523 if (tx_q->tx_skbuff_dma[i].map_as_page)
1524 dma_unmap_page(priv->device,
1525 tx_q->tx_skbuff_dma[i].buf,
1526 tx_q->tx_skbuff_dma[i].len,
1527 DMA_TO_DEVICE);
1528 else
1529 dma_unmap_single(priv->device,
1530 tx_q->tx_skbuff_dma[i].buf,
1531 tx_q->tx_skbuff_dma[i].len,
1532 DMA_TO_DEVICE);
1533 }
1534
1535 if (tx_q->xdpf[i] &&
1536 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1537 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1538 xdp_return_frame(tx_q->xdpf[i]);
1539 tx_q->xdpf[i] = NULL;
1540 }
1541
1542 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1543 tx_q->xsk_frames_done++;
1544
1545 if (tx_q->tx_skbuff[i] &&
1546 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1547 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1548 tx_q->tx_skbuff[i] = NULL;
1549 }
1550
1551 tx_q->tx_skbuff_dma[i].buf = 0;
1552 tx_q->tx_skbuff_dma[i].map_as_page = false;
1553 }
1554
1555 /**
1556 * dma_free_rx_skbufs - free RX dma buffers
1557 * @priv: private structure
1558 * @dma_conf: structure to take the dma data
1559 * @queue: RX queue index
1560 */
1561 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1562 struct stmmac_dma_conf *dma_conf,
1563 u32 queue)
1564 {
1565 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 int i;
1567
1568 for (i = 0; i < dma_conf->dma_rx_size; i++)
1569 stmmac_free_rx_buffer(priv, rx_q, i);
1570 }
1571
1572 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1573 struct stmmac_dma_conf *dma_conf,
1574 u32 queue, gfp_t flags)
1575 {
1576 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1577 int i;
1578
1579 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1580 struct dma_desc *p;
1581 int ret;
1582
1583 if (priv->extend_desc)
1584 p = &((rx_q->dma_erx + i)->basic);
1585 else
1586 p = rx_q->dma_rx + i;
1587
1588 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1589 queue);
1590 if (ret)
1591 return ret;
1592
1593 rx_q->buf_alloc_num++;
1594 }
1595
1596 return 0;
1597 }
1598
1599 /**
1600 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1601 * @priv: private structure
1602 * @dma_conf: structure to take the dma data
1603 * @queue: RX queue index
1604 */
1605 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1606 struct stmmac_dma_conf *dma_conf,
1607 u32 queue)
1608 {
1609 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1610 int i;
1611
1612 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1613 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1614
1615 if (!buf->xdp)
1616 continue;
1617
1618 xsk_buff_free(buf->xdp);
1619 buf->xdp = NULL;
1620 }
1621 }
1622
1623 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1624 struct stmmac_dma_conf *dma_conf,
1625 u32 queue)
1626 {
1627 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1628 int i;
1629
1630 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1631 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1632 * use this macro to make sure there are no size violations.
1633 */
1634 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1635
1636 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1637 struct stmmac_rx_buffer *buf;
1638 dma_addr_t dma_addr;
1639 struct dma_desc *p;
1640
1641 if (priv->extend_desc)
1642 p = (struct dma_desc *)(rx_q->dma_erx + i);
1643 else
1644 p = rx_q->dma_rx + i;
1645
1646 buf = &rx_q->buf_pool[i];
1647
1648 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1649 if (!buf->xdp)
1650 return -ENOMEM;
1651
1652 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1653 stmmac_set_desc_addr(priv, p, dma_addr);
1654 rx_q->buf_alloc_num++;
1655 }
1656
1657 return 0;
1658 }
1659
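/* Return the XSK buffer pool bound to @queue when XDP zero-copy is enabled
 * for that queue, or NULL otherwise.
 */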
1660 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1661 {
1662 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1663 return NULL;
1664
1665 return xsk_get_pool_from_qid(priv->dev, queue);
1666 }
1667
1668 /**
1669 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1670 * @priv: driver private structure
1671 * @dma_conf: structure to take the dma data
1672 * @queue: RX queue index
1673 * @flags: gfp flag.
1674 * Description: this function initializes the DMA RX descriptors
1675 * and allocates the socket buffers. It supports the chained and ring
1676 * modes.
1677 */
1678 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1679 struct stmmac_dma_conf *dma_conf,
1680 u32 queue, gfp_t flags)
1681 {
1682 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1683 int ret;
1684
1685 netif_dbg(priv, probe, priv->dev,
1686 "(%s) dma_rx_phy=0x%08x\n", __func__,
1687 (u32)rx_q->dma_rx_phy);
1688
1689 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1690
1691 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1692
1693 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1694
1695 if (rx_q->xsk_pool) {
1696 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1697 MEM_TYPE_XSK_BUFF_POOL,
1698 NULL));
1699 netdev_info(priv->dev,
1700 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1701 rx_q->queue_index);
1702 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1703 } else {
1704 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1705 MEM_TYPE_PAGE_POOL,
1706 rx_q->page_pool));
1707 netdev_info(priv->dev,
1708 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1709 rx_q->queue_index);
1710 }
1711
1712 if (rx_q->xsk_pool) {
1713 /* RX XDP ZC buffer pool may not be populated, e.g.
1714 * xdpsock TX-only.
1715 */
1716 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1717 } else {
1718 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1719 if (ret < 0)
1720 return -ENOMEM;
1721 }
1722
1723 /* Setup the chained descriptor addresses */
1724 if (priv->mode == STMMAC_CHAIN_MODE) {
1725 if (priv->extend_desc)
1726 stmmac_mode_init(priv, rx_q->dma_erx,
1727 rx_q->dma_rx_phy,
1728 dma_conf->dma_rx_size, 1);
1729 else
1730 stmmac_mode_init(priv, rx_q->dma_rx,
1731 rx_q->dma_rx_phy,
1732 dma_conf->dma_rx_size, 0);
1733 }
1734
1735 return 0;
1736 }
1737
1738 static int init_dma_rx_desc_rings(struct net_device *dev,
1739 struct stmmac_dma_conf *dma_conf,
1740 gfp_t flags)
1741 {
1742 struct stmmac_priv *priv = netdev_priv(dev);
1743 u32 rx_count = priv->plat->rx_queues_to_use;
1744 int queue;
1745 int ret;
1746
1747 /* RX INITIALIZATION */
1748 netif_dbg(priv, probe, priv->dev,
1749 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1750
1751 for (queue = 0; queue < rx_count; queue++) {
1752 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1753 if (ret)
1754 goto err_init_rx_buffers;
1755 }
1756
1757 return 0;
1758
1759 err_init_rx_buffers:
1760 while (queue >= 0) {
1761 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1762
1763 if (rx_q->xsk_pool)
1764 dma_free_rx_xskbufs(priv, dma_conf, queue);
1765 else
1766 dma_free_rx_skbufs(priv, dma_conf, queue);
1767
1768 rx_q->buf_alloc_num = 0;
1769 rx_q->xsk_pool = NULL;
1770
1771 queue--;
1772 }
1773
1774 return ret;
1775 }
1776
1777 /**
1778 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1779 * @priv: driver private structure
1780 * @dma_conf: structure to take the dma data
1781 * @queue: TX queue index
1782 * Description: this function initializes the DMA TX descriptors
1783 * and allocates the socket buffers. It supports the chained and ring
1784 * modes.
1785 */
1786 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1787 struct stmmac_dma_conf *dma_conf,
1788 u32 queue)
1789 {
1790 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1791 int i;
1792
1793 netif_dbg(priv, probe, priv->dev,
1794 "(%s) dma_tx_phy=0x%08x\n", __func__,
1795 (u32)tx_q->dma_tx_phy);
1796
1797 /* Setup the chained descriptor addresses */
1798 if (priv->mode == STMMAC_CHAIN_MODE) {
1799 if (priv->extend_desc)
1800 stmmac_mode_init(priv, tx_q->dma_etx,
1801 tx_q->dma_tx_phy,
1802 dma_conf->dma_tx_size, 1);
1803 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1804 stmmac_mode_init(priv, tx_q->dma_tx,
1805 tx_q->dma_tx_phy,
1806 dma_conf->dma_tx_size, 0);
1807 }
1808
1809 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1810
1811 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1812 struct dma_desc *p;
1813
1814 if (priv->extend_desc)
1815 p = &((tx_q->dma_etx + i)->basic);
1816 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1817 p = &((tx_q->dma_entx + i)->basic);
1818 else
1819 p = tx_q->dma_tx + i;
1820
1821 stmmac_clear_desc(priv, p);
1822
1823 tx_q->tx_skbuff_dma[i].buf = 0;
1824 tx_q->tx_skbuff_dma[i].map_as_page = false;
1825 tx_q->tx_skbuff_dma[i].len = 0;
1826 tx_q->tx_skbuff_dma[i].last_segment = false;
1827 tx_q->tx_skbuff[i] = NULL;
1828 }
1829
1830 return 0;
1831 }
1832
1833 static int init_dma_tx_desc_rings(struct net_device *dev,
1834 struct stmmac_dma_conf *dma_conf)
1835 {
1836 struct stmmac_priv *priv = netdev_priv(dev);
1837 u32 tx_queue_cnt;
1838 u32 queue;
1839
1840 tx_queue_cnt = priv->plat->tx_queues_to_use;
1841
1842 for (queue = 0; queue < tx_queue_cnt; queue++)
1843 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1844
1845 return 0;
1846 }
1847
1848 /**
1849 * init_dma_desc_rings - init the RX/TX descriptor rings
1850 * @dev: net device structure
1851 * @dma_conf: structure to take the dma data
1852 * @flags: gfp flag.
1853 * Description: this function initializes the DMA RX/TX descriptors
1854 * and allocates the socket buffers. It supports the chained and ring
1855 * modes.
1856 */
1857 static int init_dma_desc_rings(struct net_device *dev,
1858 struct stmmac_dma_conf *dma_conf,
1859 gfp_t flags)
1860 {
1861 struct stmmac_priv *priv = netdev_priv(dev);
1862 int ret;
1863
1864 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1865 if (ret)
1866 return ret;
1867
1868 ret = init_dma_tx_desc_rings(dev, dma_conf);
1869
1870 stmmac_clear_descriptors(priv, dma_conf);
1871
1872 if (netif_msg_hw(priv))
1873 stmmac_display_rings(priv, dma_conf);
1874
1875 return ret;
1876 }
1877
1878 /**
1879 * dma_free_tx_skbufs - free TX dma buffers
1880 * @priv: private structure
1881 * @dma_conf: structure to take the dma data
1882 * @queue: TX queue index
1883 */
1884 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1885 struct stmmac_dma_conf *dma_conf,
1886 u32 queue)
1887 {
1888 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1889 int i;
1890
1891 tx_q->xsk_frames_done = 0;
1892
1893 for (i = 0; i < dma_conf->dma_tx_size; i++)
1894 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1895
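	/* Hand any XSK frames completed while tearing down the ring back to
	 * the pool before dropping the reference to it.
	 */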
1896 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1897 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1898 tx_q->xsk_frames_done = 0;
1899 tx_q->xsk_pool = NULL;
1900 }
1901 }
1902
1903 /**
1904 * stmmac_free_tx_skbufs - free TX skb buffers
1905 * @priv: private structure
1906 */
1907 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1908 {
1909 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1910 u32 queue;
1911
1912 for (queue = 0; queue < tx_queue_cnt; queue++)
1913 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1914 }
1915
1916 /**
1917 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1918 * @priv: private structure
1919 * @dma_conf: structure to take the dma data
1920 * @queue: RX queue index
1921 */
1922 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1923 struct stmmac_dma_conf *dma_conf,
1924 u32 queue)
1925 {
1926 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1927
1928 /* Release the DMA RX socket buffers */
1929 if (rx_q->xsk_pool)
1930 dma_free_rx_xskbufs(priv, dma_conf, queue);
1931 else
1932 dma_free_rx_skbufs(priv, dma_conf, queue);
1933
1934 rx_q->buf_alloc_num = 0;
1935 rx_q->xsk_pool = NULL;
1936
1937 /* Free DMA regions of consistent memory previously allocated */
1938 if (!priv->extend_desc)
1939 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1940 sizeof(struct dma_desc),
1941 rx_q->dma_rx, rx_q->dma_rx_phy);
1942 else
1943 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1944 sizeof(struct dma_extended_desc),
1945 rx_q->dma_erx, rx_q->dma_rx_phy);
1946
1947 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1948 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1949
1950 kfree(rx_q->buf_pool);
1951 if (rx_q->page_pool)
1952 page_pool_destroy(rx_q->page_pool);
1953 }
1954
1955 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1956 struct stmmac_dma_conf *dma_conf)
1957 {
1958 u32 rx_count = priv->plat->rx_queues_to_use;
1959 u32 queue;
1960
1961 /* Free RX queue resources */
1962 for (queue = 0; queue < rx_count; queue++)
1963 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1964 }
1965
1966 /**
1967 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1968 * @priv: private structure
1969 * @dma_conf: structure to take the dma data
1970 * @queue: TX queue index
1971 */
1972 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1973 struct stmmac_dma_conf *dma_conf,
1974 u32 queue)
1975 {
1976 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1977 size_t size;
1978 void *addr;
1979
1980 /* Release the DMA TX socket buffers */
1981 dma_free_tx_skbufs(priv, dma_conf, queue);
1982
1983 if (priv->extend_desc) {
1984 size = sizeof(struct dma_extended_desc);
1985 addr = tx_q->dma_etx;
1986 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1987 size = sizeof(struct dma_edesc);
1988 addr = tx_q->dma_entx;
1989 } else {
1990 size = sizeof(struct dma_desc);
1991 addr = tx_q->dma_tx;
1992 }
1993
1994 size *= dma_conf->dma_tx_size;
1995
1996 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1997
1998 kfree(tx_q->tx_skbuff_dma);
1999 kfree(tx_q->tx_skbuff);
2000 }
2001
2002 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2003 struct stmmac_dma_conf *dma_conf)
2004 {
2005 u32 tx_count = priv->plat->tx_queues_to_use;
2006 u32 queue;
2007
2008 /* Free TX queue resources */
2009 for (queue = 0; queue < tx_count; queue++)
2010 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2011 }
2012
2013 /**
2014 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2015 * @priv: private structure
2016 * @dma_conf: structure to take the dma data
2017 * @queue: RX queue index
2018  * Description: according to which descriptor can be used (extended or basic),
2019  * this function allocates the resources for the RX path of this queue:
2020  * the descriptor ring, the buffer bookkeeping array and the page pool
2021  * that allows the zero-copy mechanism.
2022 */
2023 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2024 struct stmmac_dma_conf *dma_conf,
2025 u32 queue)
2026 {
2027 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2028 struct stmmac_channel *ch = &priv->channel[queue];
2029 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2030 struct page_pool_params pp_params = { 0 };
2031 unsigned int num_pages;
2032 unsigned int napi_id;
2033 int ret;
2034
2035 rx_q->queue_index = queue;
2036 rx_q->priv_data = priv;
2037
2038 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2039 pp_params.pool_size = dma_conf->dma_rx_size;
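	/* Derive the page-pool page order from the configured RX buffer size,
	 * so that one pool page backs one RX buffer.
	 */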
2040 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2041 pp_params.order = ilog2(num_pages);
2042 pp_params.nid = dev_to_node(priv->device);
2043 pp_params.dev = priv->device;
2044 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2045 pp_params.offset = stmmac_rx_offset(priv);
2046 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2047
2048 rx_q->page_pool = page_pool_create(&pp_params);
2049 if (IS_ERR(rx_q->page_pool)) {
2050 ret = PTR_ERR(rx_q->page_pool);
2051 rx_q->page_pool = NULL;
2052 return ret;
2053 }
2054
2055 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2056 sizeof(*rx_q->buf_pool),
2057 GFP_KERNEL);
2058 if (!rx_q->buf_pool)
2059 return -ENOMEM;
2060
2061 if (priv->extend_desc) {
2062 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2063 dma_conf->dma_rx_size *
2064 sizeof(struct dma_extended_desc),
2065 &rx_q->dma_rx_phy,
2066 GFP_KERNEL);
2067 if (!rx_q->dma_erx)
2068 return -ENOMEM;
2069
2070 } else {
2071 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2072 dma_conf->dma_rx_size *
2073 sizeof(struct dma_desc),
2074 &rx_q->dma_rx_phy,
2075 GFP_KERNEL);
2076 if (!rx_q->dma_rx)
2077 return -ENOMEM;
2078 }
2079
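	/* Register the XDP RxQ against the NAPI instance that will actually
	 * service this queue: the combined rx/tx NAPI when AF_XDP zero-copy
	 * is enabled on it, the plain RX NAPI otherwise.
	 */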
2080 if (stmmac_xdp_is_enabled(priv) &&
2081 test_bit(queue, priv->af_xdp_zc_qps))
2082 napi_id = ch->rxtx_napi.napi_id;
2083 else
2084 napi_id = ch->rx_napi.napi_id;
2085
2086 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2087 rx_q->queue_index,
2088 napi_id);
2089 if (ret) {
2090 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2091 return -EINVAL;
2092 }
2093
2094 return 0;
2095 }
2096
2097 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2098 struct stmmac_dma_conf *dma_conf)
2099 {
2100 u32 rx_count = priv->plat->rx_queues_to_use;
2101 u32 queue;
2102 int ret;
2103
2104 /* RX queues buffers and DMA */
2105 for (queue = 0; queue < rx_count; queue++) {
2106 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2107 if (ret)
2108 goto err_dma;
2109 }
2110
2111 return 0;
2112
2113 err_dma:
2114 free_dma_rx_desc_resources(priv, dma_conf);
2115
2116 return ret;
2117 }
2118
2119 /**
2120 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2121 * @priv: private structure
2122 * @dma_conf: structure to take the dma data
2123 * @queue: TX queue index
2124  * Description: according to which descriptor can be used (extended or basic),
2125  * this function allocates the resources for the TX path of this queue:
2126  * the descriptor ring and the per-descriptor bookkeeping arrays
2127  * (tx_skbuff and tx_skbuff_dma).
2128 */
2129 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2130 struct stmmac_dma_conf *dma_conf,
2131 u32 queue)
2132 {
2133 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2134 size_t size;
2135 void *addr;
2136
2137 tx_q->queue_index = queue;
2138 tx_q->priv_data = priv;
2139
2140 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2141 sizeof(*tx_q->tx_skbuff_dma),
2142 GFP_KERNEL);
2143 if (!tx_q->tx_skbuff_dma)
2144 return -ENOMEM;
2145
2146 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2147 sizeof(struct sk_buff *),
2148 GFP_KERNEL);
2149 if (!tx_q->tx_skbuff)
2150 return -ENOMEM;
2151
2152 if (priv->extend_desc)
2153 size = sizeof(struct dma_extended_desc);
2154 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2155 size = sizeof(struct dma_edesc);
2156 else
2157 size = sizeof(struct dma_desc);
2158
2159 size *= dma_conf->dma_tx_size;
2160
2161 addr = dma_alloc_coherent(priv->device, size,
2162 &tx_q->dma_tx_phy, GFP_KERNEL);
2163 if (!addr)
2164 return -ENOMEM;
2165
2166 if (priv->extend_desc)
2167 tx_q->dma_etx = addr;
2168 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2169 tx_q->dma_entx = addr;
2170 else
2171 tx_q->dma_tx = addr;
2172
2173 return 0;
2174 }
2175
2176 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2177 struct stmmac_dma_conf *dma_conf)
2178 {
2179 u32 tx_count = priv->plat->tx_queues_to_use;
2180 u32 queue;
2181 int ret;
2182
2183 /* TX queues buffers and DMA */
2184 for (queue = 0; queue < tx_count; queue++) {
2185 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2186 if (ret)
2187 goto err_dma;
2188 }
2189
2190 return 0;
2191
2192 err_dma:
2193 free_dma_tx_desc_resources(priv, dma_conf);
2194 return ret;
2195 }
2196
2197 /**
2198 * alloc_dma_desc_resources - alloc TX/RX resources.
2199 * @priv: private structure
2200 * @dma_conf: structure to take the dma data
2201  * Description: according to which descriptor can be used (extended or basic),
2202  * this function allocates the resources for the TX and RX paths. On the
2203  * receive side, for example, it pre-allocates the RX buffers in order to
2204  * allow the zero-copy mechanism.
2205 */
2206 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2207 struct stmmac_dma_conf *dma_conf)
2208 {
2209 /* RX Allocation */
2210 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2211
2212 if (ret)
2213 return ret;
2214
2215 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2216
2217 return ret;
2218 }
2219
2220 /**
2221 * free_dma_desc_resources - free dma desc resources
2222 * @priv: private structure
2223 * @dma_conf: structure to take the dma data
2224 */
2225 static void free_dma_desc_resources(struct stmmac_priv *priv,
2226 struct stmmac_dma_conf *dma_conf)
2227 {
2228 /* Release the DMA TX socket buffers */
2229 free_dma_tx_desc_resources(priv, dma_conf);
2230
2231 /* Release the DMA RX socket buffers later
2232 * to ensure all pending XDP_TX buffers are returned.
2233 */
2234 free_dma_rx_desc_resources(priv, dma_conf);
2235 }
2236
2237 /**
2238 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2239 * @priv: driver private structure
2240 * Description: It is used for enabling the rx queues in the MAC
2241 */
2242 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2243 {
2244 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2245 int queue;
2246 u8 mode;
2247
2248 for (queue = 0; queue < rx_queues_count; queue++) {
2249 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2250 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2251 }
2252 }
2253
2254 /**
2255 * stmmac_start_rx_dma - start RX DMA channel
2256 * @priv: driver private structure
2257 * @chan: RX channel index
2258 * Description:
2259 * This starts a RX DMA channel
2260 */
2261 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2264 stmmac_start_rx(priv, priv->ioaddr, chan);
2265 }
2266
2267 /**
2268 * stmmac_start_tx_dma - start TX DMA channel
2269 * @priv: driver private structure
2270 * @chan: TX channel index
2271 * Description:
2272 * This starts a TX DMA channel
2273 */
2274 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2277 stmmac_start_tx(priv, priv->ioaddr, chan);
2278 }
2279
2280 /**
2281 * stmmac_stop_rx_dma - stop RX DMA channel
2282 * @priv: driver private structure
2283 * @chan: RX channel index
2284 * Description:
2285 * This stops a RX DMA channel
2286 */
2287 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2290 stmmac_stop_rx(priv, priv->ioaddr, chan);
2291 }
2292
2293 /**
2294 * stmmac_stop_tx_dma - stop TX DMA channel
2295 * @priv: driver private structure
2296 * @chan: TX channel index
2297 * Description:
2298 * This stops a TX DMA channel
2299 */
2300 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2301 {
2302 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2303 stmmac_stop_tx(priv, priv->ioaddr, chan);
2304 }
2305
2306 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2307 {
2308 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2309 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2310 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2311 u32 chan;
2312
2313 for (chan = 0; chan < dma_csr_ch; chan++) {
2314 struct stmmac_channel *ch = &priv->channel[chan];
2315 unsigned long flags;
2316
2317 spin_lock_irqsave(&ch->lock, flags);
2318 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2319 spin_unlock_irqrestore(&ch->lock, flags);
2320 }
2321 }
2322
2323 /**
2324 * stmmac_start_all_dma - start all RX and TX DMA channels
2325 * @priv: driver private structure
2326 * Description:
2327 * This starts all the RX and TX DMA channels
2328 */
2329 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2330 {
2331 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2332 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2333 u32 chan = 0;
2334
2335 for (chan = 0; chan < rx_channels_count; chan++)
2336 stmmac_start_rx_dma(priv, chan);
2337
2338 for (chan = 0; chan < tx_channels_count; chan++)
2339 stmmac_start_tx_dma(priv, chan);
2340 }
2341
2342 /**
2343 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2344 * @priv: driver private structure
2345 * Description:
2346 * This stops the RX and TX DMA channels
2347 */
2348 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2349 {
2350 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2351 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2352 u32 chan = 0;
2353
2354 for (chan = 0; chan < rx_channels_count; chan++)
2355 stmmac_stop_rx_dma(priv, chan);
2356
2357 for (chan = 0; chan < tx_channels_count; chan++)
2358 stmmac_stop_tx_dma(priv, chan);
2359 }
2360
2361 /**
2362 * stmmac_dma_operation_mode - HW DMA operation mode
2363 * @priv: driver private structure
2364 * Description: it is used for configuring the DMA operation mode register in
2365 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2366 */
2367 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2368 {
2369 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2370 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2371 int rxfifosz = priv->plat->rx_fifo_size;
2372 int txfifosz = priv->plat->tx_fifo_size;
2373 u32 txmode = 0;
2374 u32 rxmode = 0;
2375 u32 chan = 0;
2376 u8 qmode = 0;
2377
2378 if (rxfifosz == 0)
2379 rxfifosz = priv->dma_cap.rx_fifo_size;
2380 if (txfifosz == 0)
2381 txfifosz = priv->dma_cap.tx_fifo_size;
2382
2383 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2384 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2385 rxfifosz /= rx_channels_count;
2386 txfifosz /= tx_channels_count;
2387 }
2388
2389 if (priv->plat->force_thresh_dma_mode) {
2390 txmode = tc;
2391 rxmode = tc;
2392 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2393 		/*
2394 		 * In case of GMAC, SF mode can be enabled
2395 		 * to perform the TX checksum offload (COE) in HW. This depends on:
2396 		 * 1) TX COE actually being supported
2397 		 * 2) there being no broken Jumbo frame support that requires
2398 		 * the csum not to be inserted in the TDES.
2399 		 */
2400 txmode = SF_DMA_MODE;
2401 rxmode = SF_DMA_MODE;
2402 priv->xstats.threshold = SF_DMA_MODE;
2403 } else {
2404 txmode = tc;
2405 rxmode = SF_DMA_MODE;
2406 }
2407
2408 /* configure all channels */
2409 for (chan = 0; chan < rx_channels_count; chan++) {
2410 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2411 u32 buf_size;
2412
2413 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2414
2415 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2416 rxfifosz, qmode);
2417
2418 if (rx_q->xsk_pool) {
2419 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2420 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 buf_size,
2422 chan);
2423 } else {
2424 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2425 priv->dma_conf.dma_buf_sz,
2426 chan);
2427 }
2428 }
2429
2430 for (chan = 0; chan < tx_channels_count; chan++) {
2431 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2432
2433 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2434 txfifosz, qmode);
2435 }
2436 }
2437
2438 static void stmmac_xsk_request_timestamp(void *_priv)
2439 {
2440 struct stmmac_metadata_request *meta_req = _priv;
2441
2442 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2443 *meta_req->set_ic = true;
2444 }
2445
2446 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2447 {
2448 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2449 struct stmmac_priv *priv = tx_compl->priv;
2450 struct dma_desc *desc = tx_compl->desc;
2451 bool found = false;
2452 u64 ns = 0;
2453
2454 if (!priv->hwts_tx_en)
2455 return 0;
2456
2457 /* check tx tstamp status */
2458 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2459 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2460 found = true;
2461 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2462 found = true;
2463 }
2464
2465 if (found) {
2466 ns -= priv->plat->cdc_error_adj;
2467 return ns_to_ktime(ns);
2468 }
2469
2470 return 0;
2471 }
2472
2473 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2474 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2475 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2476 };
2477
2478 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2479 {
2480 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2481 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2482 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2483 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2484 unsigned int entry = tx_q->cur_tx;
2485 struct dma_desc *tx_desc = NULL;
2486 struct xdp_desc xdp_desc;
2487 bool work_done = true;
2488 u32 tx_set_ic_bit = 0;
2489
2490 /* Avoids TX time-out as we are sharing with slow path */
2491 txq_trans_cond_update(nq);
2492
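	/* Never submit more frames than there are free TX descriptors */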
2493 budget = min(budget, stmmac_tx_avail(priv, queue));
2494
2495 while (budget-- > 0) {
2496 struct stmmac_metadata_request meta_req;
2497 struct xsk_tx_metadata *meta = NULL;
2498 dma_addr_t dma_addr;
2499 bool set_ic;
2500
2501 		/* We share the ring with the slow path, so stop XSK TX desc
2502 		 * submission when the available TX ring space falls below the threshold.
2503 		 */
2504 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2505 !netif_carrier_ok(priv->dev)) {
2506 work_done = false;
2507 break;
2508 }
2509
2510 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2511 break;
2512
2513 if (priv->est && priv->est->enable &&
2514 priv->est->max_sdu[queue] &&
2515 xdp_desc.len > priv->est->max_sdu[queue]) {
2516 priv->xstats.max_sdu_txq_drop[queue]++;
2517 continue;
2518 }
2519
2520 if (likely(priv->extend_desc))
2521 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2522 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2523 tx_desc = &tx_q->dma_entx[entry].basic;
2524 else
2525 tx_desc = tx_q->dma_tx + entry;
2526
2527 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2528 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2529 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2530
2531 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2532
2533 		/* To return XDP buffer to XSK pool, we simply call
2534 * xsk_tx_completed(), so we don't need to fill up
2535 * 'buf' and 'xdpf'.
2536 */
2537 tx_q->tx_skbuff_dma[entry].buf = 0;
2538 tx_q->xdpf[entry] = NULL;
2539
2540 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2541 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2542 tx_q->tx_skbuff_dma[entry].last_segment = true;
2543 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2544
2545 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2546
2547 tx_q->tx_count_frames++;
2548
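		/* Raise the IRQ-on-completion bit only once every tx_coal_frames
		 * descriptors to limit the TX completion interrupt rate; the XSK
		 * metadata request below may still force it on.
		 */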
2549 if (!priv->tx_coal_frames[queue])
2550 set_ic = false;
2551 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2552 set_ic = true;
2553 else
2554 set_ic = false;
2555
2556 meta_req.priv = priv;
2557 meta_req.tx_desc = tx_desc;
2558 meta_req.set_ic = &set_ic;
2559 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2560 &meta_req);
2561 if (set_ic) {
2562 tx_q->tx_count_frames = 0;
2563 stmmac_set_tx_ic(priv, tx_desc);
2564 tx_set_ic_bit++;
2565 }
2566
2567 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2568 true, priv->mode, true, true,
2569 xdp_desc.len);
2570
2571 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2572
2573 xsk_tx_metadata_to_compl(meta,
2574 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2575
2576 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2577 entry = tx_q->cur_tx;
2578 }
2579 u64_stats_update_begin(&txq_stats->napi_syncp);
2580 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2581 u64_stats_update_end(&txq_stats->napi_syncp);
2582
2583 if (tx_desc) {
2584 stmmac_flush_tx_descriptors(priv, queue);
2585 xsk_tx_release(pool);
2586 }
2587
2588 	/* Return true only if both conditions are met:
2589 	 *  a) TX budget is still available
2590 	 *  b) work_done = true, i.e. the XSK TX desc peek ran empty (no more
2591 	 *  pending XSK TX frames to transmit)
2592 	 */
2593 return !!budget && work_done;
2594 }
2595
2596 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2597 {
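	/* On a threshold-triggered TX failure, step the DMA threshold up by 64
	 * while still operating in threshold (non Store-And-Forward) mode and
	 * the current value has not exceeded 256.
	 */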
2598 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2599 tc += 64;
2600
2601 if (priv->plat->force_thresh_dma_mode)
2602 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2603 else
2604 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2605 chan);
2606
2607 priv->xstats.threshold = tc;
2608 }
2609 }
2610
2611 /**
2612 * stmmac_tx_clean - to manage the transmission completion
2613 * @priv: driver private structure
2614 * @budget: napi budget limiting this functions packet handling
2615 * @queue: TX queue index
2616 * @pending_packets: signal to arm the TX coal timer
2617 * Description: it reclaims the transmit resources after transmission completes.
2618  * If some packets still need to be handled due to TX coalescing, set
2619  * pending_packets to true to make NAPI arm the TX coal timer.
2620 */
2621 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2622 bool *pending_packets)
2623 {
2624 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2625 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2626 unsigned int bytes_compl = 0, pkts_compl = 0;
2627 unsigned int entry, xmits = 0, count = 0;
2628 u32 tx_packets = 0, tx_errors = 0;
2629
2630 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2631
2632 tx_q->xsk_frames_done = 0;
2633
2634 entry = tx_q->dirty_tx;
2635
2636 	/* Try to clean all TX complete frames in one shot */
2637 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2638 struct xdp_frame *xdpf;
2639 struct sk_buff *skb;
2640 struct dma_desc *p;
2641 int status;
2642
2643 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2644 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2645 xdpf = tx_q->xdpf[entry];
2646 skb = NULL;
2647 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2648 xdpf = NULL;
2649 skb = tx_q->tx_skbuff[entry];
2650 } else {
2651 xdpf = NULL;
2652 skb = NULL;
2653 }
2654
2655 if (priv->extend_desc)
2656 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2657 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2658 p = &tx_q->dma_entx[entry].basic;
2659 else
2660 p = tx_q->dma_tx + entry;
2661
2662 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2663 /* Check if the descriptor is owned by the DMA */
2664 if (unlikely(status & tx_dma_own))
2665 break;
2666
2667 count++;
2668
2669 /* Make sure descriptor fields are read after reading
2670 * the own bit.
2671 */
2672 dma_rmb();
2673
2674 /* Just consider the last segment and ...*/
2675 if (likely(!(status & tx_not_ls))) {
2676 /* ... verify the status error condition */
2677 if (unlikely(status & tx_err)) {
2678 tx_errors++;
2679 if (unlikely(status & tx_err_bump_tc))
2680 stmmac_bump_dma_threshold(priv, queue);
2681 } else {
2682 tx_packets++;
2683 }
2684 if (skb) {
2685 stmmac_get_tx_hwtstamp(priv, p, skb);
2686 } else if (tx_q->xsk_pool &&
2687 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2688 struct stmmac_xsk_tx_complete tx_compl = {
2689 .priv = priv,
2690 .desc = p,
2691 };
2692
2693 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2694 &stmmac_xsk_tx_metadata_ops,
2695 &tx_compl);
2696 }
2697 }
2698
2699 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2700 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2701 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2702 dma_unmap_page(priv->device,
2703 tx_q->tx_skbuff_dma[entry].buf,
2704 tx_q->tx_skbuff_dma[entry].len,
2705 DMA_TO_DEVICE);
2706 else
2707 dma_unmap_single(priv->device,
2708 tx_q->tx_skbuff_dma[entry].buf,
2709 tx_q->tx_skbuff_dma[entry].len,
2710 DMA_TO_DEVICE);
2711 tx_q->tx_skbuff_dma[entry].buf = 0;
2712 tx_q->tx_skbuff_dma[entry].len = 0;
2713 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2714 }
2715
2716 stmmac_clean_desc3(priv, tx_q, p);
2717
2718 tx_q->tx_skbuff_dma[entry].last_segment = false;
2719 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2720
2721 if (xdpf &&
2722 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2723 xdp_return_frame_rx_napi(xdpf);
2724 tx_q->xdpf[entry] = NULL;
2725 }
2726
2727 if (xdpf &&
2728 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2729 xdp_return_frame(xdpf);
2730 tx_q->xdpf[entry] = NULL;
2731 }
2732
2733 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2734 tx_q->xsk_frames_done++;
2735
2736 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2737 if (likely(skb)) {
2738 pkts_compl++;
2739 bytes_compl += skb->len;
2740 dev_consume_skb_any(skb);
2741 tx_q->tx_skbuff[entry] = NULL;
2742 }
2743 }
2744
2745 stmmac_release_tx_desc(priv, p, priv->mode);
2746
2747 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2748 }
2749 tx_q->dirty_tx = entry;
2750
2751 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2752 pkts_compl, bytes_compl);
2753
2754 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2755 queue))) &&
2756 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2757
2758 netif_dbg(priv, tx_done, priv->dev,
2759 "%s: restart transmit\n", __func__);
2760 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2761 }
2762
2763 if (tx_q->xsk_pool) {
2764 bool work_done;
2765
2766 if (tx_q->xsk_frames_done)
2767 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2768
2769 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2770 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2771
2772 /* For XSK TX, we try to send as many as possible.
2773 * If XSK work done (XSK TX desc empty and budget still
2774 * available), return "budget - 1" to reenable TX IRQ.
2775 * Else, return "budget" to make NAPI continue polling.
2776 */
2777 work_done = stmmac_xdp_xmit_zc(priv, queue,
2778 STMMAC_XSK_TX_BUDGET_MAX);
2779 if (work_done)
2780 xmits = budget - 1;
2781 else
2782 xmits = budget;
2783 }
2784
2785 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2786 priv->eee_sw_timer_en) {
2787 if (stmmac_enable_eee_mode(priv))
2788 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2789 }
2790
2791 /* We still have pending packets, let's call for a new scheduling */
2792 if (tx_q->dirty_tx != tx_q->cur_tx)
2793 *pending_packets = true;
2794
2795 u64_stats_update_begin(&txq_stats->napi_syncp);
2796 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2797 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2798 u64_stats_inc(&txq_stats->napi.tx_clean);
2799 u64_stats_update_end(&txq_stats->napi_syncp);
2800
2801 priv->xstats.tx_errors += tx_errors;
2802
2803 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2804
2805 /* Combine decisions from TX clean and XSK TX */
2806 return max(count, xmits);
2807 }
2808
2809 /**
2810 * stmmac_tx_err - to manage the tx error
2811 * @priv: driver private structure
2812 * @chan: channel index
2813 * Description: it cleans the descriptors and restarts the transmission
2814 * in case of transmission errors.
2815 */
2816 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2817 {
2818 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2819
2820 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2821
2822 stmmac_stop_tx_dma(priv, chan);
2823 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2824 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2825 stmmac_reset_tx_queue(priv, chan);
2826 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2827 tx_q->dma_tx_phy, chan);
2828 stmmac_start_tx_dma(priv, chan);
2829
2830 priv->xstats.tx_errors++;
2831 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2832 }
2833
2834 /**
2835 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2836 * @priv: driver private structure
2837 * @txmode: TX operating mode
2838 * @rxmode: RX operating mode
2839 * @chan: channel index
2840  * Description: it is used for configuring the DMA operation mode at
2841 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2842 * mode.
2843 */
2844 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2845 u32 rxmode, u32 chan)
2846 {
2847 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2848 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2849 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2850 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2851 int rxfifosz = priv->plat->rx_fifo_size;
2852 int txfifosz = priv->plat->tx_fifo_size;
2853
2854 if (rxfifosz == 0)
2855 rxfifosz = priv->dma_cap.rx_fifo_size;
2856 if (txfifosz == 0)
2857 txfifosz = priv->dma_cap.tx_fifo_size;
2858
2859 /* Adjust for real per queue fifo size */
2860 rxfifosz /= rx_channels_count;
2861 txfifosz /= tx_channels_count;
2862
2863 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2864 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2865 }
2866
2867 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2868 {
2869 int ret;
2870
2871 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2872 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2873 if (ret && (ret != -EINVAL)) {
2874 stmmac_global_err(priv);
2875 return true;
2876 }
2877
2878 return false;
2879 }
2880
2881 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2882 {
2883 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2884 &priv->xstats, chan, dir);
2885 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2886 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2887 struct stmmac_channel *ch = &priv->channel[chan];
2888 struct napi_struct *rx_napi;
2889 struct napi_struct *tx_napi;
2890 unsigned long flags;
2891
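	/* Queues bound to an AF_XDP zero-copy pool are serviced by the
	 * combined rx/tx NAPI instance.
	 */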
2892 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2893 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2894
2895 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2896 if (napi_schedule_prep(rx_napi)) {
2897 spin_lock_irqsave(&ch->lock, flags);
2898 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2899 spin_unlock_irqrestore(&ch->lock, flags);
2900 __napi_schedule(rx_napi);
2901 }
2902 }
2903
2904 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2905 if (napi_schedule_prep(tx_napi)) {
2906 spin_lock_irqsave(&ch->lock, flags);
2907 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2908 spin_unlock_irqrestore(&ch->lock, flags);
2909 __napi_schedule(tx_napi);
2910 }
2911 }
2912
2913 return status;
2914 }
2915
2916 /**
2917 * stmmac_dma_interrupt - DMA ISR
2918 * @priv: driver private structure
2919 * Description: this is the DMA ISR. It is called by the main ISR.
2920  * It calls the dwmac dma routine and schedules the poll method when
2921  * there is work that can be done.
2922 */
2923 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2924 {
2925 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2926 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2927 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2928 tx_channel_count : rx_channel_count;
2929 u32 chan;
2930 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2931
2932 /* Make sure we never check beyond our status buffer. */
2933 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2934 channels_to_check = ARRAY_SIZE(status);
2935
2936 for (chan = 0; chan < channels_to_check; chan++)
2937 status[chan] = stmmac_napi_check(priv, chan,
2938 DMA_DIR_RXTX);
2939
2940 for (chan = 0; chan < tx_channel_count; chan++) {
2941 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2942 /* Try to bump up the dma threshold on this failure */
2943 stmmac_bump_dma_threshold(priv, chan);
2944 } else if (unlikely(status[chan] == tx_hard_error)) {
2945 stmmac_tx_err(priv, chan);
2946 }
2947 }
2948 }
2949
2950 /**
2951 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2952 * @priv: driver private structure
2953  * Description: this masks the MMC irq, since the counters are managed in SW.
2954 */
2955 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2956 {
2957 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2958 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2959
2960 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2961
2962 if (priv->dma_cap.rmon) {
2963 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2964 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2965 } else
2966 netdev_info(priv->dev, "No MAC Management Counters available\n");
2967 }
2968
2969 /**
2970 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2971 * @priv: driver private structure
2972 * Description:
2973  * newer GMAC chip generations have a register to indicate the
2974  * presence of optional features/functions.
2975  * This can also be used to override the values passed through the
2976  * platform, which is necessary for old MAC10/100 and GMAC chips.
2977 */
2978 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2979 {
2980 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2981 }
2982
2983 /**
2984 * stmmac_check_ether_addr - check if the MAC addr is valid
2985 * @priv: driver private structure
2986 * Description:
2987  * it verifies that the MAC address is valid; if it is not, a random
2988  * MAC address is generated
2989 */
2990 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2991 {
2992 u8 addr[ETH_ALEN];
2993
2994 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2995 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2996 if (is_valid_ether_addr(addr))
2997 eth_hw_addr_set(priv->dev, addr);
2998 else
2999 eth_hw_addr_random(priv->dev);
3000 dev_info(priv->device, "device MAC address %pM\n",
3001 priv->dev->dev_addr);
3002 }
3003 }
3004
3005 /**
3006 * stmmac_init_dma_engine - DMA init.
3007 * @priv: driver private structure
3008 * Description:
3009 * It inits the DMA invoking the specific MAC/GMAC callback.
3010 * Some DMA parameters can be passed from the platform;
3011  * in case these are not passed, a default is kept for the MAC or GMAC.
3012 */
3013 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3014 {
3015 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3016 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3017 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3018 struct stmmac_rx_queue *rx_q;
3019 struct stmmac_tx_queue *tx_q;
3020 u32 chan = 0;
3021 int ret = 0;
3022
3023 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3024 dev_err(priv->device, "Invalid DMA configuration\n");
3025 return -EINVAL;
3026 }
3027
3028 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3029 priv->plat->dma_cfg->atds = 1;
3030
3031 ret = stmmac_reset(priv, priv->ioaddr);
3032 if (ret) {
3033 dev_err(priv->device, "Failed to reset the dma\n");
3034 return ret;
3035 }
3036
3037 /* DMA Configuration */
3038 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3039
3040 if (priv->plat->axi)
3041 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3042
3043 /* DMA CSR Channel configuration */
3044 for (chan = 0; chan < dma_csr_ch; chan++) {
3045 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3046 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3047 }
3048
3049 /* DMA RX Channel Configuration */
3050 for (chan = 0; chan < rx_channels_count; chan++) {
3051 rx_q = &priv->dma_conf.rx_queue[chan];
3052
3053 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3054 rx_q->dma_rx_phy, chan);
3055
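		/* Place the RX tail pointer just past the last descriptor that
		 * actually has a buffer attached.
		 */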
3056 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3057 (rx_q->buf_alloc_num *
3058 sizeof(struct dma_desc));
3059 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3060 rx_q->rx_tail_addr, chan);
3061 }
3062
3063 /* DMA TX Channel Configuration */
3064 for (chan = 0; chan < tx_channels_count; chan++) {
3065 tx_q = &priv->dma_conf.tx_queue[chan];
3066
3067 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3068 tx_q->dma_tx_phy, chan);
3069
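		/* The TX ring starts empty, so the tail pointer begins at the
		 * ring base address.
		 */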
3070 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3071 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3072 tx_q->tx_tail_addr, chan);
3073 }
3074
3075 return ret;
3076 }
3077
3078 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3079 {
3080 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3081 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3082 struct stmmac_channel *ch;
3083 struct napi_struct *napi;
3084
3085 if (!tx_coal_timer)
3086 return;
3087
3088 ch = &priv->channel[tx_q->queue_index];
3089 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3090
3091 /* Arm timer only if napi is not already scheduled.
3092 * Try to cancel any timer if napi is scheduled, timer will be armed
3093 * again in the next scheduled napi.
3094 */
3095 if (unlikely(!napi_is_scheduled(napi)))
3096 hrtimer_start(&tx_q->txtimer,
3097 STMMAC_COAL_TIMER(tx_coal_timer),
3098 HRTIMER_MODE_REL);
3099 else
3100 hrtimer_try_to_cancel(&tx_q->txtimer);
3101 }
3102
3103 /**
3104 * stmmac_tx_timer - mitigation sw timer for tx.
3105 * @t: data pointer
3106 * Description:
3107 * This is the timer handler to directly invoke the stmmac_tx_clean.
3108 */
3109 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3110 {
3111 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3112 struct stmmac_priv *priv = tx_q->priv_data;
3113 struct stmmac_channel *ch;
3114 struct napi_struct *napi;
3115
3116 ch = &priv->channel[tx_q->queue_index];
3117 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3118
3119 if (likely(napi_schedule_prep(napi))) {
3120 unsigned long flags;
3121
3122 spin_lock_irqsave(&ch->lock, flags);
3123 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3124 spin_unlock_irqrestore(&ch->lock, flags);
3125 __napi_schedule(napi);
3126 }
3127
3128 return HRTIMER_NORESTART;
3129 }
3130
3131 /**
3132 * stmmac_init_coalesce - init mitigation options.
3133 * @priv: driver private structure
3134 * Description:
3135 * This inits the coalesce parameters: i.e. timer rate,
3136 * timer handler and default threshold used for enabling the
3137 * interrupt on completion bit.
3138 */
3139 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3140 {
3141 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3142 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3143 u32 chan;
3144
3145 for (chan = 0; chan < tx_channel_count; chan++) {
3146 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3147
3148 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3149 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3150
3151 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3152 tx_q->txtimer.function = stmmac_tx_timer;
3153 }
3154
3155 for (chan = 0; chan < rx_channel_count; chan++)
3156 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3157 }
3158
3159 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3160 {
3161 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3162 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3163 u32 chan;
3164
3165 /* set TX ring length */
3166 for (chan = 0; chan < tx_channels_count; chan++)
3167 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3168 (priv->dma_conf.dma_tx_size - 1), chan);
3169
3170 /* set RX ring length */
3171 for (chan = 0; chan < rx_channels_count; chan++)
3172 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3173 (priv->dma_conf.dma_rx_size - 1), chan);
3174 }
3175
3176 /**
3177 * stmmac_set_tx_queue_weight - Set TX queue weight
3178 * @priv: driver private structure
3179 * Description: It is used for setting TX queues weight
3180 */
3181 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3182 {
3183 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3184 u32 weight;
3185 u32 queue;
3186
3187 for (queue = 0; queue < tx_queues_count; queue++) {
3188 weight = priv->plat->tx_queues_cfg[queue].weight;
3189 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3190 }
3191 }
3192
3193 /**
3194 * stmmac_configure_cbs - Configure CBS in TX queue
3195 * @priv: driver private structure
3196 * Description: It is used for configuring CBS in AVB TX queues
3197 */
3198 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3199 {
3200 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3201 u32 mode_to_use;
3202 u32 queue;
3203
3204 /* queue 0 is reserved for legacy traffic */
3205 for (queue = 1; queue < tx_queues_count; queue++) {
3206 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3207 if (mode_to_use == MTL_QUEUE_DCB)
3208 continue;
3209
3210 stmmac_config_cbs(priv, priv->hw,
3211 priv->plat->tx_queues_cfg[queue].send_slope,
3212 priv->plat->tx_queues_cfg[queue].idle_slope,
3213 priv->plat->tx_queues_cfg[queue].high_credit,
3214 priv->plat->tx_queues_cfg[queue].low_credit,
3215 queue);
3216 }
3217 }
3218
3219 /**
3220 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3221 * @priv: driver private structure
3222 * Description: It is used for mapping RX queues to RX dma channels
3223 */
3224 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3225 {
3226 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3227 u32 queue;
3228 u32 chan;
3229
3230 for (queue = 0; queue < rx_queues_count; queue++) {
3231 chan = priv->plat->rx_queues_cfg[queue].chan;
3232 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3233 }
3234 }
3235
3236 /**
3237 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3238 * @priv: driver private structure
3239 * Description: It is used for configuring the RX Queue Priority
3240 */
3241 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3242 {
3243 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3244 u32 queue;
3245 u32 prio;
3246
3247 for (queue = 0; queue < rx_queues_count; queue++) {
3248 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3249 continue;
3250
3251 prio = priv->plat->rx_queues_cfg[queue].prio;
3252 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3253 }
3254 }
3255
3256 /**
3257 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3258 * @priv: driver private structure
3259 * Description: It is used for configuring the TX Queue Priority
3260 */
3261 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3262 {
3263 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3264 u32 queue;
3265 u32 prio;
3266
3267 for (queue = 0; queue < tx_queues_count; queue++) {
3268 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3269 continue;
3270
3271 prio = priv->plat->tx_queues_cfg[queue].prio;
3272 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3273 }
3274 }
3275
3276 /**
3277 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3278 * @priv: driver private structure
3279 * Description: It is used for configuring the RX queue routing
3280 */
3281 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3282 {
3283 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3284 u32 queue;
3285 u8 packet;
3286
3287 for (queue = 0; queue < rx_queues_count; queue++) {
3288 /* no specific packet type routing specified for the queue */
3289 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3290 continue;
3291
3292 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3293 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3294 }
3295 }
3296
3297 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3298 {
3299 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3300 priv->rss.enable = false;
3301 return;
3302 }
3303
3304 if (priv->dev->features & NETIF_F_RXHASH)
3305 priv->rss.enable = true;
3306 else
3307 priv->rss.enable = false;
3308
3309 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3310 priv->plat->rx_queues_to_use);
3311 }
3312
3313 /**
3314 * stmmac_mtl_configuration - Configure MTL
3315 * @priv: driver private structure
3316  * Description: It is used for configuring MTL
3317 */
3318 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3319 {
3320 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3321 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3322
3323 if (tx_queues_count > 1)
3324 stmmac_set_tx_queue_weight(priv);
3325
3326 /* Configure MTL RX algorithms */
3327 if (rx_queues_count > 1)
3328 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3329 priv->plat->rx_sched_algorithm);
3330
3331 /* Configure MTL TX algorithms */
3332 if (tx_queues_count > 1)
3333 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3334 priv->plat->tx_sched_algorithm);
3335
3336 /* Configure CBS in AVB TX queues */
3337 if (tx_queues_count > 1)
3338 stmmac_configure_cbs(priv);
3339
3340 /* Map RX MTL to DMA channels */
3341 stmmac_rx_queue_dma_chan_map(priv);
3342
3343 /* Enable MAC RX Queues */
3344 stmmac_mac_enable_rx_queues(priv);
3345
3346 /* Set RX priorities */
3347 if (rx_queues_count > 1)
3348 stmmac_mac_config_rx_queues_prio(priv);
3349
3350 /* Set TX priorities */
3351 if (tx_queues_count > 1)
3352 stmmac_mac_config_tx_queues_prio(priv);
3353
3354 /* Set RX routing */
3355 if (rx_queues_count > 1)
3356 stmmac_mac_config_rx_queues_routing(priv);
3357
3358 /* Receive Side Scaling */
3359 if (rx_queues_count > 1)
3360 stmmac_mac_config_rss(priv);
3361 }
3362
3363 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3364 {
3365 if (priv->dma_cap.asp) {
3366 netdev_info(priv->dev, "Enabling Safety Features\n");
3367 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3368 priv->plat->safety_feat_cfg);
3369 } else {
3370 netdev_info(priv->dev, "No Safety Features support found\n");
3371 }
3372 }
3373
3374 /**
3375 * stmmac_hw_setup - setup mac in a usable state.
3376 * @dev : pointer to the device structure.
3377 * @ptp_register: register PTP if set
3378 * Description:
3379  * this is the main function to setup the HW in a usable state: the
3380  * dma engine is reset, the core registers are configured (e.g. AXI,
3381  * Checksum features, timers) and the DMA is made ready to start
3382  * receiving and transmitting.
3383 * Return value:
3384  * 0 on success and an appropriate negative integer, as defined in
3385  * errno.h, on failure.
3386 */
3387 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3388 {
3389 struct stmmac_priv *priv = netdev_priv(dev);
3390 u32 rx_cnt = priv->plat->rx_queues_to_use;
3391 u32 tx_cnt = priv->plat->tx_queues_to_use;
3392 bool sph_en;
3393 u32 chan;
3394 int ret;
3395
3396 /* Make sure RX clock is enabled */
3397 if (priv->hw->phylink_pcs)
3398 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3399
3400 /* DMA initialization and SW reset */
3401 ret = stmmac_init_dma_engine(priv);
3402 if (ret < 0) {
3403 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3404 __func__);
3405 return ret;
3406 }
3407
3408 /* Copy the MAC addr into the HW */
3409 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3410
3411 /* PS and related bits will be programmed according to the speed */
3412 if (priv->hw->pcs) {
3413 int speed = priv->plat->mac_port_sel_speed;
3414
3415 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3416 (speed == SPEED_1000)) {
3417 priv->hw->ps = speed;
3418 } else {
3419 dev_warn(priv->device, "invalid port speed\n");
3420 priv->hw->ps = 0;
3421 }
3422 }
3423
3424 /* Initialize the MAC Core */
3425 stmmac_core_init(priv, priv->hw, dev);
3426
3427 	/* Initialize MTL */
3428 stmmac_mtl_configuration(priv);
3429
3430 /* Initialize Safety Features */
3431 stmmac_safety_feat_configuration(priv);
3432
3433 ret = stmmac_rx_ipc(priv, priv->hw);
3434 if (!ret) {
3435 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3436 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3437 priv->hw->rx_csum = 0;
3438 }
3439
3440 /* Enable the MAC Rx/Tx */
3441 stmmac_mac_set(priv, priv->ioaddr, true);
3442
3443 /* Set the HW DMA mode and the COE */
3444 stmmac_dma_operation_mode(priv);
3445
3446 stmmac_mmc_setup(priv);
3447
3448 if (ptp_register) {
3449 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3450 if (ret < 0)
3451 netdev_warn(priv->dev,
3452 "failed to enable PTP reference clock: %pe\n",
3453 ERR_PTR(ret));
3454 }
3455
3456 ret = stmmac_init_ptp(priv);
3457 if (ret == -EOPNOTSUPP)
3458 netdev_info(priv->dev, "PTP not supported by HW\n");
3459 else if (ret)
3460 netdev_warn(priv->dev, "PTP init failed\n");
3461 else if (ptp_register)
3462 stmmac_ptp_register(priv);
3463
3464 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3465
3466 /* Convert the timer from msec to usec */
3467 if (!priv->tx_lpi_timer)
3468 priv->tx_lpi_timer = eee_timer * 1000;
3469
3470 if (priv->use_riwt) {
3471 u32 queue;
3472
3473 for (queue = 0; queue < rx_cnt; queue++) {
3474 if (!priv->rx_riwt[queue])
3475 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3476
3477 stmmac_rx_watchdog(priv, priv->ioaddr,
3478 priv->rx_riwt[queue], queue);
3479 }
3480 }
3481
3482 if (priv->hw->pcs)
3483 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3484
3485 /* set TX and RX rings length */
3486 stmmac_set_rings_length(priv);
3487
3488 /* Enable TSO */
3489 if (priv->tso) {
3490 for (chan = 0; chan < tx_cnt; chan++) {
3491 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3492
3493 /* TSO and TBS cannot co-exist */
3494 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3495 continue;
3496
3497 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3498 }
3499 }
3500
3501 /* Enable Split Header */
3502 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3503 for (chan = 0; chan < rx_cnt; chan++)
3504 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3505
3506
3507 /* VLAN Tag Insertion */
3508 if (priv->dma_cap.vlins)
3509 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3510
3511 /* TBS */
3512 for (chan = 0; chan < tx_cnt; chan++) {
3513 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3514 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3515
3516 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3517 }
3518
3519 /* Configure real RX and TX queues */
3520 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3521 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3522
3523 /* Start the ball rolling... */
3524 stmmac_start_all_dma(priv);
3525
3526 stmmac_set_hw_vlan_mode(priv, priv->hw);
3527
3528 return 0;
3529 }
3530
3531 static void stmmac_hw_teardown(struct net_device *dev)
3532 {
3533 struct stmmac_priv *priv = netdev_priv(dev);
3534
3535 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3536 }
3537
3538 static void stmmac_free_irq(struct net_device *dev,
3539 enum request_irq_err irq_err, int irq_idx)
3540 {
3541 struct stmmac_priv *priv = netdev_priv(dev);
3542 int j;
3543
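	/* The switch below deliberately falls through: @irq_err records how
	 * far the IRQ request path got before failing, and every IRQ that
	 * was requested before that point is released in reverse order.
	 * REQ_IRQ_ERR_ALL frees everything (the normal teardown path).
	 */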
3544 switch (irq_err) {
3545 case REQ_IRQ_ERR_ALL:
3546 irq_idx = priv->plat->tx_queues_to_use;
3547 fallthrough;
3548 case REQ_IRQ_ERR_TX:
3549 for (j = irq_idx - 1; j >= 0; j--) {
3550 if (priv->tx_irq[j] > 0) {
3551 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3552 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3553 }
3554 }
3555 irq_idx = priv->plat->rx_queues_to_use;
3556 fallthrough;
3557 case REQ_IRQ_ERR_RX:
3558 for (j = irq_idx - 1; j >= 0; j--) {
3559 if (priv->rx_irq[j] > 0) {
3560 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3561 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3562 }
3563 }
3564
3565 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3566 free_irq(priv->sfty_ue_irq, dev);
3567 fallthrough;
3568 case REQ_IRQ_ERR_SFTY_UE:
3569 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3570 free_irq(priv->sfty_ce_irq, dev);
3571 fallthrough;
3572 case REQ_IRQ_ERR_SFTY_CE:
3573 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3574 free_irq(priv->lpi_irq, dev);
3575 fallthrough;
3576 case REQ_IRQ_ERR_LPI:
3577 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3578 free_irq(priv->wol_irq, dev);
3579 fallthrough;
3580 case REQ_IRQ_ERR_SFTY:
3581 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3582 free_irq(priv->sfty_irq, dev);
3583 fallthrough;
3584 case REQ_IRQ_ERR_WOL:
3585 free_irq(dev->irq, dev);
3586 fallthrough;
3587 case REQ_IRQ_ERR_MAC:
3588 case REQ_IRQ_ERR_NO:
3589 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3590 break;
3591 }
3592 }
3593
3594 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3595 {
3596 struct stmmac_priv *priv = netdev_priv(dev);
3597 enum request_irq_err irq_err;
3598 cpumask_t cpu_mask;
3599 int irq_idx = 0;
3600 char *int_name;
3601 int ret;
3602 int i;
3603
3604 /* For common interrupt */
3605 int_name = priv->int_name_mac;
3606 sprintf(int_name, "%s:%s", dev->name, "mac");
3607 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3608 0, int_name, dev);
3609 if (unlikely(ret < 0)) {
3610 netdev_err(priv->dev,
3611 "%s: alloc mac MSI %d (error: %d)\n",
3612 __func__, dev->irq, ret);
3613 irq_err = REQ_IRQ_ERR_MAC;
3614 goto irq_error;
3615 }
3616
3617 	/* Request the Wake IRQ in case another line
3618 	 * is used for WoL
3619 	 */
3620 priv->wol_irq_disabled = true;
3621 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3622 int_name = priv->int_name_wol;
3623 sprintf(int_name, "%s:%s", dev->name, "wol");
3624 ret = request_irq(priv->wol_irq,
3625 stmmac_mac_interrupt,
3626 0, int_name, dev);
3627 if (unlikely(ret < 0)) {
3628 netdev_err(priv->dev,
3629 "%s: alloc wol MSI %d (error: %d)\n",
3630 __func__, priv->wol_irq, ret);
3631 irq_err = REQ_IRQ_ERR_WOL;
3632 goto irq_error;
3633 }
3634 }
3635
3636 	/* Request the LPI IRQ in case another line
3637 	 * is used for LPI
3638 	 */
3639 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3640 int_name = priv->int_name_lpi;
3641 sprintf(int_name, "%s:%s", dev->name, "lpi");
3642 ret = request_irq(priv->lpi_irq,
3643 stmmac_mac_interrupt,
3644 0, int_name, dev);
3645 if (unlikely(ret < 0)) {
3646 netdev_err(priv->dev,
3647 "%s: alloc lpi MSI %d (error: %d)\n",
3648 __func__, priv->lpi_irq, ret);
3649 irq_err = REQ_IRQ_ERR_LPI;
3650 goto irq_error;
3651 }
3652 }
3653
3654 	/* Request the common Safety Feature Correctable/Uncorrectable
3655 	 * Error line in case another line is used
3656 	 */
3657 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3658 int_name = priv->int_name_sfty;
3659 sprintf(int_name, "%s:%s", dev->name, "safety");
3660 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3661 0, int_name, dev);
3662 if (unlikely(ret < 0)) {
3663 netdev_err(priv->dev,
3664 "%s: alloc sfty MSI %d (error: %d)\n",
3665 __func__, priv->sfty_irq, ret);
3666 irq_err = REQ_IRQ_ERR_SFTY;
3667 goto irq_error;
3668 }
3669 }
3670
3671 	/* Request the Safety Feature Correctable Error line in
3672 	 * case another line is used
3673 	 */
3674 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3675 int_name = priv->int_name_sfty_ce;
3676 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3677 ret = request_irq(priv->sfty_ce_irq,
3678 stmmac_safety_interrupt,
3679 0, int_name, dev);
3680 if (unlikely(ret < 0)) {
3681 netdev_err(priv->dev,
3682 "%s: alloc sfty ce MSI %d (error: %d)\n",
3683 __func__, priv->sfty_ce_irq, ret);
3684 irq_err = REQ_IRQ_ERR_SFTY_CE;
3685 goto irq_error;
3686 }
3687 }
3688
3689 	/* Request the Safety Feature Uncorrectable Error line in
3690 	 * case another line is used
3691 	 */
3692 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3693 int_name = priv->int_name_sfty_ue;
3694 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3695 ret = request_irq(priv->sfty_ue_irq,
3696 stmmac_safety_interrupt,
3697 0, int_name, dev);
3698 if (unlikely(ret < 0)) {
3699 netdev_err(priv->dev,
3700 "%s: alloc sfty ue MSI %d (error: %d)\n",
3701 __func__, priv->sfty_ue_irq, ret);
3702 irq_err = REQ_IRQ_ERR_SFTY_UE;
3703 goto irq_error;
3704 }
3705 }
3706
3707 /* Request Rx MSI irq */
3708 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3709 if (i >= MTL_MAX_RX_QUEUES)
3710 break;
3711 if (priv->rx_irq[i] == 0)
3712 continue;
3713
3714 int_name = priv->int_name_rx_irq[i];
3715 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3716 ret = request_irq(priv->rx_irq[i],
3717 stmmac_msi_intr_rx,
3718 0, int_name, &priv->dma_conf.rx_queue[i]);
3719 if (unlikely(ret < 0)) {
3720 netdev_err(priv->dev,
3721 "%s: alloc rx-%d MSI %d (error: %d)\n",
3722 __func__, i, priv->rx_irq[i], ret);
3723 irq_err = REQ_IRQ_ERR_RX;
3724 irq_idx = i;
3725 goto irq_error;
3726 }
3727 cpumask_clear(&cpu_mask);
3728 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3730 }
3731
3732 /* Request Tx MSI irq */
3733 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3734 if (i >= MTL_MAX_TX_QUEUES)
3735 break;
3736 if (priv->tx_irq[i] == 0)
3737 continue;
3738
3739 int_name = priv->int_name_tx_irq[i];
3740 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3741 ret = request_irq(priv->tx_irq[i],
3742 stmmac_msi_intr_tx,
3743 0, int_name, &priv->dma_conf.tx_queue[i]);
3744 if (unlikely(ret < 0)) {
3745 netdev_err(priv->dev,
3746 "%s: alloc tx-%d MSI %d (error: %d)\n",
3747 __func__, i, priv->tx_irq[i], ret);
3748 irq_err = REQ_IRQ_ERR_TX;
3749 irq_idx = i;
3750 goto irq_error;
3751 }
3752 cpumask_clear(&cpu_mask);
3753 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3754 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3755 }
3756
3757 return 0;
3758
3759 irq_error:
3760 stmmac_free_irq(dev, irq_err, irq_idx);
3761 return ret;
3762 }
3763
3764 static int stmmac_request_irq_single(struct net_device *dev)
3765 {
3766 struct stmmac_priv *priv = netdev_priv(dev);
3767 enum request_irq_err irq_err;
3768 int ret;
3769
3770 ret = request_irq(dev->irq, stmmac_interrupt,
3771 IRQF_SHARED, dev->name, dev);
3772 if (unlikely(ret < 0)) {
3773 netdev_err(priv->dev,
3774 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3775 __func__, dev->irq, ret);
3776 irq_err = REQ_IRQ_ERR_MAC;
3777 goto irq_error;
3778 }
3779
3780 	/* Request the Wake IRQ in case another line
3781 	 * is used for WoL
3782 	 */
3783 priv->wol_irq_disabled = true;
3784 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3785 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3786 IRQF_SHARED, dev->name, dev);
3787 if (unlikely(ret < 0)) {
3788 netdev_err(priv->dev,
3789 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3790 __func__, priv->wol_irq, ret);
3791 irq_err = REQ_IRQ_ERR_WOL;
3792 goto irq_error;
3793 }
3794 }
3795
3796 	/* Request the LPI IRQ in case another line is used for LPI */
3797 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3798 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3799 IRQF_SHARED, dev->name, dev);
3800 if (unlikely(ret < 0)) {
3801 netdev_err(priv->dev,
3802 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3803 __func__, priv->lpi_irq, ret);
3804 irq_err = REQ_IRQ_ERR_LPI;
3805 goto irq_error;
3806 }
3807 }
3808
3809 	/* Request the common Safety Feature Correctable/Uncorrectable
3810 	 * Error line in case another line is used
3811 	 */
3812 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3813 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3814 IRQF_SHARED, dev->name, dev);
3815 if (unlikely(ret < 0)) {
3816 netdev_err(priv->dev,
3817 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3818 __func__, priv->sfty_irq, ret);
3819 irq_err = REQ_IRQ_ERR_SFTY;
3820 goto irq_error;
3821 }
3822 }
3823
3824 return 0;
3825
3826 irq_error:
3827 stmmac_free_irq(dev, irq_err, 0);
3828 return ret;
3829 }
3830
3831 static int stmmac_request_irq(struct net_device *dev)
3832 {
3833 struct stmmac_priv *priv = netdev_priv(dev);
3834 int ret;
3835
3836 /* Request the IRQ lines */
3837 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3838 ret = stmmac_request_irq_multi_msi(dev);
3839 else
3840 ret = stmmac_request_irq_single(dev);
3841
3842 return ret;
3843 }
3844
3845 /**
3846 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3847 * @priv: driver private structure
3848 * @mtu: MTU to setup the dma queue and buf with
3849 * Description: Allocate and generate a dma_conf based on the provided MTU.
3850  * Allocate the Tx/Rx DMA queues and initialize them.
3851 * Return value:
3852 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3853 */
3854 static struct stmmac_dma_conf *
3855 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3856 {
3857 struct stmmac_dma_conf *dma_conf;
3858 int chan, bfsize, ret;
3859
3860 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3861 if (!dma_conf) {
3862 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3863 __func__);
3864 return ERR_PTR(-ENOMEM);
3865 }
3866
3867 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3868 if (bfsize < 0)
3869 bfsize = 0;
3870
3871 if (bfsize < BUF_SIZE_16KiB)
3872 bfsize = stmmac_set_bfsize(mtu, 0);
3873
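	/* At this point bfsize holds the RX buffer size: the 16 KiB jumbo
	 * size when the MTU requires it and the descriptor/ring mode
	 * supports it, otherwise a smaller size derived from the MTU
	 * (for instance the default 1536-byte buffer for a 1500-byte MTU).
	 */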
3874 dma_conf->dma_buf_sz = bfsize;
3875 	/* Choose the TX/RX ring sizes from the ones already defined in the
3876 	 * priv struct, if any.
3877 	 */
3878 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3879 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3880
3881 if (!dma_conf->dma_tx_size)
3882 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3883 if (!dma_conf->dma_rx_size)
3884 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3885
3886 /* Earlier check for TBS */
3887 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3888 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3889 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3890
3891 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3892 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3893 }
3894
3895 ret = alloc_dma_desc_resources(priv, dma_conf);
3896 if (ret < 0) {
3897 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3898 __func__);
3899 goto alloc_error;
3900 }
3901
3902 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3903 if (ret < 0) {
3904 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3905 __func__);
3906 goto init_error;
3907 }
3908
3909 return dma_conf;
3910
3911 init_error:
3912 free_dma_desc_resources(priv, dma_conf);
3913 alloc_error:
3914 kfree(dma_conf);
3915 return ERR_PTR(ret);
3916 }
3917
3918 /**
3919 * __stmmac_open - open entry point of the driver
3920 * @dev : pointer to the device structure.
3921 * @dma_conf : structure to take the dma data
3922 * Description:
3923 * This function is the open entry point of the driver.
3924 * Return value:
3925 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3926 * file on failure.
3927 */
3928 static int __stmmac_open(struct net_device *dev,
3929 struct stmmac_dma_conf *dma_conf)
3930 {
3931 struct stmmac_priv *priv = netdev_priv(dev);
3932 int mode = priv->plat->phy_interface;
3933 u32 chan;
3934 int ret;
3935
3936 ret = pm_runtime_resume_and_get(priv->device);
3937 if (ret < 0)
3938 return ret;
3939
3940 if ((!priv->hw->xpcs ||
3941 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3942 ret = stmmac_init_phy(dev);
3943 if (ret) {
3944 netdev_err(priv->dev,
3945 "%s: Cannot attach to PHY (error: %d)\n",
3946 __func__, ret);
3947 goto init_phy_error;
3948 }
3949 }
3950
3951 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3952
3953 buf_sz = dma_conf->dma_buf_sz;
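	/* Carry the per-queue TBS enable state over from the previous
	 * configuration so that a ring reallocation (e.g. on MTU change)
	 * does not silently drop an earlier ETF/TBS setup.
	 */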
3954 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3955 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3956 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3957 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3958
3959 stmmac_reset_queues_param(priv);
3960
3961 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3962 priv->plat->serdes_powerup) {
3963 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3964 if (ret < 0) {
3965 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3966 __func__);
3967 goto init_error;
3968 }
3969 }
3970
3971 ret = stmmac_hw_setup(dev, true);
3972 if (ret < 0) {
3973 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3974 goto init_error;
3975 }
3976
3977 stmmac_init_coalesce(priv);
3978
3979 phylink_start(priv->phylink);
3980 /* We may have called phylink_speed_down before */
3981 phylink_speed_up(priv->phylink);
3982
3983 ret = stmmac_request_irq(dev);
3984 if (ret)
3985 goto irq_error;
3986
3987 stmmac_enable_all_queues(priv);
3988 netif_tx_start_all_queues(priv->dev);
3989 stmmac_enable_all_dma_irq(priv);
3990
3991 return 0;
3992
3993 irq_error:
3994 phylink_stop(priv->phylink);
3995
3996 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3997 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3998
3999 stmmac_hw_teardown(dev);
4000 init_error:
4001 phylink_disconnect_phy(priv->phylink);
4002 init_phy_error:
4003 pm_runtime_put(priv->device);
4004 return ret;
4005 }
4006
4007 static int stmmac_open(struct net_device *dev)
4008 {
4009 struct stmmac_priv *priv = netdev_priv(dev);
4010 struct stmmac_dma_conf *dma_conf;
4011 int ret;
4012
4013 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4014 if (IS_ERR(dma_conf))
4015 return PTR_ERR(dma_conf);
4016
4017 ret = __stmmac_open(dev, dma_conf);
4018 if (ret)
4019 free_dma_desc_resources(priv, dma_conf);
4020
4021 kfree(dma_conf);
4022 return ret;
4023 }
4024
4025 /**
4026 * stmmac_release - close entry point of the driver
4027 * @dev : device pointer.
4028 * Description:
4029 * This is the stop entry point of the driver.
4030 */
4031 static int stmmac_release(struct net_device *dev)
4032 {
4033 struct stmmac_priv *priv = netdev_priv(dev);
4034 u32 chan;
4035
4036 if (device_may_wakeup(priv->device))
4037 phylink_speed_down(priv->phylink, false);
4038 /* Stop and disconnect the PHY */
4039 phylink_stop(priv->phylink);
4040 phylink_disconnect_phy(priv->phylink);
4041
4042 stmmac_disable_all_queues(priv);
4043
4044 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4045 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4046
4047 netif_tx_disable(dev);
4048
4049 /* Free the IRQ lines */
4050 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4051
4052 if (priv->eee_enabled) {
4053 priv->tx_path_in_lpi_mode = false;
4054 del_timer_sync(&priv->eee_ctrl_timer);
4055 }
4056
4057 /* Stop TX/RX DMA and clear the descriptors */
4058 stmmac_stop_all_dma(priv);
4059
4060 /* Release and free the Rx/Tx resources */
4061 free_dma_desc_resources(priv, &priv->dma_conf);
4062
4063 /* Disable the MAC Rx/Tx */
4064 stmmac_mac_set(priv, priv->ioaddr, false);
4065
4066 	/* Power down the SerDes if there is one */
4067 if (priv->plat->serdes_powerdown)
4068 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4069
4070 stmmac_release_ptp(priv);
4071
4072 if (priv->dma_cap.fpesel)
4073 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4074
4075 pm_runtime_put(priv->device);
4076
4077 return 0;
4078 }
4079
4080 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4081 struct stmmac_tx_queue *tx_q)
4082 {
4083 u16 tag = 0x0, inner_tag = 0x0;
4084 u32 inner_type = 0x0;
4085 struct dma_desc *p;
4086
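	/* Offload VLAN tag insertion to the MAC: when the hardware supports
	 * it (dma_cap.vlins) and the skb carries a tag, program a dedicated
	 * descriptor with the tag and hand it to the DMA. Returning true
	 * tells the caller the frame will be tagged by the hardware.
	 */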
4087 if (!priv->dma_cap.vlins)
4088 return false;
4089 if (!skb_vlan_tag_present(skb))
4090 return false;
4091 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4092 inner_tag = skb_vlan_tag_get(skb);
4093 inner_type = STMMAC_VLAN_INSERT;
4094 }
4095
4096 tag = skb_vlan_tag_get(skb);
4097
4098 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4099 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4100 else
4101 p = &tx_q->dma_tx[tx_q->cur_tx];
4102
4103 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4104 return false;
4105
4106 stmmac_set_tx_owner(priv, p);
4107 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4108 return true;
4109 }
4110
4111 /**
4112  * stmmac_tso_allocator - allocate TX descriptors for a TSO payload
4113 * @priv: driver private structure
4114 * @des: buffer start address
4115 * @total_len: total length to fill in descriptors
4116 * @last_segment: condition for the last descriptor
4117 * @queue: TX queue index
4118 * Description:
4119  * This function fills descriptors, requesting new ones as needed,
4120  * according to the buffer length left to fill.
4121 */
4122 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4123 int total_len, bool last_segment, u32 queue)
4124 {
4125 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4126 struct dma_desc *desc;
4127 u32 buff_size;
4128 int tmp_len;
4129
4130 tmp_len = total_len;
4131
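	/* Each descriptor can carry at most TSO_MAX_BUFF_SIZE (16 KiB - 1)
	 * bytes, so the payload is spread over as many descriptors as
	 * needed; for instance a 64 KiB payload takes five of them.
	 */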
4132 while (tmp_len > 0) {
4133 dma_addr_t curr_addr;
4134
4135 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4136 priv->dma_conf.dma_tx_size);
4137 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4138
4139 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4140 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4141 else
4142 desc = &tx_q->dma_tx[tx_q->cur_tx];
4143
4144 curr_addr = des + (total_len - tmp_len);
4145 if (priv->dma_cap.addr64 <= 32)
4146 desc->des0 = cpu_to_le32(curr_addr);
4147 else
4148 stmmac_set_desc_addr(priv, desc, curr_addr);
4149
4150 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4151 TSO_MAX_BUFF_SIZE : tmp_len;
4152
4153 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4154 0, 1,
4155 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4156 0, 0);
4157
4158 tmp_len -= TSO_MAX_BUFF_SIZE;
4159 }
4160 }
4161
4162 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4163 {
4164 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4165 int desc_size;
4166
4167 if (likely(priv->extend_desc))
4168 desc_size = sizeof(struct dma_extended_desc);
4169 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4170 desc_size = sizeof(struct dma_edesc);
4171 else
4172 desc_size = sizeof(struct dma_desc);
4173
4174 	/* The own bit must be the last thing written when preparing the
4175 	 * descriptor, and then a barrier is needed to make sure that
4176 	 * everything is coherent before granting the DMA engine.
4177 	 */
4178 wmb();
4179
4180 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4181 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4182 }
4183
4184 /**
4185 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4186 * @skb : the socket buffer
4187 * @dev : device pointer
4188 * Description: this is the transmit function that is called on TSO frames
4189 * (support available on GMAC4 and newer chips).
4190  * The diagram below shows the ring programming in the case of TSO frames:
4191 *
4192 * First Descriptor
4193 * --------
4194 * | DES0 |---> buffer1 = L2/L3/L4 header
4195 * | DES1 |---> TCP Payload (can continue on next descr...)
4196 * | DES2 |---> buffer 1 and 2 len
4197 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4198 * --------
4199 * |
4200 * ...
4201 * |
4202 * --------
4203 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4204 * | DES1 | --|
4205 * | DES2 | --> buffer 1 and 2 len
4206 * | DES3 |
4207 * --------
4208 *
4209  * The MSS is fixed while TSO is enabled, so the TDES3 context field only needs reprogramming when the MSS value changes.
4210 */
4211 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4212 {
4213 struct dma_desc *desc, *first, *mss_desc = NULL;
4214 struct stmmac_priv *priv = netdev_priv(dev);
4215 int tmp_pay_len = 0, first_tx, nfrags;
4216 unsigned int first_entry, tx_packets;
4217 struct stmmac_txq_stats *txq_stats;
4218 struct stmmac_tx_queue *tx_q;
4219 u32 pay_len, mss, queue;
4220 u8 proto_hdr_len, hdr;
4221 dma_addr_t des;
4222 bool set_ic;
4223 int i;
4224
4225 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4226 	 *
4227 	 * Never let the HW insert the VLAN tag, since segments split by the
4228 	 * TSO engine would be un-tagged by mistake.
4229 	 */
4230 if (skb_vlan_tag_present(skb)) {
4231 skb = __vlan_hwaccel_push_inside(skb);
4232 if (unlikely(!skb)) {
4233 priv->xstats.tx_dropped++;
4234 return NETDEV_TX_OK;
4235 }
4236 }
4237
4238 nfrags = skb_shinfo(skb)->nr_frags;
4239 queue = skb_get_queue_mapping(skb);
4240
4241 tx_q = &priv->dma_conf.tx_queue[queue];
4242 txq_stats = &priv->xstats.txq_stats[queue];
4243 first_tx = tx_q->cur_tx;
4244
4245 /* Compute header lengths */
4246 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4247 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4248 hdr = sizeof(struct udphdr);
4249 } else {
4250 proto_hdr_len = skb_tcp_all_headers(skb);
4251 hdr = tcp_hdrlen(skb);
4252 }
4253
4254 /* Desc availability based on threshold should be enough safe */
4255 if (unlikely(stmmac_tx_avail(priv, queue) <
4256 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4257 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4258 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4259 queue));
4260 /* This is a hard error, log it. */
4261 netdev_err(priv->dev,
4262 "%s: Tx Ring full when queue awake\n",
4263 __func__);
4264 }
4265 return NETDEV_TX_BUSY;
4266 }
4267
4268 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4269
4270 mss = skb_shinfo(skb)->gso_size;
4271
4272 /* set new MSS value if needed */
4273 if (mss != tx_q->mss) {
4274 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4275 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4276 else
4277 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4278
4279 stmmac_set_mss(priv, mss_desc, mss);
4280 tx_q->mss = mss;
4281 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4282 priv->dma_conf.dma_tx_size);
4283 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4284 }
4285
4286 if (netif_msg_tx_queued(priv)) {
4287 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4288 __func__, hdr, proto_hdr_len, pay_len, mss);
4289 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4290 skb->data_len);
4291 }
4292
4293 first_entry = tx_q->cur_tx;
4294 WARN_ON(tx_q->tx_skbuff[first_entry]);
4295
4296 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4297 desc = &tx_q->dma_entx[first_entry].basic;
4298 else
4299 desc = &tx_q->dma_tx[first_entry];
4300 first = desc;
4301
4302 /* first descriptor: fill Headers on Buf1 */
4303 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4304 DMA_TO_DEVICE);
4305 if (dma_mapping_error(priv->device, des))
4306 goto dma_map_err;
4307
4308 if (priv->dma_cap.addr64 <= 32) {
4309 first->des0 = cpu_to_le32(des);
4310
4311 /* Fill start of payload in buff2 of first descriptor */
4312 if (pay_len)
4313 first->des1 = cpu_to_le32(des + proto_hdr_len);
4314
4315 /* If needed take extra descriptors to fill the remaining payload */
4316 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4317 } else {
4318 stmmac_set_desc_addr(priv, first, des);
4319 tmp_pay_len = pay_len;
4320 des += proto_hdr_len;
4321 pay_len = 0;
4322 }
4323
4324 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4325
4326 /* In case two or more DMA transmit descriptors are allocated for this
4327 * non-paged SKB data, the DMA buffer address should be saved to
4328 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4329 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4330 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4331 * since the tail areas of the DMA buffer can be accessed by DMA engine
4332 * sooner or later.
4333 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4334 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4335 * this DMA buffer right after the DMA engine completely finishes the
4336 * full buffer transmission.
4337 */
4338 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4339 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4340 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4341 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4342
4343 /* Prepare fragments */
4344 for (i = 0; i < nfrags; i++) {
4345 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4346
4347 des = skb_frag_dma_map(priv->device, frag, 0,
4348 skb_frag_size(frag),
4349 DMA_TO_DEVICE);
4350 if (dma_mapping_error(priv->device, des))
4351 goto dma_map_err;
4352
4353 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4354 (i == nfrags - 1), queue);
4355
4356 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4357 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4358 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4359 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4360 }
4361
4362 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4363
4364 /* Only the last descriptor gets to point to the skb. */
4365 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4366 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4367
4368 /* Manage tx mitigation */
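	/* Interrupt coalescing: the Interrupt-on-Completion bit is only set
	 * on selected descriptors so that one TX completion interrupt can
	 * cover a whole batch of frames; frames requesting a hardware TX
	 * timestamp always get an immediate completion interrupt.
	 */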
4369 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4370 tx_q->tx_count_frames += tx_packets;
4371
4372 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4373 set_ic = true;
4374 else if (!priv->tx_coal_frames[queue])
4375 set_ic = false;
4376 else if (tx_packets > priv->tx_coal_frames[queue])
4377 set_ic = true;
4378 else if ((tx_q->tx_count_frames %
4379 priv->tx_coal_frames[queue]) < tx_packets)
4380 set_ic = true;
4381 else
4382 set_ic = false;
4383
4384 if (set_ic) {
4385 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4386 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4387 else
4388 desc = &tx_q->dma_tx[tx_q->cur_tx];
4389
4390 tx_q->tx_count_frames = 0;
4391 stmmac_set_tx_ic(priv, desc);
4392 }
4393
4394 /* We've used all descriptors we need for this skb, however,
4395 * advance cur_tx so that it references a fresh descriptor.
4396 * ndo_start_xmit will fill this descriptor the next time it's
4397 * called and stmmac_tx_clean may clean up to this descriptor.
4398 */
4399 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4400
4401 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4402 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4403 __func__);
4404 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4405 }
4406
4407 u64_stats_update_begin(&txq_stats->q_syncp);
4408 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4409 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4410 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4411 if (set_ic)
4412 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4413 u64_stats_update_end(&txq_stats->q_syncp);
4414
4415 if (priv->sarc_type)
4416 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4417
4418 skb_tx_timestamp(skb);
4419
4420 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4421 priv->hwts_tx_en)) {
4422 /* declare that device is doing timestamping */
4423 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4424 stmmac_enable_tx_timestamp(priv, first);
4425 }
4426
4427 /* Complete the first descriptor before granting the DMA */
4428 stmmac_prepare_tso_tx_desc(priv, first, 1,
4429 proto_hdr_len,
4430 pay_len,
4431 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4432 hdr / 4, (skb->len - proto_hdr_len));
4433
4434 /* If context desc is used to change MSS */
4435 if (mss_desc) {
4436 /* Make sure that first descriptor has been completely
4437 * written, including its own bit. This is because MSS is
4438 * actually before first descriptor, so we need to make
4439 * sure that MSS's own bit is the last thing written.
4440 */
4441 dma_wmb();
4442 stmmac_set_tx_owner(priv, mss_desc);
4443 }
4444
4445 if (netif_msg_pktdata(priv)) {
4446 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4447 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4448 tx_q->cur_tx, first, nfrags);
4449 pr_info(">>> frame to be transmitted: ");
4450 print_pkt(skb->data, skb_headlen(skb));
4451 }
4452
4453 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4454
4455 stmmac_flush_tx_descriptors(priv, queue);
4456 stmmac_tx_timer_arm(priv, queue);
4457
4458 return NETDEV_TX_OK;
4459
4460 dma_map_err:
4461 dev_err(priv->device, "Tx dma map failed\n");
4462 dev_kfree_skb(skb);
4463 priv->xstats.tx_dropped++;
4464 return NETDEV_TX_OK;
4465 }
4466
4467 /**
4468 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4469 * @skb: socket buffer to check
4470 *
4471 * Check if a packet has an ethertype that will trigger the IP header checks
4472 * and IP/TCP checksum engine of the stmmac core.
4473 *
4474 * Return: true if the ethertype can trigger the checksum engine, false
4475 * otherwise
4476 */
4477 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4478 {
4479 int depth = 0;
4480 __be16 proto;
4481
4482 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4483 &depth);
4484
4485 return (depth <= ETH_HLEN) &&
4486 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4487 }
4488
4489 /**
4490 * stmmac_xmit - Tx entry point of the driver
4491 * @skb : the socket buffer
4492 * @dev : device pointer
4493 * Description : this is the tx entry point of the driver.
4494 * It programs the chain or the ring and supports oversized frames
4495 * and SG feature.
4496 */
4497 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4498 {
4499 unsigned int first_entry, tx_packets, enh_desc;
4500 struct stmmac_priv *priv = netdev_priv(dev);
4501 unsigned int nopaged_len = skb_headlen(skb);
4502 int i, csum_insertion = 0, is_jumbo = 0;
4503 u32 queue = skb_get_queue_mapping(skb);
4504 int nfrags = skb_shinfo(skb)->nr_frags;
4505 int gso = skb_shinfo(skb)->gso_type;
4506 struct stmmac_txq_stats *txq_stats;
4507 struct dma_edesc *tbs_desc = NULL;
4508 struct dma_desc *desc, *first;
4509 struct stmmac_tx_queue *tx_q;
4510 bool has_vlan, set_ic;
4511 int entry, first_tx;
4512 dma_addr_t des;
4513
4514 tx_q = &priv->dma_conf.tx_queue[queue];
4515 txq_stats = &priv->xstats.txq_stats[queue];
4516 first_tx = tx_q->cur_tx;
4517
4518 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4519 stmmac_disable_eee_mode(priv);
4520
4521 /* Manage oversized TCP frames for GMAC4 device */
4522 if (skb_is_gso(skb) && priv->tso) {
4523 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4524 return stmmac_tso_xmit(skb, dev);
4525 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4526 return stmmac_tso_xmit(skb, dev);
4527 }
4528
4529 if (priv->est && priv->est->enable &&
4530 priv->est->max_sdu[queue] &&
4531 	    skb->len > priv->est->max_sdu[queue]) {
4532 priv->xstats.max_sdu_txq_drop[queue]++;
4533 goto max_sdu_err;
4534 }
4535
4536 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4537 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4538 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4539 queue));
4540 /* This is a hard error, log it. */
4541 netdev_err(priv->dev,
4542 "%s: Tx Ring full when queue awake\n",
4543 __func__);
4544 }
4545 return NETDEV_TX_BUSY;
4546 }
4547
4548 /* Check if VLAN can be inserted by HW */
4549 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4550
4551 entry = tx_q->cur_tx;
4552 first_entry = entry;
4553 WARN_ON(tx_q->tx_skbuff[first_entry]);
4554
4555 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4556 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4557 * queues. In that case, checksum offloading for those queues that don't
4558 * support tx coe needs to fallback to software checksum calculation.
4559 *
4560 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4561 * also have to be checksummed in software.
4562 */
4563 if (csum_insertion &&
4564 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4565 !stmmac_has_ip_ethertype(skb))) {
4566 if (unlikely(skb_checksum_help(skb)))
4567 goto dma_map_err;
4568 csum_insertion = !csum_insertion;
4569 }
4570
4571 if (likely(priv->extend_desc))
4572 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4573 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4574 desc = &tx_q->dma_entx[entry].basic;
4575 else
4576 desc = tx_q->dma_tx + entry;
4577
4578 first = desc;
4579
4580 if (has_vlan)
4581 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4582
4583 enh_desc = priv->plat->enh_desc;
4584 /* To program the descriptors according to the size of the frame */
4585 if (enh_desc)
4586 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4587
4588 if (unlikely(is_jumbo)) {
4589 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4590 if (unlikely(entry < 0) && (entry != -EINVAL))
4591 goto dma_map_err;
4592 }
4593
4594 for (i = 0; i < nfrags; i++) {
4595 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4596 int len = skb_frag_size(frag);
4597 bool last_segment = (i == (nfrags - 1));
4598
4599 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4600 WARN_ON(tx_q->tx_skbuff[entry]);
4601
4602 if (likely(priv->extend_desc))
4603 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4604 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4605 desc = &tx_q->dma_entx[entry].basic;
4606 else
4607 desc = tx_q->dma_tx + entry;
4608
4609 des = skb_frag_dma_map(priv->device, frag, 0, len,
4610 DMA_TO_DEVICE);
4611 if (dma_mapping_error(priv->device, des))
4612 goto dma_map_err; /* should reuse desc w/o issues */
4613
4614 tx_q->tx_skbuff_dma[entry].buf = des;
4615
4616 stmmac_set_desc_addr(priv, desc, des);
4617
4618 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4619 tx_q->tx_skbuff_dma[entry].len = len;
4620 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4621 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4622
4623 /* Prepare the descriptor and set the own bit too */
4624 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4625 priv->mode, 1, last_segment, skb->len);
4626 }
4627
4628 /* Only the last descriptor gets to point to the skb. */
4629 tx_q->tx_skbuff[entry] = skb;
4630 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4631
4632 /* According to the coalesce parameter the IC bit for the latest
4633 * segment is reset and the timer re-started to clean the tx status.
4634 	 * This approach takes care of the fragments: desc is the first
4635 * element in case of no SG.
4636 */
4637 tx_packets = (entry + 1) - first_tx;
4638 tx_q->tx_count_frames += tx_packets;
4639
4640 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4641 set_ic = true;
4642 else if (!priv->tx_coal_frames[queue])
4643 set_ic = false;
4644 else if (tx_packets > priv->tx_coal_frames[queue])
4645 set_ic = true;
4646 else if ((tx_q->tx_count_frames %
4647 priv->tx_coal_frames[queue]) < tx_packets)
4648 set_ic = true;
4649 else
4650 set_ic = false;
4651
4652 if (set_ic) {
4653 if (likely(priv->extend_desc))
4654 desc = &tx_q->dma_etx[entry].basic;
4655 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4656 desc = &tx_q->dma_entx[entry].basic;
4657 else
4658 desc = &tx_q->dma_tx[entry];
4659
4660 tx_q->tx_count_frames = 0;
4661 stmmac_set_tx_ic(priv, desc);
4662 }
4663
4664 /* We've used all descriptors we need for this skb, however,
4665 * advance cur_tx so that it references a fresh descriptor.
4666 * ndo_start_xmit will fill this descriptor the next time it's
4667 * called and stmmac_tx_clean may clean up to this descriptor.
4668 */
4669 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4670 tx_q->cur_tx = entry;
4671
4672 if (netif_msg_pktdata(priv)) {
4673 netdev_dbg(priv->dev,
4674 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4675 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4676 entry, first, nfrags);
4677
4678 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4679 print_pkt(skb->data, skb->len);
4680 }
4681
4682 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4683 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4684 __func__);
4685 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4686 }
4687
4688 u64_stats_update_begin(&txq_stats->q_syncp);
4689 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4690 if (set_ic)
4691 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4692 u64_stats_update_end(&txq_stats->q_syncp);
4693
4694 if (priv->sarc_type)
4695 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4696
4697 skb_tx_timestamp(skb);
4698
4699 /* Ready to fill the first descriptor and set the OWN bit w/o any
4700 * problems because all the descriptors are actually ready to be
4701 * passed to the DMA engine.
4702 */
4703 if (likely(!is_jumbo)) {
4704 bool last_segment = (nfrags == 0);
4705
4706 des = dma_map_single(priv->device, skb->data,
4707 nopaged_len, DMA_TO_DEVICE);
4708 if (dma_mapping_error(priv->device, des))
4709 goto dma_map_err;
4710
4711 tx_q->tx_skbuff_dma[first_entry].buf = des;
4712 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4713 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4714
4715 stmmac_set_desc_addr(priv, first, des);
4716
4717 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4718 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4719
4720 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4721 priv->hwts_tx_en)) {
4722 /* declare that device is doing timestamping */
4723 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4724 stmmac_enable_tx_timestamp(priv, first);
4725 }
4726
4727 /* Prepare the first descriptor setting the OWN bit too */
4728 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4729 csum_insertion, priv->mode, 0, last_segment,
4730 skb->len);
4731 }
4732
4733 if (tx_q->tbs & STMMAC_TBS_EN) {
4734 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4735
4736 tbs_desc = &tx_q->dma_entx[first_entry];
4737 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4738 }
4739
4740 stmmac_set_tx_owner(priv, first);
4741
4742 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4743
4744 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4745
4746 stmmac_flush_tx_descriptors(priv, queue);
4747 stmmac_tx_timer_arm(priv, queue);
4748
4749 return NETDEV_TX_OK;
4750
4751 dma_map_err:
4752 netdev_err(priv->dev, "Tx DMA map failed\n");
4753 max_sdu_err:
4754 dev_kfree_skb(skb);
4755 priv->xstats.tx_dropped++;
4756 return NETDEV_TX_OK;
4757 }
4758
4759 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4760 {
4761 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4762 __be16 vlan_proto = veth->h_vlan_proto;
4763 u16 vlanid;
4764
4765 if ((vlan_proto == htons(ETH_P_8021Q) &&
4766 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4767 (vlan_proto == htons(ETH_P_8021AD) &&
4768 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4769 /* pop the vlan tag */
4770 vlanid = ntohs(veth->h_vlan_TCI);
4771 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4772 skb_pull(skb, VLAN_HLEN);
4773 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4774 }
4775 }
4776
4777 /**
4778  * stmmac_rx_refill - refill the used RX buffers
4779 * @priv: driver private structure
4780 * @queue: RX queue index
4781  * Description: this is to refill the RX buffers (page_pool pages) used
4782  * by the reception process.
4783 */
4784 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4785 {
4786 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4787 int dirty = stmmac_rx_dirty(priv, queue);
4788 unsigned int entry = rx_q->dirty_rx;
4789 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4790
4791 if (priv->dma_cap.host_dma_width <= 32)
4792 gfp |= GFP_DMA32;
4793
4794 while (dirty-- > 0) {
4795 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4796 struct dma_desc *p;
4797 bool use_rx_wd;
4798
4799 if (priv->extend_desc)
4800 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4801 else
4802 p = rx_q->dma_rx + entry;
4803
4804 if (!buf->page) {
4805 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4806 if (!buf->page)
4807 break;
4808 }
4809
4810 if (priv->sph && !buf->sec_page) {
4811 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4812 if (!buf->sec_page)
4813 break;
4814
4815 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4816 }
4817
4818 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4819
4820 stmmac_set_desc_addr(priv, p, buf->addr);
4821 if (priv->sph)
4822 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4823 else
4824 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4825 stmmac_refill_desc3(priv, rx_q, p);
4826
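		/* use_rx_wd selects whether this descriptor relies on the RIWT
		 * receive watchdog instead of raising an immediate interrupt
		 * on completion; it can only take effect when the watchdog
		 * (priv->use_riwt) is actually in use.
		 */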
4827 rx_q->rx_count_frames++;
4828 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4829 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4830 rx_q->rx_count_frames = 0;
4831
4832 use_rx_wd = !priv->rx_coal_frames[queue];
4833 use_rx_wd |= rx_q->rx_count_frames > 0;
4834 if (!priv->use_riwt)
4835 use_rx_wd = false;
4836
4837 dma_wmb();
4838 stmmac_set_rx_owner(priv, p, use_rx_wd);
4839
4840 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4841 }
4842 rx_q->dirty_rx = entry;
4843 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4844 (rx_q->dirty_rx * sizeof(struct dma_desc));
4845 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4846 }
4847
4848 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4849 struct dma_desc *p,
4850 int status, unsigned int len)
4851 {
4852 unsigned int plen = 0, hlen = 0;
4853 int coe = priv->hw->rx_csum;
4854
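	/* Work out how many bytes of the frame landed in buffer 1 of this
	 * descriptor. With Split Header enabled the first descriptor only
	 * carries the packet headers and the payload starts in buffer 2
	 * (see stmmac_rx_buf2_len()).
	 */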
4855 /* Not first descriptor, buffer is always zero */
4856 if (priv->sph && len)
4857 return 0;
4858
4859 /* First descriptor, get split header length */
4860 stmmac_get_rx_header_len(priv, p, &hlen);
4861 if (priv->sph && hlen) {
4862 priv->xstats.rx_split_hdr_pkt_n++;
4863 return hlen;
4864 }
4865
4866 /* First descriptor, not last descriptor and not split header */
4867 if (status & rx_not_ls)
4868 return priv->dma_conf.dma_buf_sz;
4869
4870 plen = stmmac_get_rx_frame_len(priv, p, coe);
4871
4872 /* First descriptor and last descriptor and not split header */
4873 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4874 }
4875
4876 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4877 struct dma_desc *p,
4878 int status, unsigned int len)
4879 {
4880 int coe = priv->hw->rx_csum;
4881 unsigned int plen = 0;
4882
4883 /* Not split header, buffer is not available */
4884 if (!priv->sph)
4885 return 0;
4886
4887 /* Not last descriptor */
4888 if (status & rx_not_ls)
4889 return priv->dma_conf.dma_buf_sz;
4890
4891 plen = stmmac_get_rx_frame_len(priv, p, coe);
4892
4893 /* Last descriptor */
4894 return plen - len;
4895 }
4896
4897 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4898 struct xdp_frame *xdpf, bool dma_map)
4899 {
4900 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4901 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4902 unsigned int entry = tx_q->cur_tx;
4903 struct dma_desc *tx_desc;
4904 dma_addr_t dma_addr;
4905 bool set_ic;
4906
4907 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4908 return STMMAC_XDP_CONSUMED;
4909
4910 if (priv->est && priv->est->enable &&
4911 priv->est->max_sdu[queue] &&
4912 xdpf->len > priv->est->max_sdu[queue]) {
4913 priv->xstats.max_sdu_txq_drop[queue]++;
4914 return STMMAC_XDP_CONSUMED;
4915 }
4916
4917 if (likely(priv->extend_desc))
4918 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4919 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4920 tx_desc = &tx_q->dma_entx[entry].basic;
4921 else
4922 tx_desc = tx_q->dma_tx + entry;
4923
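	/* Two submission flavours: the ndo_xdp_xmit() path (@dma_map true)
	 * maps the frame data fresh, while the local XDP_TX path reuses the
	 * page_pool DMA mapping of the RX buffer and only syncs it for
	 * device access. buf_type tells stmmac_tx_clean() how to release it.
	 */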
4924 if (dma_map) {
4925 dma_addr = dma_map_single(priv->device, xdpf->data,
4926 xdpf->len, DMA_TO_DEVICE);
4927 if (dma_mapping_error(priv->device, dma_addr))
4928 return STMMAC_XDP_CONSUMED;
4929
4930 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4931 } else {
4932 struct page *page = virt_to_page(xdpf->data);
4933
4934 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4935 xdpf->headroom;
4936 dma_sync_single_for_device(priv->device, dma_addr,
4937 xdpf->len, DMA_BIDIRECTIONAL);
4938
4939 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4940 }
4941
4942 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4943 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4944 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4945 tx_q->tx_skbuff_dma[entry].last_segment = true;
4946 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4947
4948 tx_q->xdpf[entry] = xdpf;
4949
4950 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4951
4952 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4953 true, priv->mode, true, true,
4954 xdpf->len);
4955
4956 tx_q->tx_count_frames++;
4957
4958 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4959 set_ic = true;
4960 else
4961 set_ic = false;
4962
4963 if (set_ic) {
4964 tx_q->tx_count_frames = 0;
4965 stmmac_set_tx_ic(priv, tx_desc);
4966 u64_stats_update_begin(&txq_stats->q_syncp);
4967 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4968 u64_stats_update_end(&txq_stats->q_syncp);
4969 }
4970
4971 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4972
4973 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4974 tx_q->cur_tx = entry;
4975
4976 return STMMAC_XDP_TX;
4977 }
4978
4979 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4980 int cpu)
4981 {
4982 int index = cpu;
4983
4984 if (unlikely(index < 0))
4985 index = 0;
4986
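	/* Fold the CPU id onto the available TX queues (in effect
	 * index % tx_queues_to_use) so XDP_TX/redirect traffic coming from
	 * different CPUs is spread across the TX queues.
	 */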
4987 while (index >= priv->plat->tx_queues_to_use)
4988 index -= priv->plat->tx_queues_to_use;
4989
4990 return index;
4991 }
4992
4993 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4994 struct xdp_buff *xdp)
4995 {
4996 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4997 int cpu = smp_processor_id();
4998 struct netdev_queue *nq;
4999 int queue;
5000 int res;
5001
5002 if (unlikely(!xdpf))
5003 return STMMAC_XDP_CONSUMED;
5004
5005 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5006 nq = netdev_get_tx_queue(priv->dev, queue);
5007
5008 __netif_tx_lock(nq, cpu);
5009 /* Avoids TX time-out as we are sharing with slow path */
5010 txq_trans_cond_update(nq);
5011
5012 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5013 if (res == STMMAC_XDP_TX)
5014 stmmac_flush_tx_descriptors(priv, queue);
5015
5016 __netif_tx_unlock(nq);
5017
5018 return res;
5019 }
5020
5021 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5022 struct bpf_prog *prog,
5023 struct xdp_buff *xdp)
5024 {
5025 u32 act;
5026 int res;
5027
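	/* Run the attached XDP program and translate its verdict into the
	 * driver's STMMAC_XDP_* action mask: PASS hands the buffer to the
	 * network stack, TX bounces it out of a local TX queue, REDIRECT
	 * goes through xdp_do_redirect(), anything else drops the frame.
	 */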
5028 act = bpf_prog_run_xdp(prog, xdp);
5029 switch (act) {
5030 case XDP_PASS:
5031 res = STMMAC_XDP_PASS;
5032 break;
5033 case XDP_TX:
5034 res = stmmac_xdp_xmit_back(priv, xdp);
5035 break;
5036 case XDP_REDIRECT:
5037 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5038 res = STMMAC_XDP_CONSUMED;
5039 else
5040 res = STMMAC_XDP_REDIRECT;
5041 break;
5042 default:
5043 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5044 fallthrough;
5045 case XDP_ABORTED:
5046 trace_xdp_exception(priv->dev, prog, act);
5047 fallthrough;
5048 case XDP_DROP:
5049 res = STMMAC_XDP_CONSUMED;
5050 break;
5051 }
5052
5053 return res;
5054 }
5055
5056 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5057 struct xdp_buff *xdp)
5058 {
5059 struct bpf_prog *prog;
5060 int res;
5061
5062 prog = READ_ONCE(priv->xdp_prog);
5063 if (!prog) {
5064 res = STMMAC_XDP_PASS;
5065 goto out;
5066 }
5067
5068 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5069 out:
5070 return ERR_PTR(-res);
5071 }
5072
5073 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5074 int xdp_status)
5075 {
5076 int cpu = smp_processor_id();
5077 int queue;
5078
5079 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5080
5081 if (xdp_status & STMMAC_XDP_TX)
5082 stmmac_tx_timer_arm(priv, queue);
5083
5084 if (xdp_status & STMMAC_XDP_REDIRECT)
5085 xdp_do_flush();
5086 }
5087
5088 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5089 struct xdp_buff *xdp)
5090 {
5091 unsigned int metasize = xdp->data - xdp->data_meta;
5092 unsigned int datasize = xdp->data_end - xdp->data;
5093 struct sk_buff *skb;
5094
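	/* The zero-copy RX buffer belongs to the XSK pool, so a frame that
	 * goes to the regular stack is copied into a freshly allocated skb,
	 * preserving any XDP metadata placed in front of the payload.
	 */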
5095 skb = napi_alloc_skb(&ch->rxtx_napi,
5096 xdp->data_end - xdp->data_hard_start);
5097 if (unlikely(!skb))
5098 return NULL;
5099
5100 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5101 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5102 if (metasize)
5103 skb_metadata_set(skb, metasize);
5104
5105 return skb;
5106 }
5107
5108 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5109 struct dma_desc *p, struct dma_desc *np,
5110 struct xdp_buff *xdp)
5111 {
5112 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5113 struct stmmac_channel *ch = &priv->channel[queue];
5114 unsigned int len = xdp->data_end - xdp->data;
5115 enum pkt_hash_types hash_type;
5116 int coe = priv->hw->rx_csum;
5117 struct sk_buff *skb;
5118 u32 hash;
5119
5120 skb = stmmac_construct_skb_zc(ch, xdp);
5121 if (!skb) {
5122 priv->xstats.rx_dropped++;
5123 return;
5124 }
5125
5126 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5127 if (priv->hw->hw_vlan_en)
5128 /* MAC level stripping. */
5129 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5130 else
5131 /* Driver level stripping. */
5132 stmmac_rx_vlan(priv->dev, skb);
5133 skb->protocol = eth_type_trans(skb, priv->dev);
5134
5135 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5136 skb_checksum_none_assert(skb);
5137 else
5138 skb->ip_summed = CHECKSUM_UNNECESSARY;
5139
5140 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5141 skb_set_hash(skb, hash, hash_type);
5142
5143 skb_record_rx_queue(skb, queue);
5144 napi_gro_receive(&ch->rxtx_napi, skb);
5145
5146 u64_stats_update_begin(&rxq_stats->napi_syncp);
5147 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5148 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5149 u64_stats_update_end(&rxq_stats->napi_syncp);
5150 }
5151
5152 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5153 {
5154 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5155 unsigned int entry = rx_q->dirty_rx;
5156 struct dma_desc *rx_desc = NULL;
5157 bool ret = true;
5158
5159 budget = min(budget, stmmac_rx_dirty(priv, queue));
5160
5161 while (budget-- > 0 && entry != rx_q->cur_rx) {
5162 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5163 dma_addr_t dma_addr;
5164 bool use_rx_wd;
5165
5166 if (!buf->xdp) {
5167 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5168 if (!buf->xdp) {
5169 ret = false;
5170 break;
5171 }
5172 }
5173
5174 if (priv->extend_desc)
5175 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5176 else
5177 rx_desc = rx_q->dma_rx + entry;
5178
5179 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5180 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5181 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5182 stmmac_refill_desc3(priv, rx_q, rx_desc);
5183
5184 rx_q->rx_count_frames++;
5185 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5186 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5187 rx_q->rx_count_frames = 0;
5188
5189 use_rx_wd = !priv->rx_coal_frames[queue];
5190 use_rx_wd |= rx_q->rx_count_frames > 0;
5191 if (!priv->use_riwt)
5192 use_rx_wd = false;
5193
5194 dma_wmb();
5195 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5196
5197 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5198 }
5199
5200 if (rx_desc) {
5201 rx_q->dirty_rx = entry;
5202 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5203 (rx_q->dirty_rx * sizeof(struct dma_desc));
5204 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5205 }
5206
5207 return ret;
5208 }
5209
5210 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5211 {
5212 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5213 * to represent incoming packet, whereas cb field in the same structure
5214 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5215 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5216 */
5217 return (struct stmmac_xdp_buff *)xdp;
5218 }
5219
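/* RX path used when an XSK pool is attached (XDP zero copy): each received
 * frame maps 1:1 to an XSK buffer and is either consumed by the XDP program
 * (TX/REDIRECT) or copied into an skb on XDP_PASS.
 */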
5220 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5221 {
5222 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5223 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5224 unsigned int count = 0, error = 0, len = 0;
5225 int dirty = stmmac_rx_dirty(priv, queue);
5226 unsigned int next_entry = rx_q->cur_rx;
5227 u32 rx_errors = 0, rx_dropped = 0;
5228 unsigned int desc_size;
5229 struct bpf_prog *prog;
5230 bool failure = false;
5231 int xdp_status = 0;
5232 int status = 0;
5233
5234 if (netif_msg_rx_status(priv)) {
5235 void *rx_head;
5236
5237 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5238 if (priv->extend_desc) {
5239 rx_head = (void *)rx_q->dma_erx;
5240 desc_size = sizeof(struct dma_extended_desc);
5241 } else {
5242 rx_head = (void *)rx_q->dma_rx;
5243 desc_size = sizeof(struct dma_desc);
5244 }
5245
5246 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5247 rx_q->dma_rx_phy, desc_size);
5248 }
5249 while (count < limit) {
5250 struct stmmac_rx_buffer *buf;
5251 struct stmmac_xdp_buff *ctx;
5252 unsigned int buf1_len = 0;
5253 struct dma_desc *np, *p;
5254 int entry;
5255 int res;
5256
5257 if (!count && rx_q->state_saved) {
5258 error = rx_q->state.error;
5259 len = rx_q->state.len;
5260 } else {
5261 rx_q->state_saved = false;
5262 error = 0;
5263 len = 0;
5264 }
5265
5266 if (count >= limit)
5267 break;
5268
5269 read_again:
5270 buf1_len = 0;
5271 entry = next_entry;
5272 buf = &rx_q->buf_pool[entry];
5273
5274 if (dirty >= STMMAC_RX_FILL_BATCH) {
5275 failure = failure ||
5276 !stmmac_rx_refill_zc(priv, queue, dirty);
5277 dirty = 0;
5278 }
5279
5280 if (priv->extend_desc)
5281 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5282 else
5283 p = rx_q->dma_rx + entry;
5284
5285 /* read the status of the incoming frame */
5286 status = stmmac_rx_status(priv, &priv->xstats, p);
5287 		/* check if the descriptor is still owned by the DMA; otherwise go ahead */
5288 if (unlikely(status & dma_own))
5289 break;
5290
5291 /* Prefetch the next RX descriptor */
5292 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5293 priv->dma_conf.dma_rx_size);
5294 next_entry = rx_q->cur_rx;
5295
5296 if (priv->extend_desc)
5297 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5298 else
5299 np = rx_q->dma_rx + next_entry;
5300
5301 prefetch(np);
5302
5303 		/* Ensure a valid XSK buffer before proceeding */
5304 if (!buf->xdp)
5305 break;
5306
5307 if (priv->extend_desc)
5308 stmmac_rx_extended_status(priv, &priv->xstats,
5309 rx_q->dma_erx + entry);
5310 if (unlikely(status == discard_frame)) {
5311 xsk_buff_free(buf->xdp);
5312 buf->xdp = NULL;
5313 dirty++;
5314 error = 1;
5315 if (!priv->hwts_rx_en)
5316 rx_errors++;
5317 }
5318
5319 if (unlikely(error && (status & rx_not_ls)))
5320 goto read_again;
5321 if (unlikely(error)) {
5322 count++;
5323 continue;
5324 }
5325
5326 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5327 if (likely(status & rx_not_ls)) {
5328 xsk_buff_free(buf->xdp);
5329 buf->xdp = NULL;
5330 dirty++;
5331 count++;
5332 goto read_again;
5333 }
5334
5335 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5336 ctx->priv = priv;
5337 ctx->desc = p;
5338 ctx->ndesc = np;
5339
5340 		/* XDP ZC frames only support primary buffers for now */
5341 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5342 len += buf1_len;
5343
5344 /* ACS is disabled; strip manually. */
5345 if (likely(!(status & rx_not_ls))) {
5346 buf1_len -= ETH_FCS_LEN;
5347 len -= ETH_FCS_LEN;
5348 }
5349
5350 		/* RX buffer is good and fits into an XSK pool buffer */
5351 buf->xdp->data_end = buf->xdp->data + buf1_len;
5352 xsk_buff_dma_sync_for_cpu(buf->xdp);
5353
5354 prog = READ_ONCE(priv->xdp_prog);
5355 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5356
5357 switch (res) {
5358 case STMMAC_XDP_PASS:
5359 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5360 xsk_buff_free(buf->xdp);
5361 break;
5362 case STMMAC_XDP_CONSUMED:
5363 xsk_buff_free(buf->xdp);
5364 rx_dropped++;
5365 break;
5366 case STMMAC_XDP_TX:
5367 case STMMAC_XDP_REDIRECT:
5368 xdp_status |= res;
5369 break;
5370 }
5371
5372 buf->xdp = NULL;
5373 dirty++;
5374 count++;
5375 }
5376
5377 if (status & rx_not_ls) {
5378 rx_q->state_saved = true;
5379 rx_q->state.error = error;
5380 rx_q->state.len = len;
5381 }
5382
5383 stmmac_finalize_xdp_rx(priv, xdp_status);
5384
5385 u64_stats_update_begin(&rxq_stats->napi_syncp);
5386 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5387 u64_stats_update_end(&rxq_stats->napi_syncp);
5388
5389 priv->xstats.rx_dropped += rx_dropped;
5390 priv->xstats.rx_errors += rx_errors;
5391
5392 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5393 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5394 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5395 else
5396 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5397
5398 return (int)count;
5399 }
5400
5401 return failure ? limit : (int)count;
5402 }
5403
5404 /**
5405 * stmmac_rx - manage the receive process
5406 * @priv: driver private structure
5407  * @limit: napi budget
5408 * @queue: RX queue index.
5409  * Description : this is the function called by the napi poll method.
5410 * It gets all the frames inside the ring.
5411 */
5412 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5413 {
5414 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5415 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5416 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5417 struct stmmac_channel *ch = &priv->channel[queue];
5418 unsigned int count = 0, error = 0, len = 0;
5419 int status = 0, coe = priv->hw->rx_csum;
5420 unsigned int next_entry = rx_q->cur_rx;
5421 enum dma_data_direction dma_dir;
5422 unsigned int desc_size;
5423 struct sk_buff *skb = NULL;
5424 struct stmmac_xdp_buff ctx;
5425 int xdp_status = 0;
5426 int buf_sz;
5427
5428 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5429 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5430 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5431
5432 if (netif_msg_rx_status(priv)) {
5433 void *rx_head;
5434
5435 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5436 if (priv->extend_desc) {
5437 rx_head = (void *)rx_q->dma_erx;
5438 desc_size = sizeof(struct dma_extended_desc);
5439 } else {
5440 rx_head = (void *)rx_q->dma_rx;
5441 desc_size = sizeof(struct dma_desc);
5442 }
5443
5444 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5445 rx_q->dma_rx_phy, desc_size);
5446 }
5447 while (count < limit) {
5448 unsigned int buf1_len = 0, buf2_len = 0;
5449 enum pkt_hash_types hash_type;
5450 struct stmmac_rx_buffer *buf;
5451 struct dma_desc *np, *p;
5452 int entry;
5453 u32 hash;
5454
5455 if (!count && rx_q->state_saved) {
5456 skb = rx_q->state.skb;
5457 error = rx_q->state.error;
5458 len = rx_q->state.len;
5459 } else {
5460 rx_q->state_saved = false;
5461 skb = NULL;
5462 error = 0;
5463 len = 0;
5464 }
5465
5466 read_again:
5467 if (count >= limit)
5468 break;
5469
5470 buf1_len = 0;
5471 buf2_len = 0;
5472 entry = next_entry;
5473 buf = &rx_q->buf_pool[entry];
5474
5475 if (priv->extend_desc)
5476 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5477 else
5478 p = rx_q->dma_rx + entry;
5479
5480 /* read the status of the incoming frame */
5481 status = stmmac_rx_status(priv, &priv->xstats, p);
5482 		/* check if the descriptor is still owned by the DMA; otherwise go ahead */
5483 if (unlikely(status & dma_own))
5484 break;
5485
5486 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5487 priv->dma_conf.dma_rx_size);
5488 next_entry = rx_q->cur_rx;
5489
5490 if (priv->extend_desc)
5491 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5492 else
5493 np = rx_q->dma_rx + next_entry;
5494
5495 prefetch(np);
5496
5497 if (priv->extend_desc)
5498 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5499 if (unlikely(status == discard_frame)) {
5500 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5501 buf->page = NULL;
5502 error = 1;
5503 if (!priv->hwts_rx_en)
5504 rx_errors++;
5505 }
5506
5507 if (unlikely(error && (status & rx_not_ls)))
5508 goto read_again;
5509 if (unlikely(error)) {
5510 dev_kfree_skb(skb);
5511 skb = NULL;
5512 count++;
5513 continue;
5514 }
5515
5516 /* Buffer is good. Go on. */
5517
5518 prefetch(page_address(buf->page) + buf->page_offset);
5519 if (buf->sec_page)
5520 prefetch(page_address(buf->sec_page));
5521
5522 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5523 len += buf1_len;
5524 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5525 len += buf2_len;
5526
5527 /* ACS is disabled; strip manually. */
5528 if (likely(!(status & rx_not_ls))) {
5529 if (buf2_len) {
5530 buf2_len -= ETH_FCS_LEN;
5531 len -= ETH_FCS_LEN;
5532 } else if (buf1_len) {
5533 buf1_len -= ETH_FCS_LEN;
5534 len -= ETH_FCS_LEN;
5535 }
5536 }
5537
5538 if (!skb) {
5539 unsigned int pre_len, sync_len;
5540
5541 dma_sync_single_for_cpu(priv->device, buf->addr,
5542 buf1_len, dma_dir);
5543
5544 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5545 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5546 buf->page_offset, buf1_len, true);
5547
5548 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5549 buf->page_offset;
5550
5551 ctx.priv = priv;
5552 ctx.desc = p;
5553 ctx.ndesc = np;
5554
5555 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5556 			/* Due to xdp_adjust_tail: the DMA sync for_device
5557 			 * must cover the maximum length the CPU touched.
5558 			 */
5559 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5560 buf->page_offset;
5561 sync_len = max(sync_len, pre_len);
5562
5563 			/* For non-XDP_PASS verdicts */
5564 if (IS_ERR(skb)) {
5565 unsigned int xdp_res = -PTR_ERR(skb);
5566
5567 if (xdp_res & STMMAC_XDP_CONSUMED) {
5568 page_pool_put_page(rx_q->page_pool,
5569 virt_to_head_page(ctx.xdp.data),
5570 sync_len, true);
5571 buf->page = NULL;
5572 rx_dropped++;
5573
5574 					/* Clear skb, as it was set to carry
5575 					 * the XDP verdict status.
5576 					 */
5577 skb = NULL;
5578
5579 if (unlikely((status & rx_not_ls)))
5580 goto read_again;
5581
5582 count++;
5583 continue;
5584 } else if (xdp_res & (STMMAC_XDP_TX |
5585 STMMAC_XDP_REDIRECT)) {
5586 xdp_status |= xdp_res;
5587 buf->page = NULL;
5588 skb = NULL;
5589 count++;
5590 continue;
5591 }
5592 }
5593 }
5594
5595 if (!skb) {
5596 /* XDP program may expand or reduce tail */
5597 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5598
5599 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5600 if (!skb) {
5601 rx_dropped++;
5602 count++;
5603 goto drain_data;
5604 }
5605
5606 /* XDP program may adjust header */
5607 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5608 skb_put(skb, buf1_len);
5609
5610 /* Data payload copied into SKB, page ready for recycle */
5611 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5612 buf->page = NULL;
5613 } else if (buf1_len) {
5614 dma_sync_single_for_cpu(priv->device, buf->addr,
5615 buf1_len, dma_dir);
5616 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5617 buf->page, buf->page_offset, buf1_len,
5618 priv->dma_conf.dma_buf_sz);
5619
5620 /* Data payload appended into SKB */
5621 skb_mark_for_recycle(skb);
5622 buf->page = NULL;
5623 }
5624
5625 if (buf2_len) {
5626 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5627 buf2_len, dma_dir);
5628 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5629 buf->sec_page, 0, buf2_len,
5630 priv->dma_conf.dma_buf_sz);
5631
5632 /* Data payload appended into SKB */
5633 skb_mark_for_recycle(skb);
5634 buf->sec_page = NULL;
5635 }
5636
5637 drain_data:
5638 if (likely(status & rx_not_ls))
5639 goto read_again;
5640 if (!skb)
5641 continue;
5642
5643 /* Got entire packet into SKB. Finish it. */
5644
5645 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5646
5647 if (priv->hw->hw_vlan_en)
5648 /* MAC level stripping. */
5649 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5650 else
5651 /* Driver level stripping. */
5652 stmmac_rx_vlan(priv->dev, skb);
5653
5654 skb->protocol = eth_type_trans(skb, priv->dev);
5655
5656 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5657 skb_checksum_none_assert(skb);
5658 else
5659 skb->ip_summed = CHECKSUM_UNNECESSARY;
5660
5661 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5662 skb_set_hash(skb, hash, hash_type);
5663
5664 skb_record_rx_queue(skb, queue);
5665 napi_gro_receive(&ch->rx_napi, skb);
5666 skb = NULL;
5667
5668 rx_packets++;
5669 rx_bytes += len;
5670 count++;
5671 }
5672
5673 if (status & rx_not_ls || skb) {
5674 rx_q->state_saved = true;
5675 rx_q->state.skb = skb;
5676 rx_q->state.error = error;
5677 rx_q->state.len = len;
5678 }
5679
5680 stmmac_finalize_xdp_rx(priv, xdp_status);
5681
5682 stmmac_rx_refill(priv, queue);
5683
5684 u64_stats_update_begin(&rxq_stats->napi_syncp);
5685 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5686 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5687 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5688 u64_stats_update_end(&rxq_stats->napi_syncp);
5689
5690 priv->xstats.rx_dropped += rx_dropped;
5691 priv->xstats.rx_errors += rx_errors;
5692
5693 return count;
5694 }
5695
5696 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5697 {
5698 struct stmmac_channel *ch =
5699 container_of(napi, struct stmmac_channel, rx_napi);
5700 struct stmmac_priv *priv = ch->priv_data;
5701 struct stmmac_rxq_stats *rxq_stats;
5702 u32 chan = ch->index;
5703 int work_done;
5704
5705 rxq_stats = &priv->xstats.rxq_stats[chan];
5706 u64_stats_update_begin(&rxq_stats->napi_syncp);
5707 u64_stats_inc(&rxq_stats->napi.poll);
5708 u64_stats_update_end(&rxq_stats->napi_syncp);
5709
5710 work_done = stmmac_rx(priv, budget, chan);
5711 if (work_done < budget && napi_complete_done(napi, work_done)) {
5712 unsigned long flags;
5713
5714 spin_lock_irqsave(&ch->lock, flags);
5715 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5716 spin_unlock_irqrestore(&ch->lock, flags);
5717 }
5718
5719 return work_done;
5720 }
5721
5722 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5723 {
5724 struct stmmac_channel *ch =
5725 container_of(napi, struct stmmac_channel, tx_napi);
5726 struct stmmac_priv *priv = ch->priv_data;
5727 struct stmmac_txq_stats *txq_stats;
5728 bool pending_packets = false;
5729 u32 chan = ch->index;
5730 int work_done;
5731
5732 txq_stats = &priv->xstats.txq_stats[chan];
5733 u64_stats_update_begin(&txq_stats->napi_syncp);
5734 u64_stats_inc(&txq_stats->napi.poll);
5735 u64_stats_update_end(&txq_stats->napi_syncp);
5736
5737 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5738 work_done = min(work_done, budget);
5739
5740 if (work_done < budget && napi_complete_done(napi, work_done)) {
5741 unsigned long flags;
5742
5743 spin_lock_irqsave(&ch->lock, flags);
5744 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5745 spin_unlock_irqrestore(&ch->lock, flags);
5746 }
5747
5748 	/* TX still has packets to handle; check if we need to arm the tx timer */
5749 if (pending_packets)
5750 stmmac_tx_timer_arm(priv, chan);
5751
5752 return work_done;
5753 }
5754
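/* Combined RX/TX NAPI poll used by channels that have an XSK pool attached
 * (XDP zero-copy mode): TX completions and zero-copy RX are serviced from the
 * same context.
 */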
5755 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5756 {
5757 struct stmmac_channel *ch =
5758 container_of(napi, struct stmmac_channel, rxtx_napi);
5759 struct stmmac_priv *priv = ch->priv_data;
5760 bool tx_pending_packets = false;
5761 int rx_done, tx_done, rxtx_done;
5762 struct stmmac_rxq_stats *rxq_stats;
5763 struct stmmac_txq_stats *txq_stats;
5764 u32 chan = ch->index;
5765
5766 rxq_stats = &priv->xstats.rxq_stats[chan];
5767 u64_stats_update_begin(&rxq_stats->napi_syncp);
5768 u64_stats_inc(&rxq_stats->napi.poll);
5769 u64_stats_update_end(&rxq_stats->napi_syncp);
5770
5771 txq_stats = &priv->xstats.txq_stats[chan];
5772 u64_stats_update_begin(&txq_stats->napi_syncp);
5773 u64_stats_inc(&txq_stats->napi.poll);
5774 u64_stats_update_end(&txq_stats->napi_syncp);
5775
5776 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5777 tx_done = min(tx_done, budget);
5778
5779 rx_done = stmmac_rx_zc(priv, budget, chan);
5780
5781 rxtx_done = max(tx_done, rx_done);
5782
5783 /* If either TX or RX work is not complete, return budget
5784 	 * and keep polling
5785 */
5786 if (rxtx_done >= budget)
5787 return budget;
5788
5789 /* all work done, exit the polling mode */
5790 if (napi_complete_done(napi, rxtx_done)) {
5791 unsigned long flags;
5792
5793 spin_lock_irqsave(&ch->lock, flags);
5794 		/* Both RX and TX work are complete,
5795 * so enable both RX & TX IRQs.
5796 */
5797 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5798 spin_unlock_irqrestore(&ch->lock, flags);
5799 }
5800
5801 	/* TX still has packets to handle; check if we need to arm the tx timer */
5802 if (tx_pending_packets)
5803 stmmac_tx_timer_arm(priv, chan);
5804
5805 return min(rxtx_done, budget - 1);
5806 }
5807
5808 /**
5809 * stmmac_tx_timeout
5810 * @dev : Pointer to net device structure
5811 * @txqueue: the index of the hanging transmit queue
5812 * Description: this function is called when a packet transmission fails to
5813 * complete within a reasonable time. The driver will mark the error in the
5814 * netdev structure and arrange for the device to be reset to a sane state
5815 * in order to transmit a new packet.
5816 */
5817 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5818 {
5819 struct stmmac_priv *priv = netdev_priv(dev);
5820
5821 stmmac_global_err(priv);
5822 }
5823
5824 /**
5825 * stmmac_set_rx_mode - entry point for multicast addressing
5826 * @dev : pointer to the device structure
5827 * Description:
5828 * This function is a driver entry point which gets called by the kernel
5829 * whenever multicast addresses must be enabled/disabled.
5830 * Return value:
5831 * void.
5832 */
5833 static void stmmac_set_rx_mode(struct net_device *dev)
5834 {
5835 struct stmmac_priv *priv = netdev_priv(dev);
5836
5837 stmmac_set_filter(priv, priv->hw, dev);
5838 }
5839
5840 /**
5841 * stmmac_change_mtu - entry point to change MTU size for the device.
5842 * @dev : device pointer.
5843 * @new_mtu : the new MTU size for the device.
5844 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5845 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5846 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5847 * Return value:
5848 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5849 * file on failure.
5850 */
5851 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5852 {
5853 struct stmmac_priv *priv = netdev_priv(dev);
5854 int txfifosz = priv->plat->tx_fifo_size;
5855 struct stmmac_dma_conf *dma_conf;
5856 const int mtu = new_mtu;
5857 int ret;
5858
5859 if (txfifosz == 0)
5860 txfifosz = priv->dma_cap.tx_fifo_size;
5861
5862 txfifosz /= priv->plat->tx_queues_to_use;
5863
5864 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5865 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5866 return -EINVAL;
5867 }
5868
5869 new_mtu = STMMAC_ALIGN(new_mtu);
5870
5871 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5872 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5873 return -EINVAL;
5874
5875 if (netif_running(dev)) {
5876 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5877 /* Try to allocate the new DMA conf with the new mtu */
5878 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5879 if (IS_ERR(dma_conf)) {
5880 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5881 mtu);
5882 return PTR_ERR(dma_conf);
5883 }
5884
5885 stmmac_release(dev);
5886
5887 ret = __stmmac_open(dev, dma_conf);
5888 if (ret) {
5889 free_dma_desc_resources(priv, dma_conf);
5890 kfree(dma_conf);
5891 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5892 return ret;
5893 }
5894
5895 kfree(dma_conf);
5896
5897 stmmac_set_rx_mode(dev);
5898 }
5899
5900 WRITE_ONCE(dev->mtu, mtu);
5901 netdev_update_features(dev);
5902
5903 return 0;
5904 }
5905
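/* ndo_fix_features: mask out offloads that cannot be supported with the
 * current platform capabilities or MTU (RX/TX checksum offload, TSO).
 */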
5906 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5907 netdev_features_t features)
5908 {
5909 struct stmmac_priv *priv = netdev_priv(dev);
5910
5911 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5912 features &= ~NETIF_F_RXCSUM;
5913
5914 if (!priv->plat->tx_coe)
5915 features &= ~NETIF_F_CSUM_MASK;
5916
5917 /* Some GMAC devices have a bugged Jumbo frame support that
5918 * needs to have the Tx COE disabled for oversized frames
5919 * (due to limited buffer sizes). In this case we disable
5920 * the TX csum insertion in the TDES and not use SF.
5921 */
5922 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5923 features &= ~NETIF_F_CSUM_MASK;
5924
5925 /* Disable tso if asked by ethtool */
5926 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5927 if (features & NETIF_F_TSO)
5928 priv->tso = true;
5929 else
5930 priv->tso = false;
5931 }
5932
5933 return features;
5934 }
5935
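/* ndo_set_features: reprogram RX checksum offload, Split Header and MAC-level
 * VLAN stripping according to the requested feature set.
 */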
5936 static int stmmac_set_features(struct net_device *netdev,
5937 netdev_features_t features)
5938 {
5939 struct stmmac_priv *priv = netdev_priv(netdev);
5940
5941 	/* Keep the COE type if checksum offload is supported */
5942 if (features & NETIF_F_RXCSUM)
5943 priv->hw->rx_csum = priv->plat->rx_coe;
5944 else
5945 priv->hw->rx_csum = 0;
5946 /* No check needed because rx_coe has been set before and it will be
5947 * fixed in case of issue.
5948 */
5949 stmmac_rx_ipc(priv, priv->hw);
5950
5951 if (priv->sph_cap) {
5952 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5953 u32 chan;
5954
5955 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5956 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5957 }
5958
5959 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5960 priv->hw->hw_vlan_en = true;
5961 else
5962 priv->hw->hw_vlan_en = false;
5963
5964 stmmac_set_hw_vlan_mode(priv, priv->hw);
5965
5966 return 0;
5967 }
5968
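/* Track the Frame Preemption verification handshake: answer the link
 * partner's verify mPacket and update the ethtool MM verify status as
 * verify/response events arrive.
 */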
5969 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5970 {
5971 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
5972
5973 /* This is interrupt context, just spin_lock() */
5974 spin_lock(&fpe_cfg->lock);
5975
5976 if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
5977 goto unlock_out;
5978
5979 /* LP has sent verify mPacket */
5980 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
5981 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
5982 MPACKET_RESPONSE);
5983
5984 /* Local has sent verify mPacket */
5985 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
5986 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
5987 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
5988
5989 /* LP has sent response mPacket */
5990 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
5991 fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
5992 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
5993
5994 unlock_out:
5995 spin_unlock(&fpe_cfg->lock);
5996 }
5997
5998 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5999 {
6000 u32 rx_cnt = priv->plat->rx_queues_to_use;
6001 u32 tx_cnt = priv->plat->tx_queues_to_use;
6002 u32 queues_count;
6003 u32 queue;
6004 bool xmac;
6005
6006 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6007 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6008
6009 if (priv->irq_wake)
6010 pm_wakeup_event(priv->device, 0);
6011
6012 if (priv->dma_cap.estsel)
6013 stmmac_est_irq_status(priv, priv, priv->dev,
6014 &priv->xstats, tx_cnt);
6015
6016 if (priv->dma_cap.fpesel) {
6017 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6018 priv->dev);
6019
6020 stmmac_fpe_event_status(priv, status);
6021 }
6022
6023 	/* To handle the GMAC's own interrupts */
6024 if ((priv->plat->has_gmac) || xmac) {
6025 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6026
6027 if (unlikely(status)) {
6028 /* For LPI we need to save the tx status */
6029 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6030 priv->tx_path_in_lpi_mode = true;
6031 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6032 priv->tx_path_in_lpi_mode = false;
6033 }
6034
6035 for (queue = 0; queue < queues_count; queue++)
6036 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6037
6038 /* PCS link status */
6039 if (priv->hw->pcs &&
6040 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6041 if (priv->xstats.pcs_link)
6042 netif_carrier_on(priv->dev);
6043 else
6044 netif_carrier_off(priv->dev);
6045 }
6046
6047 stmmac_timestamp_interrupt(priv, priv);
6048 }
6049 }
6050
6051 /**
6052 * stmmac_interrupt - main ISR
6053 * @irq: interrupt number.
6054 * @dev_id: to pass the net device pointer.
6055 * Description: this is the main driver interrupt service routine.
6056 * It can call:
6057 * o DMA service routine (to manage incoming frame reception and transmission
6058 * status)
6059 * o Core interrupts to manage: remote wake-up, management counter, LPI
6060 * interrupts.
6061 */
6062 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6063 {
6064 struct net_device *dev = (struct net_device *)dev_id;
6065 struct stmmac_priv *priv = netdev_priv(dev);
6066
6067 /* Check if adapter is up */
6068 if (test_bit(STMMAC_DOWN, &priv->state))
6069 return IRQ_HANDLED;
6070
6071 /* Check ASP error if it isn't delivered via an individual IRQ */
6072 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6073 return IRQ_HANDLED;
6074
6075 /* To handle Common interrupts */
6076 stmmac_common_interrupt(priv);
6077
6078 /* To handle DMA interrupts */
6079 stmmac_dma_interrupt(priv);
6080
6081 return IRQ_HANDLED;
6082 }
6083
6084 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6085 {
6086 struct net_device *dev = (struct net_device *)dev_id;
6087 struct stmmac_priv *priv = netdev_priv(dev);
6088
6089 /* Check if adapter is up */
6090 if (test_bit(STMMAC_DOWN, &priv->state))
6091 return IRQ_HANDLED;
6092
6093 /* To handle Common interrupts */
6094 stmmac_common_interrupt(priv);
6095
6096 return IRQ_HANDLED;
6097 }
6098
6099 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6100 {
6101 struct net_device *dev = (struct net_device *)dev_id;
6102 struct stmmac_priv *priv = netdev_priv(dev);
6103
6104 /* Check if adapter is up */
6105 if (test_bit(STMMAC_DOWN, &priv->state))
6106 return IRQ_HANDLED;
6107
6108 /* Check if a fatal error happened */
6109 stmmac_safety_feat_interrupt(priv);
6110
6111 return IRQ_HANDLED;
6112 }
6113
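/* Per-channel TX MSI handler: kick NAPI for the channel and try to recover
 * from DMA TX errors by bumping the threshold or resetting the queue.
 */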
6114 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6115 {
6116 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6117 struct stmmac_dma_conf *dma_conf;
6118 int chan = tx_q->queue_index;
6119 struct stmmac_priv *priv;
6120 int status;
6121
6122 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6123 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6124
6125 /* Check if adapter is up */
6126 if (test_bit(STMMAC_DOWN, &priv->state))
6127 return IRQ_HANDLED;
6128
6129 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6130
6131 if (unlikely(status & tx_hard_error_bump_tc)) {
6132 /* Try to bump up the dma threshold on this failure */
6133 stmmac_bump_dma_threshold(priv, chan);
6134 } else if (unlikely(status == tx_hard_error)) {
6135 stmmac_tx_err(priv, chan);
6136 }
6137
6138 return IRQ_HANDLED;
6139 }
6140
6141 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6142 {
6143 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6144 struct stmmac_dma_conf *dma_conf;
6145 int chan = rx_q->queue_index;
6146 struct stmmac_priv *priv;
6147
6148 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6149 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6150
6151 /* Check if adapter is up */
6152 if (test_bit(STMMAC_DOWN, &priv->state))
6153 return IRQ_HANDLED;
6154
6155 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6156
6157 return IRQ_HANDLED;
6158 }
6159
6160 /**
6161 * stmmac_ioctl - Entry point for the Ioctl
6162 * @dev: Device pointer.
6163  * @rq: An IOCTL-specific structure that can contain a pointer to
6164 * a proprietary structure used to pass information to the driver.
6165 * @cmd: IOCTL command
6166 * Description:
6167 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6168 */
6169 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6170 {
6171 	struct stmmac_priv *priv = netdev_priv(dev);
6172 int ret = -EOPNOTSUPP;
6173
6174 if (!netif_running(dev))
6175 return -EINVAL;
6176
6177 switch (cmd) {
6178 case SIOCGMIIPHY:
6179 case SIOCGMIIREG:
6180 case SIOCSMIIREG:
6181 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6182 break;
6183 case SIOCSHWTSTAMP:
6184 ret = stmmac_hwtstamp_set(dev, rq);
6185 break;
6186 case SIOCGHWTSTAMP:
6187 ret = stmmac_hwtstamp_get(dev, rq);
6188 break;
6189 default:
6190 break;
6191 }
6192
6193 return ret;
6194 }
6195
6196 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6197 void *cb_priv)
6198 {
6199 struct stmmac_priv *priv = cb_priv;
6200 int ret = -EOPNOTSUPP;
6201
6202 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6203 return ret;
6204
6205 __stmmac_disable_all_queues(priv);
6206
6207 switch (type) {
6208 case TC_SETUP_CLSU32:
6209 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6210 break;
6211 case TC_SETUP_CLSFLOWER:
6212 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6213 break;
6214 default:
6215 break;
6216 }
6217
6218 stmmac_enable_all_queues(priv);
6219 return ret;
6220 }
6221
6222 static LIST_HEAD(stmmac_block_cb_list);
6223
6224 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6225 void *type_data)
6226 {
6227 struct stmmac_priv *priv = netdev_priv(ndev);
6228
6229 switch (type) {
6230 case TC_QUERY_CAPS:
6231 return stmmac_tc_query_caps(priv, priv, type_data);
6232 case TC_SETUP_QDISC_MQPRIO:
6233 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6234 case TC_SETUP_BLOCK:
6235 return flow_block_cb_setup_simple(type_data,
6236 &stmmac_block_cb_list,
6237 stmmac_setup_tc_block_cb,
6238 priv, priv, true);
6239 case TC_SETUP_QDISC_CBS:
6240 return stmmac_tc_setup_cbs(priv, priv, type_data);
6241 case TC_SETUP_QDISC_TAPRIO:
6242 return stmmac_tc_setup_taprio(priv, priv, type_data);
6243 case TC_SETUP_QDISC_ETF:
6244 return stmmac_tc_setup_etf(priv, priv, type_data);
6245 default:
6246 return -EOPNOTSUPP;
6247 }
6248 }
6249
6250 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6251 struct net_device *sb_dev)
6252 {
6253 int gso = skb_shinfo(skb)->gso_type;
6254
6255 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6256 /*
6257 * There is no way to determine the number of TSO/USO
6258 		 * capable queues. Let's always use Queue 0
6259 * because if TSO/USO is supported then at least this
6260 * one will be capable.
6261 */
6262 return 0;
6263 }
6264
6265 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6266 }
6267
6268 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6269 {
6270 struct stmmac_priv *priv = netdev_priv(ndev);
6271 int ret = 0;
6272
6273 ret = pm_runtime_resume_and_get(priv->device);
6274 if (ret < 0)
6275 return ret;
6276
6277 ret = eth_mac_addr(ndev, addr);
6278 if (ret)
6279 goto set_mac_error;
6280
6281 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6282
6283 set_mac_error:
6284 pm_runtime_put(priv->device);
6285
6286 return ret;
6287 }
6288
6289 #ifdef CONFIG_DEBUG_FS
6290 static struct dentry *stmmac_fs_dir;
6291
6292 static void sysfs_display_ring(void *head, int size, int extend_desc,
6293 struct seq_file *seq, dma_addr_t dma_phy_addr)
6294 {
6295 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6296 struct dma_desc *p = (struct dma_desc *)head;
6297 unsigned int desc_size;
6298 dma_addr_t dma_addr;
6299 int i;
6300
6301 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6302 for (i = 0; i < size; i++) {
6303 dma_addr = dma_phy_addr + i * desc_size;
6304 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6305 i, &dma_addr,
6306 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6307 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6308 if (extend_desc)
6309 p = &(++ep)->basic;
6310 else
6311 p++;
6312 }
6313 }
6314
6315 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6316 {
6317 struct net_device *dev = seq->private;
6318 struct stmmac_priv *priv = netdev_priv(dev);
6319 u32 rx_count = priv->plat->rx_queues_to_use;
6320 u32 tx_count = priv->plat->tx_queues_to_use;
6321 u32 queue;
6322
6323 if ((dev->flags & IFF_UP) == 0)
6324 return 0;
6325
6326 for (queue = 0; queue < rx_count; queue++) {
6327 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6328
6329 seq_printf(seq, "RX Queue %d:\n", queue);
6330
6331 if (priv->extend_desc) {
6332 seq_printf(seq, "Extended descriptor ring:\n");
6333 sysfs_display_ring((void *)rx_q->dma_erx,
6334 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6335 } else {
6336 seq_printf(seq, "Descriptor ring:\n");
6337 sysfs_display_ring((void *)rx_q->dma_rx,
6338 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6339 }
6340 }
6341
6342 for (queue = 0; queue < tx_count; queue++) {
6343 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6344
6345 seq_printf(seq, "TX Queue %d:\n", queue);
6346
6347 if (priv->extend_desc) {
6348 seq_printf(seq, "Extended descriptor ring:\n");
6349 sysfs_display_ring((void *)tx_q->dma_etx,
6350 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6351 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6352 seq_printf(seq, "Descriptor ring:\n");
6353 sysfs_display_ring((void *)tx_q->dma_tx,
6354 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6355 }
6356 }
6357
6358 return 0;
6359 }
6360 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6361
6362 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6363 {
6364 static const char * const dwxgmac_timestamp_source[] = {
6365 "None",
6366 "Internal",
6367 "External",
6368 "Both",
6369 };
6370 static const char * const dwxgmac_safety_feature_desc[] = {
6371 "No",
6372 "All Safety Features with ECC and Parity",
6373 "All Safety Features without ECC or Parity",
6374 "All Safety Features with Parity Only",
6375 "ECC Only",
6376 "UNDEFINED",
6377 "UNDEFINED",
6378 "UNDEFINED",
6379 };
6380 struct net_device *dev = seq->private;
6381 struct stmmac_priv *priv = netdev_priv(dev);
6382
6383 if (!priv->hw_cap_support) {
6384 seq_printf(seq, "DMA HW features not supported\n");
6385 return 0;
6386 }
6387
6388 seq_printf(seq, "==============================\n");
6389 seq_printf(seq, "\tDMA HW features\n");
6390 seq_printf(seq, "==============================\n");
6391
6392 seq_printf(seq, "\t10/100 Mbps: %s\n",
6393 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6394 seq_printf(seq, "\t1000 Mbps: %s\n",
6395 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6396 seq_printf(seq, "\tHalf duplex: %s\n",
6397 (priv->dma_cap.half_duplex) ? "Y" : "N");
6398 if (priv->plat->has_xgmac) {
6399 seq_printf(seq,
6400 "\tNumber of Additional MAC address registers: %d\n",
6401 priv->dma_cap.multi_addr);
6402 } else {
6403 seq_printf(seq, "\tHash Filter: %s\n",
6404 (priv->dma_cap.hash_filter) ? "Y" : "N");
6405 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6406 (priv->dma_cap.multi_addr) ? "Y" : "N");
6407 }
6408 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6409 (priv->dma_cap.pcs) ? "Y" : "N");
6410 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6411 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6412 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6413 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6414 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6415 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6416 seq_printf(seq, "\tRMON module: %s\n",
6417 (priv->dma_cap.rmon) ? "Y" : "N");
6418 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6419 (priv->dma_cap.time_stamp) ? "Y" : "N");
6420 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6421 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6422 if (priv->plat->has_xgmac)
6423 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6424 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6425 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6426 (priv->dma_cap.eee) ? "Y" : "N");
6427 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6428 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6429 (priv->dma_cap.tx_coe) ? "Y" : "N");
6430 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6431 priv->plat->has_xgmac) {
6432 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6433 (priv->dma_cap.rx_coe) ? "Y" : "N");
6434 } else {
6435 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6436 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6437 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6438 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6439 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6440 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6441 }
6442 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6443 priv->dma_cap.number_rx_channel);
6444 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6445 priv->dma_cap.number_tx_channel);
6446 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6447 priv->dma_cap.number_rx_queues);
6448 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6449 priv->dma_cap.number_tx_queues);
6450 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6451 (priv->dma_cap.enh_desc) ? "Y" : "N");
6452 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6453 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6454 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6455 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6456 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6457 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6458 priv->dma_cap.pps_out_num);
6459 seq_printf(seq, "\tSafety Features: %s\n",
6460 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6461 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6462 priv->dma_cap.frpsel ? "Y" : "N");
6463 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6464 priv->dma_cap.host_dma_width);
6465 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6466 priv->dma_cap.rssen ? "Y" : "N");
6467 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6468 priv->dma_cap.vlhash ? "Y" : "N");
6469 seq_printf(seq, "\tSplit Header: %s\n",
6470 priv->dma_cap.sphen ? "Y" : "N");
6471 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6472 priv->dma_cap.vlins ? "Y" : "N");
6473 seq_printf(seq, "\tDouble VLAN: %s\n",
6474 priv->dma_cap.dvlan ? "Y" : "N");
6475 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6476 priv->dma_cap.l3l4fnum);
6477 seq_printf(seq, "\tARP Offloading: %s\n",
6478 priv->dma_cap.arpoffsel ? "Y" : "N");
6479 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6480 priv->dma_cap.estsel ? "Y" : "N");
6481 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6482 priv->dma_cap.fpesel ? "Y" : "N");
6483 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6484 priv->dma_cap.tbssel ? "Y" : "N");
6485 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6486 priv->dma_cap.tbs_ch_num);
6487 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6488 priv->dma_cap.sgfsel ? "Y" : "N");
6489 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6490 BIT(priv->dma_cap.ttsfd) >> 1);
6491 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6492 priv->dma_cap.numtc);
6493 seq_printf(seq, "\tDCB Feature: %s\n",
6494 priv->dma_cap.dcben ? "Y" : "N");
6495 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6496 priv->dma_cap.advthword ? "Y" : "N");
6497 seq_printf(seq, "\tPTP Offload: %s\n",
6498 priv->dma_cap.ptoen ? "Y" : "N");
6499 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6500 priv->dma_cap.osten ? "Y" : "N");
6501 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6502 priv->dma_cap.pfcen ? "Y" : "N");
6503 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6504 BIT(priv->dma_cap.frpes) << 6);
6505 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6506 BIT(priv->dma_cap.frpbs) << 6);
6507 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6508 priv->dma_cap.frppipe_num);
6509 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6510 priv->dma_cap.nrvf_num ?
6511 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6512 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6513 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6514 seq_printf(seq, "\tDepth of GCL: %lu\n",
6515 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6516 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6517 priv->dma_cap.cbtisel ? "Y" : "N");
6518 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6519 priv->dma_cap.aux_snapshot_n);
6520 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6521 priv->dma_cap.pou_ost_en ? "Y" : "N");
6522 seq_printf(seq, "\tEnhanced DMA: %s\n",
6523 priv->dma_cap.edma ? "Y" : "N");
6524 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6525 priv->dma_cap.ediffc ? "Y" : "N");
6526 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6527 priv->dma_cap.vxn ? "Y" : "N");
6528 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6529 priv->dma_cap.dbgmem ? "Y" : "N");
6530 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6531 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6532 return 0;
6533 }
6534 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6535
6536 /* Use network device events to rename debugfs file entries.
6537 */
6538 static int stmmac_device_event(struct notifier_block *unused,
6539 unsigned long event, void *ptr)
6540 {
6541 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6542 struct stmmac_priv *priv = netdev_priv(dev);
6543
6544 if (dev->netdev_ops != &stmmac_netdev_ops)
6545 goto done;
6546
6547 switch (event) {
6548 case NETDEV_CHANGENAME:
6549 if (priv->dbgfs_dir)
6550 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6551 priv->dbgfs_dir,
6552 stmmac_fs_dir,
6553 dev->name);
6554 break;
6555 }
6556 done:
6557 return NOTIFY_DONE;
6558 }
6559
6560 static struct notifier_block stmmac_notifier = {
6561 .notifier_call = stmmac_device_event,
6562 };
6563
6564 static void stmmac_init_fs(struct net_device *dev)
6565 {
6566 struct stmmac_priv *priv = netdev_priv(dev);
6567
6568 rtnl_lock();
6569
6570 /* Create per netdev entries */
6571 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6572
6573 /* Entry to report DMA RX/TX rings */
6574 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6575 &stmmac_rings_status_fops);
6576
6577 /* Entry to report the DMA HW features */
6578 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6579 &stmmac_dma_cap_fops);
6580
6581 rtnl_unlock();
6582 }
6583
6584 static void stmmac_exit_fs(struct net_device *dev)
6585 {
6586 struct stmmac_priv *priv = netdev_priv(dev);
6587
6588 debugfs_remove_recursive(priv->dbgfs_dir);
6589 }
6590 #endif /* CONFIG_DEBUG_FS */
6591
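/* Bitwise CRC-32 (polynomial 0xedb88320) over the little-endian VLAN ID,
 * as used by the hardware VLAN hash filter.
 */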
6592 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6593 {
6594 unsigned char *data = (unsigned char *)&vid_le;
6595 unsigned char data_byte = 0;
6596 u32 crc = ~0x0;
6597 u32 temp = 0;
6598 int i, bits;
6599
6600 bits = get_bitmask_order(VLAN_VID_MASK);
6601 for (i = 0; i < bits; i++) {
6602 if ((i % 8) == 0)
6603 data_byte = data[i / 8];
6604
6605 temp = ((crc & 1) ^ data_byte) & 1;
6606 crc >>= 1;
6607 data_byte >>= 1;
6608
6609 if (temp)
6610 crc ^= 0xedb88320;
6611 }
6612
6613 return crc;
6614 }
6615
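/* Recompute the VLAN hash filter from the set of active VLANs. Without
 * hardware VLAN hash support, fall back to perfect matching of a single
 * VID (plus VID 0, which always passes the filter).
 */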
6616 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6617 {
6618 u32 crc, hash = 0;
6619 u16 pmatch = 0;
6620 int count = 0;
6621 u16 vid = 0;
6622
6623 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6624 __le16 vid_le = cpu_to_le16(vid);
6625 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6626 hash |= (1 << crc);
6627 count++;
6628 }
6629
6630 if (!priv->dma_cap.vlhash) {
6631 if (count > 2) /* VID = 0 always passes filter */
6632 return -EOPNOTSUPP;
6633
6634 pmatch = vid;
6635 hash = 0;
6636 }
6637
6638 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6639 }
6640
6641 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6642 {
6643 struct stmmac_priv *priv = netdev_priv(ndev);
6644 bool is_double = false;
6645 int ret;
6646
6647 ret = pm_runtime_resume_and_get(priv->device);
6648 if (ret < 0)
6649 return ret;
6650
6651 if (be16_to_cpu(proto) == ETH_P_8021AD)
6652 is_double = true;
6653
6654 set_bit(vid, priv->active_vlans);
6655 ret = stmmac_vlan_update(priv, is_double);
6656 if (ret) {
6657 clear_bit(vid, priv->active_vlans);
6658 goto err_pm_put;
6659 }
6660
6661 if (priv->hw->num_vlan) {
6662 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6663 if (ret)
6664 goto err_pm_put;
6665 }
6666 err_pm_put:
6667 pm_runtime_put(priv->device);
6668
6669 return ret;
6670 }
6671
6672 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6673 {
6674 struct stmmac_priv *priv = netdev_priv(ndev);
6675 bool is_double = false;
6676 int ret;
6677
6678 ret = pm_runtime_resume_and_get(priv->device);
6679 if (ret < 0)
6680 return ret;
6681
6682 if (be16_to_cpu(proto) == ETH_P_8021AD)
6683 is_double = true;
6684
6685 clear_bit(vid, priv->active_vlans);
6686
6687 if (priv->hw->num_vlan) {
6688 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6689 if (ret)
6690 goto del_vlan_error;
6691 }
6692
6693 ret = stmmac_vlan_update(priv, is_double);
6694
6695 del_vlan_error:
6696 pm_runtime_put(priv->device);
6697
6698 return ret;
6699 }
6700
6701 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6702 {
6703 struct stmmac_priv *priv = netdev_priv(dev);
6704
6705 switch (bpf->command) {
6706 case XDP_SETUP_PROG:
6707 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6708 case XDP_SETUP_XSK_POOL:
6709 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6710 bpf->xsk.queue_id);
6711 default:
6712 return -EOPNOTSUPP;
6713 }
6714 }
6715
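/* ndo_xdp_xmit: transmit a batch of XDP frames on the per-CPU XDP TX queue,
 * sharing the queue lock with the slow path.
 */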
6716 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6717 struct xdp_frame **frames, u32 flags)
6718 {
6719 struct stmmac_priv *priv = netdev_priv(dev);
6720 int cpu = smp_processor_id();
6721 struct netdev_queue *nq;
6722 int i, nxmit = 0;
6723 int queue;
6724
6725 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6726 return -ENETDOWN;
6727
6728 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6729 return -EINVAL;
6730
6731 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6732 nq = netdev_get_tx_queue(priv->dev, queue);
6733
6734 __netif_tx_lock(nq, cpu);
6735 /* Avoids TX time-out as we are sharing with slow path */
6736 txq_trans_cond_update(nq);
6737
6738 for (i = 0; i < num_frames; i++) {
6739 int res;
6740
6741 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6742 if (res == STMMAC_XDP_CONSUMED)
6743 break;
6744
6745 nxmit++;
6746 }
6747
6748 if (flags & XDP_XMIT_FLUSH) {
6749 stmmac_flush_tx_descriptors(priv, queue);
6750 stmmac_tx_timer_arm(priv, queue);
6751 }
6752
6753 __netif_tx_unlock(nq);
6754
6755 return nxmit;
6756 }
6757
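/* The following helpers tear down and rebuild a single RX or TX queue; they
 * are used, for example, when an XSK pool is attached to or detached from a
 * queue at runtime.
 */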
6758 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6759 {
6760 struct stmmac_channel *ch = &priv->channel[queue];
6761 unsigned long flags;
6762
6763 spin_lock_irqsave(&ch->lock, flags);
6764 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6765 spin_unlock_irqrestore(&ch->lock, flags);
6766
6767 stmmac_stop_rx_dma(priv, queue);
6768 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6769 }
6770
6771 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6772 {
6773 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6774 struct stmmac_channel *ch = &priv->channel[queue];
6775 unsigned long flags;
6776 u32 buf_size;
6777 int ret;
6778
6779 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6780 if (ret) {
6781 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6782 return;
6783 }
6784
6785 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6786 if (ret) {
6787 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6788 netdev_err(priv->dev, "Failed to init RX desc.\n");
6789 return;
6790 }
6791
6792 stmmac_reset_rx_queue(priv, queue);
6793 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6794
6795 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6796 rx_q->dma_rx_phy, rx_q->queue_index);
6797
6798 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6799 sizeof(struct dma_desc));
6800 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6801 rx_q->rx_tail_addr, rx_q->queue_index);
6802
6803 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6804 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6805 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6806 buf_size,
6807 rx_q->queue_index);
6808 } else {
6809 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6810 priv->dma_conf.dma_buf_sz,
6811 rx_q->queue_index);
6812 }
6813
6814 stmmac_start_rx_dma(priv, queue);
6815
6816 spin_lock_irqsave(&ch->lock, flags);
6817 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6818 spin_unlock_irqrestore(&ch->lock, flags);
6819 }
6820
6821 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6822 {
6823 struct stmmac_channel *ch = &priv->channel[queue];
6824 unsigned long flags;
6825
6826 spin_lock_irqsave(&ch->lock, flags);
6827 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6828 spin_unlock_irqrestore(&ch->lock, flags);
6829
6830 stmmac_stop_tx_dma(priv, queue);
6831 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6832 }
6833
6834 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6835 {
6836 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6837 struct stmmac_channel *ch = &priv->channel[queue];
6838 unsigned long flags;
6839 int ret;
6840
6841 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6842 if (ret) {
6843 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6844 return;
6845 }
6846
6847 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6848 if (ret) {
6849 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6850 netdev_err(priv->dev, "Failed to init TX desc.\n");
6851 return;
6852 }
6853
6854 stmmac_reset_tx_queue(priv, queue);
6855 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6856
6857 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6858 tx_q->dma_tx_phy, tx_q->queue_index);
6859
6860 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6861 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6862
6863 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6864 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6865 tx_q->tx_tail_addr, tx_q->queue_index);
6866
6867 stmmac_start_tx_dma(priv, queue);
6868
6869 spin_lock_irqsave(&ch->lock, flags);
6870 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6871 spin_unlock_irqrestore(&ch->lock, flags);
6872 }
6873
6874 void stmmac_xdp_release(struct net_device *dev)
6875 {
6876 struct stmmac_priv *priv = netdev_priv(dev);
6877 u32 chan;
6878
6879 /* Ensure tx function is not running */
6880 netif_tx_disable(dev);
6881
6882 /* Disable NAPI process */
6883 stmmac_disable_all_queues(priv);
6884
6885 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6886 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6887
6888 /* Free the IRQ lines */
6889 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6890
6891 /* Stop TX/RX DMA channels */
6892 stmmac_stop_all_dma(priv);
6893
6894 /* Release and free the Rx/Tx resources */
6895 free_dma_desc_resources(priv, &priv->dma_conf);
6896
6897 /* Disable the MAC Rx/Tx */
6898 stmmac_mac_set(priv, priv->ioaddr, false);
6899
6900 /* set trans_start so we don't get spurious
6901 * watchdogs during reset
6902 */
6903 netif_trans_update(dev);
6904 netif_carrier_off(dev);
6905 }
6906
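/* Reopen the datapath after an XDP/XSK reconfiguration: reallocate and
 * reinitialize the descriptor rings, reprogram the DMA RX/TX channels,
 * re-request the IRQ lines and restart the MAC.
 */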
6907 int stmmac_xdp_open(struct net_device *dev)
6908 {
6909 struct stmmac_priv *priv = netdev_priv(dev);
6910 u32 rx_cnt = priv->plat->rx_queues_to_use;
6911 u32 tx_cnt = priv->plat->tx_queues_to_use;
6912 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6913 struct stmmac_rx_queue *rx_q;
6914 struct stmmac_tx_queue *tx_q;
6915 u32 buf_size;
6916 bool sph_en;
6917 u32 chan;
6918 int ret;
6919
6920 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6921 if (ret < 0) {
6922 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6923 __func__);
6924 goto dma_desc_error;
6925 }
6926
6927 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6928 if (ret < 0) {
6929 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6930 __func__);
6931 goto init_error;
6932 }
6933
6934 stmmac_reset_queues_param(priv);
6935
6936 /* DMA CSR Channel configuration */
6937 for (chan = 0; chan < dma_csr_ch; chan++) {
6938 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6939 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6940 }
6941
6942 /* Adjust Split header */
6943 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6944
6945 /* DMA RX Channel Configuration */
6946 for (chan = 0; chan < rx_cnt; chan++) {
6947 rx_q = &priv->dma_conf.rx_queue[chan];
6948
6949 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6950 rx_q->dma_rx_phy, chan);
6951
6952 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6953 (rx_q->buf_alloc_num *
6954 sizeof(struct dma_desc));
6955 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6956 rx_q->rx_tail_addr, chan);
6957
6958 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6959 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6960 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6961 buf_size,
6962 rx_q->queue_index);
6963 } else {
6964 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6965 priv->dma_conf.dma_buf_sz,
6966 rx_q->queue_index);
6967 }
6968
6969 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6970 }
6971
6972 /* DMA TX Channel Configuration */
6973 for (chan = 0; chan < tx_cnt; chan++) {
6974 tx_q = &priv->dma_conf.tx_queue[chan];
6975
6976 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6977 tx_q->dma_tx_phy, chan);
6978
6979 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6980 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6981 tx_q->tx_tail_addr, chan);
6982
6983 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6984 tx_q->txtimer.function = stmmac_tx_timer;
6985 }
6986
6987 /* Enable the MAC Rx/Tx */
6988 stmmac_mac_set(priv, priv->ioaddr, true);
6989
6990 /* Start Rx & Tx DMA Channels */
6991 stmmac_start_all_dma(priv);
6992
6993 ret = stmmac_request_irq(dev);
6994 if (ret)
6995 goto irq_error;
6996
6997 /* Enable NAPI process */
6998 stmmac_enable_all_queues(priv);
6999 netif_carrier_on(dev);
7000 netif_tx_start_all_queues(dev);
7001 stmmac_enable_all_dma_irq(priv);
7002
7003 return 0;
7004
7005 irq_error:
7006 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7007 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7008
7009 stmmac_hw_teardown(dev);
7010 init_error:
7011 free_dma_desc_resources(priv, &priv->dma_conf);
7012 dma_desc_error:
7013 return ret;
7014 }
7015
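/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: network device pointer
 * @queue: queue index to kick
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (not inspected here)
 *
 * Called by the XSK core to kick the driver, typically when user space
 * polls or sends on an AF_XDP socket bound to this queue, e.g.
 * (illustrative user-space snippet, not part of this driver):
 *
 *	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * Since the EQoS core has no per-channel software interrupt, simply
 * schedule the combined RX/TX NAPI for the channel if it is not already
 * running.
 */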
7016 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7017 {
7018 struct stmmac_priv *priv = netdev_priv(dev);
7019 struct stmmac_rx_queue *rx_q;
7020 struct stmmac_tx_queue *tx_q;
7021 struct stmmac_channel *ch;
7022
7023 if (test_bit(STMMAC_DOWN, &priv->state) ||
7024 !netif_carrier_ok(priv->dev))
7025 return -ENETDOWN;
7026
7027 if (!stmmac_xdp_is_enabled(priv))
7028 return -EINVAL;
7029
7030 if (queue >= priv->plat->rx_queues_to_use ||
7031 queue >= priv->plat->tx_queues_to_use)
7032 return -EINVAL;
7033
7034 rx_q = &priv->dma_conf.rx_queue[queue];
7035 tx_q = &priv->dma_conf.tx_queue[queue];
7036 ch = &priv->channel[queue];
7037
7038 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7039 return -EINVAL;
7040
7041 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7042 /* EQoS does not have a per-DMA channel SW interrupt,
7043 * so we schedule the combined RX/TX NAPI straight away.
7044 */
7045 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7046 __napi_schedule(&ch->rxtx_napi);
7047 }
7048
7049 return 0;
7050 }
7051
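/* .ndo_get_stats64: aggregate the per-queue software counters into the
 * rtnl_link_stats64 snapshot. The u64_stats_fetch_begin()/retry() loops
 * guarantee a consistent 64-bit read even on 32-bit systems, where the
 * writers (the xmit and NAPI paths) update the counters under their own
 * syncp sequence counters.
 */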
7052 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7053 {
7054 struct stmmac_priv *priv = netdev_priv(dev);
7055 u32 tx_cnt = priv->plat->tx_queues_to_use;
7056 u32 rx_cnt = priv->plat->rx_queues_to_use;
7057 unsigned int start;
7058 int q;
7059
7060 for (q = 0; q < tx_cnt; q++) {
7061 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7062 u64 tx_packets;
7063 u64 tx_bytes;
7064
7065 do {
7066 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7067 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7068 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7069 do {
7070 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7071 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7072 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7073
7074 stats->tx_packets += tx_packets;
7075 stats->tx_bytes += tx_bytes;
7076 }
7077
7078 for (q = 0; q < rx_cnt; q++) {
7079 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7080 u64 rx_packets;
7081 u64 rx_bytes;
7082
7083 do {
7084 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7085 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7086 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7087 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7088
7089 stats->rx_packets += rx_packets;
7090 stats->rx_bytes += rx_bytes;
7091 }
7092
7093 stats->rx_dropped = priv->xstats.rx_dropped;
7094 stats->rx_errors = priv->xstats.rx_errors;
7095 stats->tx_dropped = priv->xstats.tx_dropped;
7096 stats->tx_errors = priv->xstats.tx_errors;
7097 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7098 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7099 stats->rx_length_errors = priv->xstats.rx_length;
7100 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7101 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7102 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7103 }
7104
7105 static const struct net_device_ops stmmac_netdev_ops = {
7106 .ndo_open = stmmac_open,
7107 .ndo_start_xmit = stmmac_xmit,
7108 .ndo_stop = stmmac_release,
7109 .ndo_change_mtu = stmmac_change_mtu,
7110 .ndo_fix_features = stmmac_fix_features,
7111 .ndo_set_features = stmmac_set_features,
7112 .ndo_set_rx_mode = stmmac_set_rx_mode,
7113 .ndo_tx_timeout = stmmac_tx_timeout,
7114 .ndo_eth_ioctl = stmmac_ioctl,
7115 .ndo_get_stats64 = stmmac_get_stats64,
7116 .ndo_setup_tc = stmmac_setup_tc,
7117 .ndo_select_queue = stmmac_select_queue,
7118 .ndo_set_mac_address = stmmac_set_mac_address,
7119 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7120 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7121 .ndo_bpf = stmmac_bpf,
7122 .ndo_xdp_xmit = stmmac_xdp_xmit,
7123 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7124 };
7125
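/* Recover from a fatal error by bouncing the interface: triggered via the
 * STMMAC_RESET_REQUESTED bit and executed from the service workqueue, it
 * closes and reopens the device under the RTNL lock while STMMAC_RESETING
 * guards against concurrent resets.
 */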
7126 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7127 {
7128 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7129 return;
7130 if (test_bit(STMMAC_DOWN, &priv->state))
7131 return;
7132
7133 netdev_err(priv->dev, "Reset adapter.\n");
7134
7135 rtnl_lock();
7136 netif_trans_update(priv->dev);
7137 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7138 usleep_range(1000, 2000);
7139
7140 set_bit(STMMAC_DOWN, &priv->state);
7141 dev_close(priv->dev);
7142 dev_open(priv->dev, NULL);
7143 clear_bit(STMMAC_DOWN, &priv->state);
7144 clear_bit(STMMAC_RESETING, &priv->state);
7145 rtnl_unlock();
7146 }
7147
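/* Deferred work handler for the driver's service task; currently it only
 * runs the reset subtask above and then clears the scheduling flag.
 */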
7148 static void stmmac_service_task(struct work_struct *work)
7149 {
7150 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7151 service_task);
7152
7153 stmmac_reset_subtask(priv);
7154 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7155 }
7156
7157 /**
7158 * stmmac_hw_init - Init the MAC device
7159 * @priv: driver private structure
7160 * Description: this function configures the MAC device according to
7161 * some platform parameters or the HW capability register. It prepares the
7162 * driver to use either ring or chain mode and to set up either enhanced or
7163 * normal descriptors.
7164 */
7165 static int stmmac_hw_init(struct stmmac_priv *priv)
7166 {
7167 int ret;
7168
7169 /* dwmac-sun8i only works in chain mode */
7170 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7171 chain_mode = 1;
7172 priv->chain_mode = chain_mode;
7173
7174 /* Initialize HW Interface */
7175 ret = stmmac_hwif_init(priv);
7176 if (ret)
7177 return ret;
7178
7179 /* Get the HW capability (new GMAC newer than 3.50a) */
7180 priv->hw_cap_support = stmmac_get_hw_features(priv);
7181 if (priv->hw_cap_support) {
7182 dev_info(priv->device, "DMA HW capability register supported\n");
7183
7184 /* We can override some gmac/dma configuration fields (e.g.
7185 * enh_desc, tx_coe) that are passed through the platform
7186 * with the values from the HW capability register
7187 * (if supported).
7188 */
7189 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7190 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7191 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7192 priv->hw->pmt = priv->plat->pmt;
7193 if (priv->dma_cap.hash_tb_sz) {
7194 priv->hw->multicast_filter_bins =
7195 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7196 priv->hw->mcast_bits_log2 =
7197 ilog2(priv->hw->multicast_filter_bins);
7198 }
7199
7200 /* TXCOE doesn't work in thresh DMA mode */
7201 if (priv->plat->force_thresh_dma_mode)
7202 priv->plat->tx_coe = 0;
7203 else
7204 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7205
7206 /* In case of GMAC4 rx_coe is from HW cap register. */
7207 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7208
7209 if (priv->dma_cap.rx_coe_type2)
7210 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7211 else if (priv->dma_cap.rx_coe_type1)
7212 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7213
7214 } else {
7215 dev_info(priv->device, "No HW DMA feature register supported\n");
7216 }
7217
7218 if (priv->plat->rx_coe) {
7219 priv->hw->rx_csum = priv->plat->rx_coe;
7220 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7221 if (priv->synopsys_id < DWMAC_CORE_4_00)
7222 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7223 }
7224 if (priv->plat->tx_coe)
7225 dev_info(priv->device, "TX Checksum insertion supported\n");
7226
7227 if (priv->plat->pmt) {
7228 dev_info(priv->device, "Wake-Up On LAN supported\n");
7229 device_set_wakeup_capable(priv->device, 1);
7230 }
7231
7232 if (priv->dma_cap.tsoen)
7233 dev_info(priv->device, "TSO supported\n");
7234
7235 priv->hw->vlan_fail_q_en =
7236 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7237 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7238
7239 /* Run HW quirks, if any */
7240 if (priv->hwif_quirks) {
7241 ret = priv->hwif_quirks(priv);
7242 if (ret)
7243 return ret;
7244 }
7245
7246 /* Rx Watchdog is available in the cores newer than 3.40.
7247 * In some cases, for example on buggy HW, this feature
7248 * has to be disabled; this can be done by passing the
7249 * riwt_off field from the platform.
7250 */
7251 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7252 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7253 priv->use_riwt = 1;
7254 dev_info(priv->device,
7255 "Enable RX Mitigation via HW Watchdog Timer\n");
7256 }
7257
7258 return 0;
7259 }
7260
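/* Register the NAPI contexts for every channel: an RX and a TX instance
 * per queue, plus a combined RX/TX instance for channels that have both
 * directions; the combined instance is used on the AF_XDP zero-copy path
 * (see stmmac_xsk_wakeup()).
 */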
7261 static void stmmac_napi_add(struct net_device *dev)
7262 {
7263 struct stmmac_priv *priv = netdev_priv(dev);
7264 u32 queue, maxq;
7265
7266 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7267
7268 for (queue = 0; queue < maxq; queue++) {
7269 struct stmmac_channel *ch = &priv->channel[queue];
7270
7271 ch->priv_data = priv;
7272 ch->index = queue;
7273 spin_lock_init(&ch->lock);
7274
7275 if (queue < priv->plat->rx_queues_to_use) {
7276 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7277 }
7278 if (queue < priv->plat->tx_queues_to_use) {
7279 netif_napi_add_tx(dev, &ch->tx_napi,
7280 stmmac_napi_poll_tx);
7281 }
7282 if (queue < priv->plat->rx_queues_to_use &&
7283 queue < priv->plat->tx_queues_to_use) {
7284 netif_napi_add(dev, &ch->rxtx_napi,
7285 stmmac_napi_poll_rxtx);
7286 }
7287 }
7288 }
7289
7290 static void stmmac_napi_del(struct net_device *dev)
7291 {
7292 struct stmmac_priv *priv = netdev_priv(dev);
7293 u32 queue, maxq;
7294
7295 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7296
7297 for (queue = 0; queue < maxq; queue++) {
7298 struct stmmac_channel *ch = &priv->channel[queue];
7299
7300 if (queue < priv->plat->rx_queues_to_use)
7301 netif_napi_del(&ch->rx_napi);
7302 if (queue < priv->plat->tx_queues_to_use)
7303 netif_napi_del(&ch->tx_napi);
7304 if (queue < priv->plat->rx_queues_to_use &&
7305 queue < priv->plat->tx_queues_to_use) {
7306 netif_napi_del(&ch->rxtx_napi);
7307 }
7308 }
7309 }
7310
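/* Change the number of RX/TX queues at runtime, e.g. from the ethtool
 * set_channels handler ("ethtool -L <iface> rx N tx N", values
 * illustrative): tear the interface down if it is running, re-register
 * the NAPI instances for the new counts, refresh the default RSS
 * indirection table and bring the interface back up.
 */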
7311 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7312 {
7313 struct stmmac_priv *priv = netdev_priv(dev);
7314 int ret = 0, i;
7315
7316 if (netif_running(dev))
7317 stmmac_release(dev);
7318
7319 stmmac_napi_del(dev);
7320
7321 priv->plat->rx_queues_to_use = rx_cnt;
7322 priv->plat->tx_queues_to_use = tx_cnt;
7323 if (!netif_is_rxfh_configured(dev))
7324 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7325 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7326 rx_cnt);
7327
7328 stmmac_napi_add(dev);
7329
7330 if (netif_running(dev))
7331 ret = stmmac_open(dev);
7332
7333 return ret;
7334 }
7335
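/* Change the RX/TX descriptor ring sizes at runtime, e.g. from the
 * ethtool set_ringparam handler ("ethtool -G <iface> rx N tx N"),
 * restarting the interface if needed so the rings are reallocated with
 * the new sizes.
 */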
7336 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7337 {
7338 struct stmmac_priv *priv = netdev_priv(dev);
7339 int ret = 0;
7340
7341 if (netif_running(dev))
7342 stmmac_release(dev);
7343
7344 priv->dma_conf.dma_rx_size = rx_size;
7345 priv->dma_conf.dma_tx_size = tx_size;
7346
7347 if (netif_running(dev))
7348 ret = stmmac_open(dev);
7349
7350 return ret;
7351 }
7352
7353 /**
7354 * stmmac_fpe_verify_timer - Timer for MAC Merge verification
7355 * @t: timer_list struct containing private info
7356 *
7357 * Verify the MAC Merge capability in the local TX direction, by
7358 * transmitting Verify mPackets up to 3 times. Wait until the link
7359 * partner responds with a Response mPacket, otherwise fail.
7360 */
7361 static void stmmac_fpe_verify_timer(struct timer_list *t)
7362 {
7363 struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
7364 struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
7365 fpe_cfg);
7366 unsigned long flags;
7367 bool rearm = false;
7368
7369 spin_lock_irqsave(&fpe_cfg->lock, flags);
7370
7371 switch (fpe_cfg->status) {
7372 case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
7373 case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
7374 if (fpe_cfg->verify_retries != 0) {
7375 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7376 fpe_cfg, MPACKET_VERIFY);
7377 rearm = true;
7378 } else {
7379 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
7380 }
7381
7382 fpe_cfg->verify_retries--;
7383 break;
7384
7385 case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
7386 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7387 priv->plat->tx_queues_to_use,
7388 priv->plat->rx_queues_to_use,
7389 true, true);
7390 break;
7391
7392 default:
7393 break;
7394 }
7395
7396 if (rearm) {
7397 mod_timer(&fpe_cfg->verify_timer,
7398 jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
7399 }
7400
7401 spin_unlock_irqrestore(&fpe_cfg->lock, flags);
7402 }
7403
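/* Arm the verification timer only when it can make progress: the pMAC is
 * enabled, TX preemption is requested, verification is enabled, and a
 * previous attempt has not already succeeded or failed.
 */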
7404 static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
7405 {
7406 if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
7407 fpe_cfg->verify_enabled &&
7408 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
7409 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
7410 timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
7411 mod_timer(&fpe_cfg->verify_timer, jiffies);
7412 }
7413 }
7414
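/**
 * stmmac_fpe_apply - apply the current Frame Preemption configuration
 * @priv: driver private structure
 *
 * If verification is disabled, program the FPE block immediately with the
 * requested TX/pMAC state. Otherwise restart the MAC Merge verification
 * state machine and let the verify timer program FPE once verification
 * succeeds.
 */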
7415 void stmmac_fpe_apply(struct stmmac_priv *priv)
7416 {
7417 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
7418
7419 /* If verification is disabled, configure FPE right away.
7420 * Otherwise let the timer code do it.
7421 */
7422 if (!fpe_cfg->verify_enabled) {
7423 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7424 priv->plat->tx_queues_to_use,
7425 priv->plat->rx_queues_to_use,
7426 fpe_cfg->tx_enabled,
7427 fpe_cfg->pmac_enabled);
7428 } else {
7429 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
7430 fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7431
7432 if (netif_running(priv->dev))
7433 stmmac_fpe_verify_timer_arm(fpe_cfg);
7434 }
7435 }
7436
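/* XDP RX metadata hook: lets a BPF program retrieve the hardware RX
 * timestamp of the current frame (e.g. via the
 * bpf_xdp_metadata_rx_timestamp() kfunc). Returns -ENODATA when RX
 * timestamping is disabled or no valid timestamp is present in the
 * descriptor.
 */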
7437 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7438 {
7439 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7440 struct dma_desc *desc_contains_ts = ctx->desc;
7441 struct stmmac_priv *priv = ctx->priv;
7442 struct dma_desc *ndesc = ctx->ndesc;
7443 struct dma_desc *desc = ctx->desc;
7444 u64 ns = 0;
7445
7446 if (!priv->hwts_rx_en)
7447 return -ENODATA;
7448
7449 /* For GMAC4, the valid timestamp is held in the CTX (next) descriptor. */
7450 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7451 desc_contains_ts = ndesc;
7452
7453 /* Check if timestamp is available */
7454 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7455 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7456 ns -= priv->plat->cdc_error_adj;
7457 *timestamp = ns_to_ktime(ns);
7458 return 0;
7459 }
7460
7461 return -ENODATA;
7462 }
7463
7464 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7465 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7466 };
7467
7468 /**
7469 * stmmac_dvr_probe
7470 * @device: device pointer
7471 * @plat_dat: platform data pointer
7472 * @res: stmmac resource pointer
7473 * Description: this is the main probe function, used to
7474 * allocate the net_device via alloc_etherdev and set up the private structure.
7475 * Return:
7476 * returns 0 on success, otherwise a negative errno.
7477 */
7478 int stmmac_dvr_probe(struct device *device,
7479 struct plat_stmmacenet_data *plat_dat,
7480 struct stmmac_resources *res)
7481 {
7482 struct net_device *ndev = NULL;
7483 struct stmmac_priv *priv;
7484 u32 rxq;
7485 int i, ret = 0;
7486
7487 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7488 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7489 if (!ndev)
7490 return -ENOMEM;
7491
7492 SET_NETDEV_DEV(ndev, device);
7493
7494 priv = netdev_priv(ndev);
7495 priv->device = device;
7496 priv->dev = ndev;
7497
7498 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7499 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7500 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7501 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7502 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7503 }
7504
7505 priv->xstats.pcpu_stats =
7506 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7507 if (!priv->xstats.pcpu_stats)
7508 return -ENOMEM;
7509
7510 stmmac_set_ethtool_ops(ndev);
7511 priv->pause = pause;
7512 priv->plat = plat_dat;
7513 priv->ioaddr = res->addr;
7514 priv->dev->base_addr = (unsigned long)res->addr;
7515 priv->plat->dma_cfg->multi_msi_en =
7516 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7517
7518 priv->dev->irq = res->irq;
7519 priv->wol_irq = res->wol_irq;
7520 priv->lpi_irq = res->lpi_irq;
7521 priv->sfty_irq = res->sfty_irq;
7522 priv->sfty_ce_irq = res->sfty_ce_irq;
7523 priv->sfty_ue_irq = res->sfty_ue_irq;
7524 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7525 priv->rx_irq[i] = res->rx_irq[i];
7526 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7527 priv->tx_irq[i] = res->tx_irq[i];
7528
7529 if (!is_zero_ether_addr(res->mac))
7530 eth_hw_addr_set(priv->dev, res->mac);
7531
7532 dev_set_drvdata(device, priv->dev);
7533
7534 /* Verify driver arguments */
7535 stmmac_verify_args();
7536
7537 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7538 if (!priv->af_xdp_zc_qps)
7539 return -ENOMEM;
7540
7541 /* Allocate workqueue */
7542 priv->wq = create_singlethread_workqueue("stmmac_wq");
7543 if (!priv->wq) {
7544 dev_err(priv->device, "failed to create workqueue\n");
7545 ret = -ENOMEM;
7546 goto error_wq_init;
7547 }
7548
7549 INIT_WORK(&priv->service_task, stmmac_service_task);
7550
7551 /* Override with kernel parameters if supplied XXX CRS XXX
7552 * this needs to have multiple instances
7553 */
7554 if ((phyaddr >= 0) && (phyaddr <= 31))
7555 priv->plat->phy_addr = phyaddr;
7556
7557 if (priv->plat->stmmac_rst) {
7558 ret = reset_control_assert(priv->plat->stmmac_rst);
7559 reset_control_deassert(priv->plat->stmmac_rst);
7560 /* Some reset controllers have only a reset callback instead of
7561 * an assert + deassert callback pair.
7562 */
7563 if (ret == -ENOTSUPP)
7564 reset_control_reset(priv->plat->stmmac_rst);
7565 }
7566
7567 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7568 if (ret == -ENOTSUPP)
7569 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7570 ERR_PTR(ret));
7571
7572 /* Wait a bit for the reset to take effect */
7573 udelay(10);
7574
7575 /* Init MAC and get the capabilities */
7576 ret = stmmac_hw_init(priv);
7577 if (ret)
7578 goto error_hw_init;
7579
7580 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7581 */
7582 if (priv->synopsys_id < DWMAC_CORE_5_20)
7583 priv->plat->dma_cfg->dche = false;
7584
7585 stmmac_check_ether_addr(priv);
7586
7587 ndev->netdev_ops = &stmmac_netdev_ops;
7588
7589 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7590 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7591
7592 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7593 NETIF_F_RXCSUM;
7594 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7595 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7596
7597 ret = stmmac_tc_init(priv, priv);
7598 if (!ret)
7599 ndev->hw_features |= NETIF_F_HW_TC;
7601
7602 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7603 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7604 if (priv->plat->has_gmac4)
7605 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7606 priv->tso = true;
7607 dev_info(priv->device, "TSO feature enabled\n");
7608 }
7609
7610 if (priv->dma_cap.sphen &&
7611 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7612 ndev->hw_features |= NETIF_F_GRO;
7613 priv->sph_cap = true;
7614 priv->sph = priv->sph_cap;
7615 dev_info(priv->device, "SPH feature enabled\n");
7616 }
7617
7618 /* Ideally our host DMA address width is the same as for the
7619 * device. However, it may differ and then we have to use our
7620 * host DMA width for allocation and the device DMA width for
7621 * register handling.
7622 */
7623 if (priv->plat->host_dma_width)
7624 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7625 else
7626 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7627
7628 if (priv->dma_cap.host_dma_width) {
7629 ret = dma_set_mask_and_coherent(device,
7630 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7631 if (!ret) {
7632 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7633 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7634
7635 /*
7636 * If more than 32 bits can be addressed, make sure to
7637 * enable enhanced addressing mode.
7638 */
7639 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7640 priv->plat->dma_cfg->eame = true;
7641 } else {
7642 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7643 if (ret) {
7644 dev_err(priv->device, "Failed to set DMA Mask\n");
7645 goto error_hw_init;
7646 }
7647
7648 priv->dma_cap.host_dma_width = 32;
7649 }
7650 }
7651
7652 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7653 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7654 #ifdef STMMAC_VLAN_TAG_USED
7655 /* Both mac100 and gmac support receive VLAN tag detection */
7656 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7657 if (priv->plat->has_gmac4) {
7658 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7659 priv->hw->hw_vlan_en = true;
7660 }
7661 if (priv->dma_cap.vlhash) {
7662 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7663 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7664 }
7665 if (priv->dma_cap.vlins) {
7666 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7667 if (priv->dma_cap.dvlan)
7668 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7669 }
7670 #endif
7671 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7672
7673 priv->xstats.threshold = tc;
7674
7675 /* Initialize RSS */
7676 rxq = priv->plat->rx_queues_to_use;
7677 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7678 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7679 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7680
7681 if (priv->dma_cap.rssen && priv->plat->rss_en)
7682 ndev->features |= NETIF_F_RXHASH;
7683
7684 ndev->vlan_features |= ndev->features;
7685
7686 /* MTU range: 46 - hw-specific max */
7687 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7688 if (priv->plat->has_xgmac)
7689 ndev->max_mtu = XGMAC_JUMBO_LEN;
7690 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7691 ndev->max_mtu = JUMBO_LEN;
7692 else
7693 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7694 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7695 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7696 */
7697 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7698 (priv->plat->maxmtu >= ndev->min_mtu))
7699 ndev->max_mtu = priv->plat->maxmtu;
7700 else if (priv->plat->maxmtu < ndev->min_mtu)
7701 dev_warn(priv->device,
7702 "%s: warning: maxmtu having invalid value (%d)\n",
7703 __func__, priv->plat->maxmtu);
7704
7705 if (flow_ctrl)
7706 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7707
7708 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7709
7710 /* Setup channels NAPI */
7711 stmmac_napi_add(ndev);
7712
7713 mutex_init(&priv->lock);
7714
7715 priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7716 priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
7717 priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
7718 timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
7719 spin_lock_init(&priv->fpe_cfg.lock);
7720
7721 /* If a specific clk_csr value is passed from the platform,
7722 * the CSR Clock Range selection cannot be changed at run-time
7723 * and it is fixed. Otherwise, the driver will try to set the
7724 * MDC clock dynamically according to the actual CSR clock
7725 * input.
7726 */
7727 if (priv->plat->clk_csr >= 0)
7728 priv->clk_csr = priv->plat->clk_csr;
7729 else
7730 stmmac_clk_csr_set(priv);
7731
7732 stmmac_check_pcs_mode(priv);
7733
7734 pm_runtime_get_noresume(device);
7735 pm_runtime_set_active(device);
7736 if (!pm_runtime_enabled(device))
7737 pm_runtime_enable(device);
7738
7739 ret = stmmac_mdio_register(ndev);
7740 if (ret < 0) {
7741 dev_err_probe(priv->device, ret,
7742 "MDIO bus (id: %d) registration failed\n",
7743 priv->plat->bus_id);
7744 goto error_mdio_register;
7745 }
7746
7747 if (priv->plat->speed_mode_2500)
7748 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7749
7750 ret = stmmac_pcs_setup(ndev);
7751 if (ret)
7752 goto error_pcs_setup;
7753
7754 ret = stmmac_phy_setup(priv);
7755 if (ret) {
7756 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7757 goto error_phy_setup;
7758 }
7759
7760 ret = register_netdev(ndev);
7761 if (ret) {
7762 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7763 __func__, ret);
7764 goto error_netdev_register;
7765 }
7766
7767 #ifdef CONFIG_DEBUG_FS
7768 stmmac_init_fs(ndev);
7769 #endif
7770
7771 if (priv->plat->dump_debug_regs)
7772 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7773
7774 /* Let pm_runtime_put() disable the clocks.
7775 * If CONFIG_PM is not enabled, the clocks will stay powered.
7776 */
7777 pm_runtime_put(device);
7778
7779 return ret;
7780
7781 error_netdev_register:
7782 phylink_destroy(priv->phylink);
7783 error_phy_setup:
7784 stmmac_pcs_clean(ndev);
7785 error_pcs_setup:
7786 stmmac_mdio_unregister(ndev);
7787 error_mdio_register:
7788 stmmac_napi_del(ndev);
7789 error_hw_init:
7790 destroy_workqueue(priv->wq);
7791 error_wq_init:
7792 bitmap_free(priv->af_xdp_zc_qps);
7793
7794 return ret;
7795 }
7796 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7797
7798 /**
7799 * stmmac_dvr_remove
7800 * @dev: device pointer
7801 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7802 * changes the link status and releases the DMA descriptor rings.
7803 */
7804 void stmmac_dvr_remove(struct device *dev)
7805 {
7806 struct net_device *ndev = dev_get_drvdata(dev);
7807 struct stmmac_priv *priv = netdev_priv(ndev);
7808
7809 netdev_info(priv->dev, "%s: removing driver", __func__);
7810
7811 pm_runtime_get_sync(dev);
7812
7813 stmmac_stop_all_dma(priv);
7814 stmmac_mac_set(priv, priv->ioaddr, false);
7815 unregister_netdev(ndev);
7816
7817 #ifdef CONFIG_DEBUG_FS
7818 stmmac_exit_fs(ndev);
7819 #endif
7820 phylink_destroy(priv->phylink);
7821 if (priv->plat->stmmac_rst)
7822 reset_control_assert(priv->plat->stmmac_rst);
7823 reset_control_assert(priv->plat->stmmac_ahb_rst);
7824
7825 stmmac_pcs_clean(ndev);
7826 stmmac_mdio_unregister(ndev);
7827
7828 destroy_workqueue(priv->wq);
7829 mutex_destroy(&priv->lock);
7830 bitmap_free(priv->af_xdp_zc_qps);
7831
7832 pm_runtime_disable(dev);
7833 pm_runtime_put_noidle(dev);
7834 }
7835 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7836
7837 /**
7838 * stmmac_suspend - suspend callback
7839 * @dev: device pointer
7840 * Description: this is the function to suspend the device and it is called
7841 * by the platform driver to stop the network queue, program the PMT register
7842 * (for WoL), and clean up and release the driver resources.
7843 */
7844 int stmmac_suspend(struct device *dev)
7845 {
7846 struct net_device *ndev = dev_get_drvdata(dev);
7847 struct stmmac_priv *priv = netdev_priv(ndev);
7848 u32 chan;
7849
7850 if (!ndev || !netif_running(ndev))
7851 return 0;
7852
7853 mutex_lock(&priv->lock);
7854
7855 netif_device_detach(ndev);
7856
7857 stmmac_disable_all_queues(priv);
7858
7859 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7860 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7861
7862 if (priv->eee_enabled) {
7863 priv->tx_path_in_lpi_mode = false;
7864 del_timer_sync(&priv->eee_ctrl_timer);
7865 }
7866
7867 /* Stop TX/RX DMA */
7868 stmmac_stop_all_dma(priv);
7869
7870 if (priv->plat->serdes_powerdown)
7871 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7872
7873 /* Enable Power down mode by programming the PMT regs */
7874 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7875 stmmac_pmt(priv, priv->hw, priv->wolopts);
7876 priv->irq_wake = 1;
7877 } else {
7878 stmmac_mac_set(priv, priv->ioaddr, false);
7879 pinctrl_pm_select_sleep_state(priv->device);
7880 }
7881
7882 mutex_unlock(&priv->lock);
7883
7884 rtnl_lock();
7885 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7886 phylink_suspend(priv->phylink, true);
7887 } else {
7888 if (device_may_wakeup(priv->device))
7889 phylink_speed_down(priv->phylink, false);
7890 phylink_suspend(priv->phylink, false);
7891 }
7892 rtnl_unlock();
7893
7894 if (priv->dma_cap.fpesel)
7895 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7896
7897 priv->speed = SPEED_UNKNOWN;
7898 return 0;
7899 }
7900 EXPORT_SYMBOL_GPL(stmmac_suspend);
7901
7902 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7903 {
7904 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7905
7906 rx_q->cur_rx = 0;
7907 rx_q->dirty_rx = 0;
7908 }
7909
7910 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7911 {
7912 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7913
7914 tx_q->cur_tx = 0;
7915 tx_q->dirty_tx = 0;
7916 tx_q->mss = 0;
7917
7918 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7919 }
7920
7921 /**
7922 * stmmac_reset_queues_param - reset queue parameters
7923 * @priv: driver private structure
7924 */
7925 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7926 {
7927 u32 rx_cnt = priv->plat->rx_queues_to_use;
7928 u32 tx_cnt = priv->plat->tx_queues_to_use;
7929 u32 queue;
7930
7931 for (queue = 0; queue < rx_cnt; queue++)
7932 stmmac_reset_rx_queue(priv, queue);
7933
7934 for (queue = 0; queue < tx_cnt; queue++)
7935 stmmac_reset_tx_queue(priv, queue);
7936 }
7937
7938 /**
7939 * stmmac_resume - resume callback
7940 * @dev: device pointer
7941 * Description: on resume, this function is invoked to set up the DMA and core
7942 * in a usable state.
7943 */
7944 int stmmac_resume(struct device *dev)
7945 {
7946 struct net_device *ndev = dev_get_drvdata(dev);
7947 struct stmmac_priv *priv = netdev_priv(ndev);
7948 int ret;
7949
7950 if (!netif_running(ndev))
7951 return 0;
7952
7953 /* The Power Down bit in the PMT register is cleared
7954 * automatically as soon as a magic packet or a Wake-up frame
7955 * is received. Even so, it's better to manually clear
7956 * this bit because it can generate problems while resuming
7957 * from other devices (e.g. serial console).
7958 */
7959 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7960 mutex_lock(&priv->lock);
7961 stmmac_pmt(priv, priv->hw, 0);
7962 mutex_unlock(&priv->lock);
7963 priv->irq_wake = 0;
7964 } else {
7965 pinctrl_pm_select_default_state(priv->device);
7966 /* reset the phy so that it's ready */
7967 if (priv->mii)
7968 stmmac_mdio_reset(priv->mii);
7969 }
7970
7971 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7972 priv->plat->serdes_powerup) {
7973 ret = priv->plat->serdes_powerup(ndev,
7974 priv->plat->bsp_priv);
7975
7976 if (ret < 0)
7977 return ret;
7978 }
7979
7980 rtnl_lock();
7981 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7982 phylink_resume(priv->phylink);
7983 } else {
7984 phylink_resume(priv->phylink);
7985 if (device_may_wakeup(priv->device))
7986 phylink_speed_up(priv->phylink);
7987 }
7988 rtnl_unlock();
7989
7990 rtnl_lock();
7991 mutex_lock(&priv->lock);
7992
7993 stmmac_reset_queues_param(priv);
7994
7995 stmmac_free_tx_skbufs(priv);
7996 stmmac_clear_descriptors(priv, &priv->dma_conf);
7997
7998 stmmac_hw_setup(ndev, false);
7999 stmmac_init_coalesce(priv);
8000 stmmac_set_rx_mode(ndev);
8001
8002 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8003
8004 stmmac_enable_all_queues(priv);
8005 stmmac_enable_all_dma_irq(priv);
8006
8007 mutex_unlock(&priv->lock);
8008 rtnl_unlock();
8009
8010 netif_device_attach(ndev);
8011
8012 return 0;
8013 }
8014 EXPORT_SYMBOL_GPL(stmmac_resume);
8015
8016 #ifndef MODULE
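/* Parse the "stmmaceth=" kernel command-line option when the driver is
 * built in. The accepted keys mirror the module parameters, for example
 * (illustrative values only):
 *
 *	stmmaceth=watchdog:4000,debug:16,buf_sz:2048
 */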
8017 static int __init stmmac_cmdline_opt(char *str)
8018 {
8019 char *opt;
8020
8021 if (!str || !*str)
8022 return 1;
8023 while ((opt = strsep(&str, ",")) != NULL) {
8024 if (!strncmp(opt, "debug:", 6)) {
8025 if (kstrtoint(opt + 6, 0, &debug))
8026 goto err;
8027 } else if (!strncmp(opt, "phyaddr:", 8)) {
8028 if (kstrtoint(opt + 8, 0, &phyaddr))
8029 goto err;
8030 } else if (!strncmp(opt, "buf_sz:", 7)) {
8031 if (kstrtoint(opt + 7, 0, &buf_sz))
8032 goto err;
8033 } else if (!strncmp(opt, "tc:", 3)) {
8034 if (kstrtoint(opt + 3, 0, &tc))
8035 goto err;
8036 } else if (!strncmp(opt, "watchdog:", 9)) {
8037 if (kstrtoint(opt + 9, 0, &watchdog))
8038 goto err;
8039 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8040 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8041 goto err;
8042 } else if (!strncmp(opt, "pause:", 6)) {
8043 if (kstrtoint(opt + 6, 0, &pause))
8044 goto err;
8045 } else if (!strncmp(opt, "eee_timer:", 10)) {
8046 if (kstrtoint(opt + 10, 0, &eee_timer))
8047 goto err;
8048 } else if (!strncmp(opt, "chain_mode:", 11)) {
8049 if (kstrtoint(opt + 11, 0, &chain_mode))
8050 goto err;
8051 }
8052 }
8053 return 1;
8054
8055 err:
8056 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8057 return 1;
8058 }
8059
8060 __setup("stmmaceth=", stmmac_cmdline_opt);
8061 #endif /* MODULE */
8062
8063 static int __init stmmac_init(void)
8064 {
8065 #ifdef CONFIG_DEBUG_FS
8066 /* Create debugfs main directory if it doesn't exist yet */
8067 if (!stmmac_fs_dir)
8068 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8069 register_netdevice_notifier(&stmmac_notifier);
8070 #endif
8071
8072 return 0;
8073 }
8074
8075 static void __exit stmmac_exit(void)
8076 {
8077 #ifdef CONFIG_DEBUG_FS
8078 unregister_netdevice_notifier(&stmmac_notifier);
8079 debugfs_remove_recursive(stmmac_fs_dir);
8080 #endif
8081 }
8082
8083 module_init(stmmac_init)
8084 module_exit(stmmac_exit)
8085
8086 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8087 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8088 MODULE_LICENSE("GPL");
8089