Lines matching "lan966x" in drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c (Microchip LAN966x switch FDMA driver)

1 // SPDX-License-Identifier: GPL-2.0+
12 struct lan966x *lan966x = (struct lan966x *)fdma->priv; in lan966x_fdma_rx_dataptr_cb() local
13 struct lan966x_rx *rx = &lan966x->rx; in lan966x_fdma_rx_dataptr_cb()
16 page = page_pool_dev_alloc_pages(rx->page_pool); in lan966x_fdma_rx_dataptr_cb()
18 return -ENOMEM; in lan966x_fdma_rx_dataptr_cb()
20 rx->page[dcb][db] = page; in lan966x_fdma_rx_dataptr_cb()
29 struct lan966x *lan966x = (struct lan966x *)fdma->priv; in lan966x_fdma_tx_dataptr_cb() local
31 *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr; in lan966x_fdma_tx_dataptr_cb()
39 struct lan966x *lan966x = (struct lan966x *)fdma->priv; in lan966x_fdma_xdp_tx_dataptr_cb() local
41 *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM; in lan966x_fdma_xdp_tx_dataptr_cb()
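
The three dataptr callbacks above are how the shared Microchip FDMA library asks the driver for the DMA address of each data block: RX hands out a fresh page from the page pool, plain TX returns the address that was DMA-mapped at xmit time, and XDP TX offsets that same address by XDP_PACKET_HEADROOM. A minimal sketch of the RX callback under the signature the fragments imply; the page_pool_get_dma_addr() call is an assumption based on the pool pre-mapping its pages:

    /* Sketch, not the verbatim driver code. Assumes the fdma library
     * callback signature: int (*dataptr_cb)(struct fdma *, int dcb,
     * int db, u64 *dataptr). */
    static int rx_dataptr_cb_sketch(struct fdma *fdma, int dcb, int db,
                                    u64 *dataptr)
    {
        struct lan966x *lan966x = (struct lan966x *)fdma->priv;
        struct lan966x_rx *rx = &lan966x->rx;
        struct page *page;

        page = page_pool_dev_alloc_pages(rx->page_pool);
        if (unlikely(!page))
            return -ENOMEM;

        rx->page[dcb][db] = page;
        /* The pool maps pages for DMA up front (assumed PP_FLAG_DMA_MAP),
         * so only the XDP headroom offset is added here. */
        *dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
        return 0;
    }
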
46 static int lan966x_fdma_channel_active(struct lan966x *lan966x) in lan966x_fdma_channel_active() argument
48 return lan_rd(lan966x, FDMA_CH_ACTIVE); in lan966x_fdma_channel_active()
53 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_free_pages()
56 for (i = 0; i < fdma->n_dcbs; ++i) { in lan966x_fdma_rx_free_pages()
57 for (j = 0; j < fdma->n_dbs; ++j) in lan966x_fdma_rx_free_pages()
58 page_pool_put_full_page(rx->page_pool, in lan966x_fdma_rx_free_pages()
59 rx->page[i][j], false); in lan966x_fdma_rx_free_pages()
65 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_free_page()
68 page = rx->page[fdma->dcb_index][fdma->db_index]; in lan966x_fdma_rx_free_page()
72 page_pool_recycle_direct(rx->page_pool, page); in lan966x_fdma_rx_free_page()
77 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_alloc_page_pool() local
79 .order = rx->page_order, in lan966x_fdma_rx_alloc_page_pool()
81 .pool_size = rx->fdma.n_dcbs, in lan966x_fdma_rx_alloc_page_pool()
83 .dev = lan966x->dev, in lan966x_fdma_rx_alloc_page_pool()
86 .max_len = rx->max_mtu - in lan966x_fdma_rx_alloc_page_pool()
90 if (lan966x_xdp_present(lan966x)) in lan966x_fdma_rx_alloc_page_pool()
93 rx->page_pool = page_pool_create(&pp_params); in lan966x_fdma_rx_alloc_page_pool()
95 for (int i = 0; i < lan966x->num_phys_ports; ++i) { in lan966x_fdma_rx_alloc_page_pool()
98 if (!lan966x->ports[i]) in lan966x_fdma_rx_alloc_page_pool()
101 port = lan966x->ports[i]; in lan966x_fdma_rx_alloc_page_pool()
102 xdp_rxq_info_unreg_mem_model(&port->xdp_rxq); in lan966x_fdma_rx_alloc_page_pool()
103 xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL, in lan966x_fdma_rx_alloc_page_pool()
104 rx->page_pool); in lan966x_fdma_rx_alloc_page_pool()
107 return PTR_ERR_OR_ZERO(rx->page_pool); in lan966x_fdma_rx_alloc_page_pool()
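
lan966x_fdma_rx_alloc_page_pool() creates one pool sized to the DCB ring and then re-registers it as the XDP memory model of every port's RX queue, so pages recycle cleanly from both the SKB and the XDP paths. A sketch of the parameter block; the fields the listing truncates (flags, nid, dma_dir, offset, the max_len subtrahend) are plausible assumptions, and line 90 above hints that the DMA direction changes once an XDP program is attached:

    struct page_pool_params pp_params = {
        .order     = rx->page_order,
        .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, /* assumed */
        .pool_size = rx->fdma.n_dcbs,
        .nid       = NUMA_NO_NODE,                           /* assumed */
        .dev       = lan966x->dev,
        .dma_dir   = DMA_FROM_DEVICE,                        /* assumed */
        .offset    = XDP_PACKET_HEADROOM,                    /* assumed */
        /* max_len subtrahend is truncated in the listing; a guess: */
        .max_len   = rx->max_mtu - XDP_PACKET_HEADROOM,
    };

    if (lan966x_xdp_present(lan966x))
        pp_params.dma_dir = DMA_BIDIRECTIONAL; /* assumed: XDP_TX reuses RX pages */

    rx->page_pool = page_pool_create(&pp_params);
    if (IS_ERR(rx->page_pool))
        return PTR_ERR(rx->page_pool);
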
112 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_alloc() local
113 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_alloc()
117 return PTR_ERR(rx->page_pool); in lan966x_fdma_rx_alloc()
119 err = fdma_alloc_coherent(lan966x->dev, fdma); in lan966x_fdma_rx_alloc()
123 fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), in lan966x_fdma_rx_alloc()
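
With the pool in place, fdma_alloc_coherent() allocates the DCB ring in coherent DMA memory and fdma_dcbs_init() chains every DCB and invokes the dataptr callback once per data block. The second argument to fdma_dcbs_init() is truncated in the listing; the sketch assumes the driver requests an interrupt per filled block:

    err = fdma_alloc_coherent(lan966x->dev, fdma);
    if (err)
        return err;

    fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
                   FDMA_DCB_STATUS_INTR); /* status flag is an assumption */
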
131 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_start() local
132 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_start()
138 lan_wr(lower_32_bits((u64)fdma->dma), lan966x, in lan966x_fdma_rx_start()
139 FDMA_DCB_LLP(fdma->channel_id)); in lan966x_fdma_rx_start()
140 lan_wr(upper_32_bits((u64)fdma->dma), lan966x, in lan966x_fdma_rx_start()
141 FDMA_DCB_LLP1(fdma->channel_id)); in lan966x_fdma_rx_start()
143 lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | in lan966x_fdma_rx_start()
147 lan966x, FDMA_CH_CFG(fdma->channel_id)); in lan966x_fdma_rx_start()
152 lan966x, FDMA_PORT_CTRL(0)); in lan966x_fdma_rx_start()
155 mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_rx_start()
157 mask |= BIT(fdma->channel_id); in lan966x_fdma_rx_start()
160 lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_rx_start()
163 lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), in lan966x_fdma_rx_start()
165 lan966x, FDMA_CH_ACTIVATE); in lan966x_fdma_rx_start()
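
Starting the RX channel follows a fixed recipe: point the channel at the ring via the LLP/LLP1 register pair (one 64-bit address split into 32-bit halves), configure the DCB geometry, enable this channel's bit in the shared doorbell interrupt mask, then activate. The mask update is a read-modify-write; a sketch using the *_SET/*_GET field helpers the driver's register layer normally provides (the macro names are assumed):

    u32 mask;

    mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
    mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); /* assumed accessor */
    mask |= BIT(fdma->channel_id);
    lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
            FDMA_INTR_DB_ENA_INTR_DB_ENA,          /* assumed field mask */
            lan966x, FDMA_INTR_DB_ENA);
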
170 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_disable() local
171 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_disable()
175 lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), in lan966x_fdma_rx_disable()
177 lan966x, FDMA_CH_DISABLE); in lan966x_fdma_rx_disable()
179 readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, in lan966x_fdma_rx_disable()
180 val, !(val & BIT(fdma->channel_id)), in lan966x_fdma_rx_disable()
183 lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), in lan966x_fdma_rx_disable()
185 lan966x, FDMA_CH_DB_DISCARD); in lan966x_fdma_rx_disable()
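
Disabling a channel is asynchronous: the hardware may still be draining after the disable bit is set, so the driver busy-polls FDMA_CH_ACTIVE until the channel's bit drops before discarding pending databuffers. readx_poll_timeout_atomic() from <linux/iopoll.h> does the spinning; the delay and timeout arguments are truncated in the listing, so the constants below are assumptions:

    #include <linux/iopoll.h>

    u32 val;

    /* Re-read lan966x_fdma_channel_active(lan966x) until this channel's
     * bit clears; 10 us poll interval, 100 ms timeout (assumed values). */
    readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                              val, !(val & BIT(fdma->channel_id)),
                              10, 100000);
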
190 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_reload() local
192 lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->fdma.channel_id)), in lan966x_fdma_rx_reload()
194 lan966x, FDMA_CH_RELOAD); in lan966x_fdma_rx_reload()
199 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_alloc() local
200 struct fdma *fdma = &tx->fdma; in lan966x_fdma_tx_alloc()
203 tx->dcbs_buf = kcalloc(fdma->n_dcbs, sizeof(struct lan966x_tx_dcb_buf), in lan966x_fdma_tx_alloc()
205 if (!tx->dcbs_buf) in lan966x_fdma_tx_alloc()
206 return -ENOMEM; in lan966x_fdma_tx_alloc()
208 err = fdma_alloc_coherent(lan966x->dev, fdma); in lan966x_fdma_tx_alloc()
217 kfree(tx->dcbs_buf); in lan966x_fdma_tx_alloc()
218 return -ENOMEM; in lan966x_fdma_tx_alloc()
223 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_free() local
225 kfree(tx->dcbs_buf); in lan966x_fdma_tx_free()
226 fdma_free_coherent(lan966x->dev, &tx->fdma); in lan966x_fdma_tx_free()
231 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_activate() local
232 struct fdma *fdma = &tx->fdma; in lan966x_fdma_tx_activate()
238 lan_wr(lower_32_bits((u64)fdma->dma), lan966x, in lan966x_fdma_tx_activate()
239 FDMA_DCB_LLP(fdma->channel_id)); in lan966x_fdma_tx_activate()
240 lan_wr(upper_32_bits((u64)fdma->dma), lan966x, in lan966x_fdma_tx_activate()
241 FDMA_DCB_LLP1(fdma->channel_id)); in lan966x_fdma_tx_activate()
243 lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) | in lan966x_fdma_tx_activate()
247 lan966x, FDMA_CH_CFG(fdma->channel_id)); in lan966x_fdma_tx_activate()
252 lan966x, FDMA_PORT_CTRL(0)); in lan966x_fdma_tx_activate()
255 mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_tx_activate()
257 mask |= BIT(fdma->channel_id); in lan966x_fdma_tx_activate()
260 lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_tx_activate()
263 lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)), in lan966x_fdma_tx_activate()
265 lan966x, FDMA_CH_ACTIVATE); in lan966x_fdma_tx_activate()
270 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_disable() local
271 struct fdma *fdma = &tx->fdma; in lan966x_fdma_tx_disable()
275 lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(fdma->channel_id)), in lan966x_fdma_tx_disable()
277 lan966x, FDMA_CH_DISABLE); in lan966x_fdma_tx_disable()
279 readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, in lan966x_fdma_tx_disable()
280 val, !(val & BIT(fdma->channel_id)), in lan966x_fdma_tx_disable()
283 lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(fdma->channel_id)), in lan966x_fdma_tx_disable()
285 lan966x, FDMA_CH_DB_DISCARD); in lan966x_fdma_tx_disable()
287 tx->activated = false; in lan966x_fdma_tx_disable()
292 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_reload() local
295 lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->fdma.channel_id)), in lan966x_fdma_tx_reload()
297 lan966x, FDMA_CH_RELOAD); in lan966x_fdma_tx_reload()
300 static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x) in lan966x_fdma_wakeup_netdev() argument
305 for (i = 0; i < lan966x->num_phys_ports; ++i) { in lan966x_fdma_wakeup_netdev()
306 port = lan966x->ports[i]; in lan966x_fdma_wakeup_netdev()
310 if (netif_queue_stopped(port->dev)) in lan966x_fdma_wakeup_netdev()
311 netif_wake_queue(port->dev); in lan966x_fdma_wakeup_netdev()
315 static void lan966x_fdma_stop_netdev(struct lan966x *lan966x) in lan966x_fdma_stop_netdev() argument
320 for (i = 0; i < lan966x->num_phys_ports; ++i) { in lan966x_fdma_stop_netdev()
321 port = lan966x->ports[i]; in lan966x_fdma_stop_netdev()
325 netif_stop_queue(port->dev); in lan966x_fdma_stop_netdev()
329 static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) in lan966x_fdma_tx_clear_buf() argument
331 struct lan966x_tx *tx = &lan966x->tx; in lan966x_fdma_tx_clear_buf()
332 struct lan966x_rx *rx = &lan966x->rx; in lan966x_fdma_tx_clear_buf()
334 struct fdma *fdma = &tx->fdma; in lan966x_fdma_tx_clear_buf()
343 spin_lock_irqsave(&lan966x->tx_lock, flags); in lan966x_fdma_tx_clear_buf()
344 for (i = 0; i < fdma->n_dcbs; ++i) { in lan966x_fdma_tx_clear_buf()
345 dcb_buf = &tx->dcbs_buf[i]; in lan966x_fdma_tx_clear_buf()
347 if (!dcb_buf->used) in lan966x_fdma_tx_clear_buf()
354 dcb_buf->dev->stats.tx_packets++; in lan966x_fdma_tx_clear_buf()
355 dcb_buf->dev->stats.tx_bytes += dcb_buf->len; in lan966x_fdma_tx_clear_buf()
357 dcb_buf->used = false; in lan966x_fdma_tx_clear_buf()
358 if (dcb_buf->use_skb) { in lan966x_fdma_tx_clear_buf()
359 dma_unmap_single(lan966x->dev, in lan966x_fdma_tx_clear_buf()
360 dcb_buf->dma_addr, in lan966x_fdma_tx_clear_buf()
361 dcb_buf->len, in lan966x_fdma_tx_clear_buf()
364 if (!dcb_buf->ptp) in lan966x_fdma_tx_clear_buf()
365 napi_consume_skb(dcb_buf->data.skb, weight); in lan966x_fdma_tx_clear_buf()
367 if (dcb_buf->xdp_ndo) in lan966x_fdma_tx_clear_buf()
368 dma_unmap_single(lan966x->dev, in lan966x_fdma_tx_clear_buf()
369 dcb_buf->dma_addr, in lan966x_fdma_tx_clear_buf()
370 dcb_buf->len, in lan966x_fdma_tx_clear_buf()
373 if (dcb_buf->xdp_ndo) in lan966x_fdma_tx_clear_buf()
374 xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq); in lan966x_fdma_tx_clear_buf()
376 page_pool_recycle_direct(rx->page_pool, in lan966x_fdma_tx_clear_buf()
377 dcb_buf->data.page); in lan966x_fdma_tx_clear_buf()
386 lan966x_fdma_wakeup_netdev(lan966x); in lan966x_fdma_tx_clear_buf()
388 spin_unlock_irqrestore(&lan966x->tx_lock, flags); in lan966x_fdma_tx_clear_buf()
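
The TX completion loop above frees three kinds of buffers: SKBs (consumed with napi_consume_skb() unless held back for a two-step PTP timestamp), ndo_xdp_xmit frames (unmapped and returned in bulk), and XDP_TX pages (recycled directly into the RX page pool). The bulk-return API batches the underlying page_pool releases; its usage shape:

    #include <net/xdp.h>

    struct xdp_frame_bulk bq;

    xdp_frame_bulk_init(&bq);               /* start with an empty bulk queue */

    /* ...inside the completion loop, per ndo_xdp_xmit buffer... */
    xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);

    /* ...once after the loop, release everything queued above. */
    xdp_flush_frame_bulk(&bq);
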
393 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_check_frame() local
394 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_check_frame()
400 page = rx->page[fdma->dcb_index][fdma->db_index]; in lan966x_fdma_rx_check_frame()
404 dma_sync_single_for_cpu(lan966x->dev, in lan966x_fdma_rx_check_frame()
405 (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM, in lan966x_fdma_rx_check_frame()
406 FDMA_DCB_STATUS_BLOCKL(db->status), in lan966x_fdma_rx_check_frame()
411 if (WARN_ON(*src_port >= lan966x->num_phys_ports)) in lan966x_fdma_rx_check_frame()
414 port = lan966x->ports[*src_port]; in lan966x_fdma_rx_check_frame()
418 return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status)); in lan966x_fdma_rx_check_frame()
424 struct lan966x *lan966x = rx->lan966x; in lan966x_fdma_rx_get_frame() local
425 struct fdma *fdma = &rx->fdma; in lan966x_fdma_rx_get_frame()
433 page = rx->page[fdma->dcb_index][fdma->db_index]; in lan966x_fdma_rx_get_frame()
435 skb = build_skb(page_address(page), fdma->db_size); in lan966x_fdma_rx_get_frame()
442 skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); in lan966x_fdma_rx_get_frame()
444 lan966x_ifh_get_timestamp(skb->data, &timestamp); in lan966x_fdma_rx_get_frame()
446 skb->dev = lan966x->ports[src_port]->dev; in lan966x_fdma_rx_get_frame()
449 if (likely(!(skb->dev->features & NETIF_F_RXFCS))) in lan966x_fdma_rx_get_frame()
450 skb_trim(skb, skb->len - ETH_FCS_LEN); in lan966x_fdma_rx_get_frame()
452 lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp); in lan966x_fdma_rx_get_frame()
453 skb->protocol = eth_type_trans(skb, skb->dev); in lan966x_fdma_rx_get_frame()
455 if (lan966x->bridge_mask & BIT(src_port)) { in lan966x_fdma_rx_get_frame()
456 skb->offload_fwd_mark = 1; in lan966x_fdma_rx_get_frame()
459 if (!lan966x_hw_offload(lan966x, src_port, skb)) in lan966x_fdma_rx_get_frame()
460 skb->offload_fwd_mark = 0; in lan966x_fdma_rx_get_frame()
463 skb->dev->stats.rx_bytes += skb->len; in lan966x_fdma_rx_get_frame()
464 skb->dev->stats.rx_packets++; in lan966x_fdma_rx_get_frame()
469 page_pool_recycle_direct(rx->page_pool, page); in lan966x_fdma_rx_get_frame()
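
lan966x_fdma_rx_get_frame() hands the page to the stack without copying: build_skb() wraps the page memory, the extraction frame header (IFH) is parsed for source port and timestamp and then stripped, and the trailing FCS is trimmed unless NETIF_F_RXFCS asks to keep it. A condensed sketch; the reserve/pull steps and the skb_mark_for_recycle() call are elided in the listing and assumed here:

    skb = build_skb(page_address(page), fdma->db_size);
    if (unlikely(!skb))
        goto free_page;                     /* hypothetical error label */

    skb_mark_for_recycle(skb);              /* assumed: page returns to the pool */
    skb_reserve(skb, XDP_PACKET_HEADROOM);  /* assumed headroom step */
    skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

    lan966x_ifh_get_src_port(skb->data, &src_port); /* assumed IFH helper */
    lan966x_ifh_get_timestamp(skb->data, &timestamp);
    skb_pull(skb, IFH_LEN_BYTES);           /* assumed: strip the IFH */

    if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
        skb_trim(skb, skb->len - ETH_FCS_LEN);
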
476 struct lan966x *lan966x = container_of(napi, struct lan966x, napi); in lan966x_fdma_napi_poll() local
477 struct lan966x_rx *rx = &lan966x->rx; in lan966x_fdma_napi_poll()
479 struct fdma *fdma = &rx->fdma; in lan966x_fdma_napi_poll()
484 dcb_reload = fdma->dcb_index; in lan966x_fdma_napi_poll()
486 lan966x_fdma_tx_clear_buf(lan966x, weight); in lan966x_fdma_napi_poll()
495 switch (lan966x_fdma_rx_check_frame(rx, &src_port)) { in lan966x_fdma_napi_poll()
519 napi_gro_receive(&lan966x->napi, skb); in lan966x_fdma_napi_poll()
524 while (dcb_reload != fdma->dcb_index) { in lan966x_fdma_napi_poll()
527 dcb_reload &= fdma->n_dcbs - 1; in lan966x_fdma_napi_poll()
529 fdma_dcb_add(fdma, old_dcb, FDMA_DCB_INFO_DATAL(fdma->db_size), in lan966x_fdma_napi_poll()
539 lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_napi_poll()
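
The poll order is: reclaim TX completions first, then consume up to `weight` RX frames (the switch on lan966x_fdma_rx_check_frame() separates XDP verdicts from frames that become SKBs), refill and re-add the consumed DCBs, and only re-arm the doorbell interrupt after finishing under budget. The canonical NAPI epilogue, with `work_done` as a hypothetical counter name:

    if (work_done < weight && napi_complete_done(napi, work_done))
        lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA); /* unmask doorbell IRQs */

    return work_done;
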
546 struct lan966x *lan966x = args; in lan966x_fdma_irq_handler() local
549 db = lan_rd(lan966x, FDMA_INTR_DB); in lan966x_fdma_irq_handler()
550 err = lan_rd(lan966x, FDMA_INTR_ERR); in lan966x_fdma_irq_handler()
553 lan_wr(0, lan966x, FDMA_INTR_DB_ENA); in lan966x_fdma_irq_handler()
554 lan_wr(db, lan966x, FDMA_INTR_DB); in lan966x_fdma_irq_handler()
556 napi_schedule(&lan966x->napi); in lan966x_fdma_irq_handler()
560 err_type = lan_rd(lan966x, FDMA_ERRORS); in lan966x_fdma_irq_handler()
564 lan_wr(err, lan966x, FDMA_INTR_ERR); in lan966x_fdma_irq_handler()
565 lan_wr(err_type, lan966x, FDMA_ERRORS); in lan966x_fdma_irq_handler()
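
The hard IRQ handler stays minimal: on a doorbell it masks further doorbell interrupts, acks the pending status, and defers all packet work to NAPI; error interrupts are read and acked separately. A condensed sketch of the doorbell half:

    static irqreturn_t fdma_irq_sketch(int irq, void *args)
    {
        struct lan966x *lan966x = args;
        u32 db = lan_rd(lan966x, FDMA_INTR_DB);

        if (db) {
            lan_wr(0, lan966x, FDMA_INTR_DB_ENA); /* mask doorbells... */
            lan_wr(db, lan966x, FDMA_INTR_DB);    /* ...ack this one... */
            napi_schedule(&lan966x->napi);        /* ...and defer to poll */
        }

        return IRQ_HANDLED;
    }
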
574 struct fdma *fdma = &tx->fdma; in lan966x_fdma_get_next_dcb()
577 for (i = 0; i < fdma->n_dcbs; ++i) { in lan966x_fdma_get_next_dcb()
578 dcb_buf = &tx->dcbs_buf[i]; in lan966x_fdma_get_next_dcb()
579 if (!dcb_buf->used && in lan966x_fdma_get_next_dcb()
580 !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i])) in lan966x_fdma_get_next_dcb()
584 return -1; in lan966x_fdma_get_next_dcb()
589 struct lan966x *lan966x = tx->lan966x; in lan966x_fdma_tx_start() local
591 if (likely(lan966x->tx.activated)) { in lan966x_fdma_tx_start()
595 lan966x->tx.activated = true; in lan966x_fdma_tx_start()
602 struct lan966x *lan966x = port->lan966x; in lan966x_fdma_xmit_xdpf() local
604 struct lan966x_tx *tx = &lan966x->tx; in lan966x_fdma_xmit_xdpf()
612 spin_lock(&lan966x->tx_lock); in lan966x_fdma_xmit_xdpf()
617 netif_stop_queue(port->dev); in lan966x_fdma_xmit_xdpf()
623 next_dcb_buf = &tx->dcbs_buf[next_to_use]; in lan966x_fdma_xmit_xdpf()
629 if (xdpf->headroom < IFH_LEN_BYTES) { in lan966x_fdma_xmit_xdpf()
634 ifh = xdpf->data - IFH_LEN_BYTES; in lan966x_fdma_xmit_xdpf()
637 lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port)); in lan966x_fdma_xmit_xdpf()
639 dma_addr = dma_map_single(lan966x->dev, in lan966x_fdma_xmit_xdpf()
640 xdpf->data - IFH_LEN_BYTES, in lan966x_fdma_xmit_xdpf()
641 xdpf->len + IFH_LEN_BYTES, in lan966x_fdma_xmit_xdpf()
643 if (dma_mapping_error(lan966x->dev, dma_addr)) { in lan966x_fdma_xmit_xdpf()
648 next_dcb_buf->data.xdpf = xdpf; in lan966x_fdma_xmit_xdpf()
649 next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; in lan966x_fdma_xmit_xdpf()
656 lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port)); in lan966x_fdma_xmit_xdpf()
659 dma_sync_single_for_device(lan966x->dev, in lan966x_fdma_xmit_xdpf()
664 next_dcb_buf->data.page = page; in lan966x_fdma_xmit_xdpf()
665 next_dcb_buf->len = len + IFH_LEN_BYTES; in lan966x_fdma_xmit_xdpf()
669 next_dcb_buf->use_skb = false; in lan966x_fdma_xmit_xdpf()
670 next_dcb_buf->xdp_ndo = !len; in lan966x_fdma_xmit_xdpf()
671 next_dcb_buf->dma_addr = dma_addr; in lan966x_fdma_xmit_xdpf()
672 next_dcb_buf->used = true; in lan966x_fdma_xmit_xdpf()
673 next_dcb_buf->ptp = false; in lan966x_fdma_xmit_xdpf()
674 next_dcb_buf->dev = port->dev; in lan966x_fdma_xmit_xdpf()
676 __fdma_dcb_add(&tx->fdma, in lan966x_fdma_xmit_xdpf()
683 FDMA_DCB_STATUS_BLOCKL(next_dcb_buf->len), in lan966x_fdma_xmit_xdpf()
691 spin_unlock(&lan966x->tx_lock); in lan966x_fdma_xmit_xdpf()
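
lan966x_fdma_xmit_xdpf() serves two callers: ndo_xdp_xmit passes an xdp_frame, which must be DMA-mapped here (and unmapped plus bulk-returned on completion), while XDP_TX passes a page the pool already mapped, needing only a dma_sync (and recycling on completion); `xdp_ndo = !len` on line 670 records which case applies. Both paths prepend the injection frame header (IFH) in front of the payload; a sketch for the xdp_frame path, with the bypass helper and cleanup label assumed:

    if (xdpf->headroom < IFH_LEN_BYTES) {
        ret = NETDEV_TX_OK;             /* no room for the IFH: drop (sketch) */
        goto out;                       /* hypothetical unlock-and-return label */
    }

    ifh = xdpf->data - IFH_LEN_BYTES;
    memset(ifh, 0x0, IFH_LEN_BYTES);
    lan966x_ifh_set_bypass(ifh, 1);     /* assumed: bypass the switch analyzer */
    lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
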
699 struct lan966x *lan966x = port->lan966x; in lan966x_fdma_xmit() local
701 struct lan966x_tx *tx = &lan966x->tx; in lan966x_fdma_xmit()
716 dev->stats.tx_dropped++; in lan966x_fdma_xmit()
721 needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0); in lan966x_fdma_xmit()
722 needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); in lan966x_fdma_xmit()
727 dev->stats.tx_dropped++; in lan966x_fdma_xmit()
735 memcpy(skb->data, ifh, IFH_LEN_BYTES); in lan966x_fdma_xmit()
738 dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len, in lan966x_fdma_xmit()
740 if (dma_mapping_error(lan966x->dev, dma_addr)) { in lan966x_fdma_xmit()
741 dev->stats.tx_dropped++; in lan966x_fdma_xmit()
747 next_dcb_buf = &tx->dcbs_buf[next_to_use]; in lan966x_fdma_xmit()
748 next_dcb_buf->use_skb = true; in lan966x_fdma_xmit()
749 next_dcb_buf->data.skb = skb; in lan966x_fdma_xmit()
750 next_dcb_buf->xdp_ndo = false; in lan966x_fdma_xmit()
751 next_dcb_buf->len = skb->len; in lan966x_fdma_xmit()
752 next_dcb_buf->dma_addr = dma_addr; in lan966x_fdma_xmit()
753 next_dcb_buf->used = true; in lan966x_fdma_xmit()
754 next_dcb_buf->ptp = false; in lan966x_fdma_xmit()
755 next_dcb_buf->dev = dev; in lan966x_fdma_xmit()
757 fdma_dcb_add(&tx->fdma, in lan966x_fdma_xmit()
764 FDMA_DCB_STATUS_BLOCKL(skb->len)); in lan966x_fdma_xmit()
766 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && in lan966x_fdma_xmit()
767 LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) in lan966x_fdma_xmit()
768 next_dcb_buf->ptp = true; in lan966x_fdma_xmit()
776 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && in lan966x_fdma_xmit()
777 LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) in lan966x_fdma_xmit()
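
The SKB xmit path must guarantee IFH_LEN_BYTES of headroom for the injection header and ETH_FCS_LEN of tailroom for the checksum word; lines 721-722 compute the deficit and the elided lines presumably grow the buffer. A sketch of that check-and-expand step, where the pskb_expand_head() call is an assumption about what the listing omits:

    needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
    needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
    if (needed_headroom || needed_tailroom) {
        err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                               GFP_ATOMIC);
        if (unlikely(err)) {
            dev->stats.tx_dropped++;
            dev_kfree_skb_any(skb);     /* sketch: drop on allocation failure */
            return NETDEV_TX_OK;
        }
    }

    skb_push(skb, IFH_LEN_BYTES);       /* prepend the IFH... */
    memcpy(skb->data, ifh, IFH_LEN_BYTES);
    skb_put(skb, ETH_FCS_LEN);          /* ...and reserve room for the FCS */
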
784 static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x) in lan966x_fdma_get_max_mtu() argument
789 for (i = 0; i < lan966x->num_phys_ports; ++i) { in lan966x_fdma_get_max_mtu()
793 port = lan966x->ports[i]; in lan966x_fdma_get_max_mtu()
797 mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); in lan966x_fdma_get_max_mtu()
805 static int lan966x_qsys_sw_status(struct lan966x *lan966x) in lan966x_qsys_sw_status() argument
807 return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT)); in lan966x_qsys_sw_status()
810 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) in lan966x_fdma_reload() argument
817 memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma)); in lan966x_fdma_reload()
818 page_pool = lan966x->rx.page_pool; in lan966x_fdma_reload()
820 napi_synchronize(&lan966x->napi); in lan966x_fdma_reload()
821 napi_disable(&lan966x->napi); in lan966x_fdma_reload()
822 lan966x_fdma_stop_netdev(lan966x); in lan966x_fdma_reload()
824 lan966x_fdma_rx_disable(&lan966x->rx); in lan966x_fdma_reload()
825 lan966x_fdma_rx_free_pages(&lan966x->rx); in lan966x_fdma_reload()
826 lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; in lan966x_fdma_reload()
827 lan966x->rx.max_mtu = new_mtu; in lan966x_fdma_reload()
828 err = lan966x_fdma_rx_alloc(&lan966x->rx); in lan966x_fdma_reload()
831 lan966x_fdma_rx_start(&lan966x->rx); in lan966x_fdma_reload()
833 fdma_free_coherent(lan966x->dev, &fdma_rx_old); in lan966x_fdma_reload()
837 lan966x_fdma_wakeup_netdev(lan966x); in lan966x_fdma_reload()
838 napi_enable(&lan966x->napi); in lan966x_fdma_reload()
842 lan966x->rx.page_pool = page_pool; in lan966x_fdma_reload()
843 memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma)); in lan966x_fdma_reload()
844 lan966x_fdma_rx_start(&lan966x->rx); in lan966x_fdma_reload()
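
The reload path keeps the old RX ring and page pool alive until their replacements are up, so a failed allocation restores the previous state instead of leaving RX dead. The rollback shape, condensed (quiesce and netdev stop/wake steps omitted; names as in the fragments above):

    /* Condensed sketch of the lan966x_fdma_reload() rollback logic. */
    static int fdma_reload_sketch(struct lan966x *lan966x, int new_mtu)
    {
        struct page_pool *old_pool = lan966x->rx.page_pool;
        struct fdma old_fdma;
        int err;

        memcpy(&old_fdma, &lan966x->rx.fdma, sizeof(old_fdma));

        lan966x->rx.max_mtu = new_mtu;
        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err) {
            /* Failed: restore the saved ring and pool, restart RX. */
            lan966x->rx.page_pool = old_pool;
            memcpy(&lan966x->rx.fdma, &old_fdma, sizeof(old_fdma));
            lan966x_fdma_rx_start(&lan966x->rx);
            return err;
        }

        /* Succeeded: the saved ring and pool are now unused. */
        lan966x_fdma_rx_start(&lan966x->rx);
        fdma_free_coherent(lan966x->dev, &old_fdma);
        page_pool_destroy(old_pool);
        return 0;
    }
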
849 static int lan966x_fdma_get_max_frame(struct lan966x *lan966x) in lan966x_fdma_get_max_frame() argument
851 return lan966x_fdma_get_max_mtu(lan966x) + in lan966x_fdma_get_max_frame()
858 static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu) in __lan966x_fdma_reload() argument
866 lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); in __lan966x_fdma_reload()
869 readx_poll_timeout(lan966x_qsys_sw_status, lan966x, in __lan966x_fdma_reload()
878 err = lan966x_fdma_reload(lan966x, max_mtu); in __lan966x_fdma_reload()
883 lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); in __lan966x_fdma_reload()
888 int lan966x_fdma_change_mtu(struct lan966x *lan966x) in lan966x_fdma_change_mtu() argument
892 max_mtu = lan966x_fdma_get_max_frame(lan966x); in lan966x_fdma_change_mtu()
893 if (max_mtu == lan966x->rx.max_mtu) in lan966x_fdma_change_mtu()
896 return __lan966x_fdma_reload(lan966x, max_mtu); in lan966x_fdma_change_mtu()
899 int lan966x_fdma_reload_page_pool(struct lan966x *lan966x) in lan966x_fdma_reload_page_pool() argument
903 max_mtu = lan966x_fdma_get_max_frame(lan966x); in lan966x_fdma_reload_page_pool()
904 return __lan966x_fdma_reload(lan966x, max_mtu); in lan966x_fdma_reload_page_pool()
907 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) in lan966x_fdma_netdev_init() argument
909 if (lan966x->fdma_ndev) in lan966x_fdma_netdev_init()
912 lan966x->fdma_ndev = dev; in lan966x_fdma_netdev_init()
913 netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll); in lan966x_fdma_netdev_init()
914 napi_enable(&lan966x->napi); in lan966x_fdma_netdev_init()
917 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev) in lan966x_fdma_netdev_deinit() argument
919 if (lan966x->fdma_ndev == dev) { in lan966x_fdma_netdev_deinit()
920 netif_napi_del(&lan966x->napi); in lan966x_fdma_netdev_deinit()
921 lan966x->fdma_ndev = NULL; in lan966x_fdma_netdev_deinit()
925 int lan966x_fdma_init(struct lan966x *lan966x) in lan966x_fdma_init() argument
929 if (!lan966x->fdma) in lan966x_fdma_init()
932 lan966x->rx.lan966x = lan966x; in lan966x_fdma_init()
933 lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL; in lan966x_fdma_init()
934 lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX; in lan966x_fdma_init()
935 lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS; in lan966x_fdma_init()
936 lan966x->rx.fdma.priv = lan966x; in lan966x_fdma_init()
937 lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma); in lan966x_fdma_init()
938 lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; in lan966x_fdma_init()
939 lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; in lan966x_fdma_init()
940 lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb; in lan966x_fdma_init()
941 lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x); in lan966x_fdma_init()
942 lan966x->tx.lan966x = lan966x; in lan966x_fdma_init()
943 lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL; in lan966x_fdma_init()
944 lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX; in lan966x_fdma_init()
945 lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS; in lan966x_fdma_init()
946 lan966x->tx.fdma.priv = lan966x; in lan966x_fdma_init()
947 lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma); in lan966x_fdma_init()
948 lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order; in lan966x_fdma_init()
949 lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb; in lan966x_fdma_init()
950 lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb; in lan966x_fdma_init()
952 err = lan966x_fdma_rx_alloc(&lan966x->rx); in lan966x_fdma_init()
956 err = lan966x_fdma_tx_alloc(&lan966x->tx); in lan966x_fdma_init()
958 fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); in lan966x_fdma_init()
962 lan966x_fdma_rx_start(&lan966x->rx); in lan966x_fdma_init()
967 void lan966x_fdma_deinit(struct lan966x *lan966x) in lan966x_fdma_deinit() argument
969 if (!lan966x->fdma) in lan966x_fdma_deinit()
972 lan966x_fdma_rx_disable(&lan966x->rx); in lan966x_fdma_deinit()
973 lan966x_fdma_tx_disable(&lan966x->tx); in lan966x_fdma_deinit()
975 napi_synchronize(&lan966x->napi); in lan966x_fdma_deinit()
976 napi_disable(&lan966x->napi); in lan966x_fdma_deinit()
978 lan966x_fdma_rx_free_pages(&lan966x->rx); in lan966x_fdma_deinit()
979 fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); in lan966x_fdma_deinit()
980 page_pool_destroy(lan966x->rx.page_pool); in lan966x_fdma_deinit()
981 lan966x_fdma_tx_free(&lan966x->tx); in lan966x_fdma_deinit()