Lines Matching +full:queue +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
38 /* XSK buffer shall store at least a Q-in-Q frame */
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
76 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_enable_irq()
82 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_disable_irq()
88 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); in tsnep_irq()
92 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); in tsnep_irq()
96 phy_mac_interrupt(adapter->netdev->phydev); in tsnep_irq()
98 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
99 if ((active & adapter->queue[0].irq_mask) != 0) { in tsnep_irq()
100 if (napi_schedule_prep(&adapter->queue[0].napi)) { in tsnep_irq()
101 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); in tsnep_irq()
103 __napi_schedule(&adapter->queue[0].napi); in tsnep_irq()
112 struct tsnep_queue *queue = arg; in tsnep_irq_txrx() local
114 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
115 if (napi_schedule_prep(&queue->napi)) { in tsnep_irq_txrx()
116 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_irq_txrx()
118 __napi_schedule(&queue->napi); in tsnep_irq_txrx()
124 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs) in tsnep_set_irq_coalesce() argument
127 return -ERANGE; in tsnep_set_irq_coalesce()
133 queue->irq_delay &= ~ECM_INT_DELAY_MASK; in tsnep_set_irq_coalesce()
134 queue->irq_delay |= usecs; in tsnep_set_irq_coalesce()
135 iowrite8(queue->irq_delay, queue->irq_delay_addr); in tsnep_set_irq_coalesce()
140 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue) in tsnep_get_irq_coalesce() argument
144 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK); in tsnep_get_irq_coalesce()
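
The two coalesce helpers above clamp the requested delay, pack it into a field of the per-queue delay register (irq_delay_addr), and read it back the same way. The standalone sketch below mirrors that round trip; BASE_US, SHIFT and MASK are made-up stand-ins for the ECM_INT_DELAY_* constants, whose real values are not shown in this listing, and the max formula only follows the "+ BASE_US - 1" pattern visible at file line 52.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BASE_US 16                   /* assumed delay granularity in us */
#define SHIFT   2                    /* assumed field position */
#define MASK    0xfcU                /* assumed field mask within the byte */
#define MAX_US  (((MASK >> SHIFT) * BASE_US) + BASE_US - 1)

static int set_coalesce(uint8_t *reg, unsigned int usecs)
{
        if (usecs > MAX_US)
                return -ERANGE;
        usecs /= BASE_US;            /* quantize to hardware steps */
        *reg &= (uint8_t)~MASK;      /* keep the other register bits */
        *reg |= (uint8_t)((usecs << SHIFT) & MASK);
        return 0;
}

static unsigned int get_coalesce(uint8_t reg)
{
        return ((reg & MASK) >> SHIFT) * BASE_US;
}

int main(void)
{
        uint8_t reg = 0;

        set_coalesce(&reg, 100);
        printf("requested 100us -> programmed %uus (max %uus)\n",
               get_coalesce(reg), (unsigned int)MAX_US);
        return 0;
}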
153 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_read()
158 if (!adapter->suppress_preamble) in tsnep_mdiobus_read()
162 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_read()
163 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_read()
174 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_write()
179 if (!adapter->suppress_preamble) in tsnep_mdiobus_write()
184 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_write()
185 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_write()
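
Both MDIO accessors start a transaction by writing ECM_MD_CONTROL and then poll ECM_MD_STATUS with readl_poll_timeout_atomic() until the hardware reports completion or a timeout expires. Below is a minimal userspace sketch of that bounded-poll pattern; the fake status register, its bit layout and the timeout value are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static unsigned int fake_reads;

/* fake MDIO status register: reports "busy" for the first few reads */
static uint32_t read_status(void)
{
        return ++fake_reads < 5 ? 0x1 : 0x0;
}

static bool mdio_idle(uint32_t val)
{
        return (val & 0x1) == 0;
}

/* poll read() until cond() holds or timeout_us elapses; a userspace
 * stand-in for the bounded register polling done above
 */
static int poll_timeout(uint32_t (*read)(void), bool (*cond)(uint32_t),
                        long timeout_us, uint32_t *val)
{
        struct timespec start, now;
        long elapsed_us;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                *val = read();
                if (cond(*val))
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                elapsed_us = (now.tv_sec - start.tv_sec) * 1000000L +
                             (now.tv_nsec - start.tv_nsec) / 1000L;
                if (elapsed_us > timeout_us)
                        return -1;   /* would be -ETIMEDOUT in the kernel */
        }
}

int main(void)
{
        uint32_t val;
        int ret = poll_timeout(read_status, mdio_idle, 1000, &val);

        printf("poll returned %d after %u reads, status 0x%x\n",
               ret, fake_reads, val);
        return 0;
}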
197 switch (adapter->phydev->speed) { in tsnep_set_link_mode()
208 iowrite32(mode, adapter->addr + ECM_STATUS); in tsnep_set_link_mode()
214 struct phy_device *phydev = netdev->phydev; in tsnep_phy_link_status_change()
216 if (phydev->link) in tsnep_phy_link_status_change()
219 phy_print_status(netdev->phydev); in tsnep_phy_link_status_change()
226 retval = phy_loopback(adapter->phydev, enable); in tsnep_phy_loopback()
233 netif_carrier_on(adapter->netdev); in tsnep_phy_loopback()
246 retval = phy_connect_direct(adapter->netdev, adapter->phydev, in tsnep_phy_open()
248 adapter->phy_mode); in tsnep_phy_open()
251 phydev = adapter->netdev->phydev; in tsnep_phy_open()
263 phy_ethtool_set_eee(adapter->phydev, &ethtool_keee); in tsnep_phy_open()
265 adapter->phydev->irq = PHY_MAC_INTERRUPT; in tsnep_phy_open()
266 phy_start(adapter->phydev); in tsnep_phy_open()
273 phy_stop(adapter->netdev->phydev); in tsnep_phy_close()
274 phy_disconnect(adapter->netdev->phydev); in tsnep_phy_close()
279 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
282 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
285 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
286 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
287 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
288 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
289 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
296 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
303 tx->page[i] = in tsnep_tx_ring_create()
304 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
306 if (!tx->page[i]) { in tsnep_tx_ring_create()
307 retval = -ENOMEM; in tsnep_tx_ring_create()
311 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
312 entry->desc_wb = (struct tsnep_tx_desc_wb *) in tsnep_tx_ring_create()
313 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
314 entry->desc = (struct tsnep_tx_desc *) in tsnep_tx_ring_create()
315 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_tx_ring_create()
316 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
317 entry->owner_user_flag = false; in tsnep_tx_ring_create()
321 entry = &tx->entry[i]; in tsnep_tx_ring_create()
322 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
323 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_tx_ring_create()
337 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
338 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
339 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
340 tx->write = 0; in tsnep_tx_init()
341 tx->read = 0; in tsnep_tx_init()
342 tx->owner_counter = 1; in tsnep_tx_init()
343 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
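
tsnep_tx_init() (and tsnep_rx_init() further down) program the 64-bit descriptor ring base address as two 32-bit register writes, with TSNEP_RESET_OWNER_COUNTER OR'ed into the address beforehand, which suggests the ring base is aligned enough that its low bits are free for flags. A small sketch of that split; the single-bit reset flag and the low/high slicing are assumptions, since DMA_ADDR_LOW()/DMA_ADDR_HIGH() themselves are not shown in this listing.

#include <stdint.h>
#include <stdio.h>

#define RESET_OWNER_COUNTER 0x1ULL   /* assumed flag in unused low address bits */

static uint32_t dma_addr_low(uint64_t dma)
{
        return (uint32_t)(dma & 0xffffffffULL);
}

static uint32_t dma_addr_high(uint64_t dma)
{
        return (uint32_t)(dma >> 32);
}

int main(void)
{
        /* example ring base; page alignment leaves the low bits free */
        uint64_t dma = 0x0000000123456000ULL | RESET_OWNER_COUNTER;

        printf("DESC_ADDR_LOW  = 0x%08x\n", dma_addr_low(dma));
        printf("DESC_ADDR_HIGH = 0x%08x\n", dma_addr_high(dma));
        return 0;
}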
350 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
362 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
369 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
374 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
383 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
385 entry->properties = 0; in tsnep_tx_activate()
387 if (entry->skb) { in tsnep_tx_activate()
388 entry->properties = length & TSNEP_DESC_LENGTH_MASK; in tsnep_tx_activate()
389 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_tx_activate()
390 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_activate()
391 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) in tsnep_tx_activate()
392 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; in tsnep_tx_activate()
412 entry->owner_user_flag = !entry->owner_user_flag; in tsnep_tx_activate()
415 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; in tsnep_tx_activate()
416 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
417 tx->owner_counter++; in tsnep_tx_activate()
418 if (tx->owner_counter == 4) in tsnep_tx_activate()
419 tx->owner_counter = 1; in tsnep_tx_activate()
420 tx->increment_owner_counter--; in tsnep_tx_activate()
421 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
422 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
424 entry->properties |= in tsnep_tx_activate()
425 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
427 if (entry->owner_user_flag) in tsnep_tx_activate()
428 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; in tsnep_tx_activate()
429 entry->desc->more_properties = in tsnep_tx_activate()
430 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); in tsnep_tx_activate()
431 if (entry->type & TSNEP_TX_TYPE_INLINE) in tsnep_tx_activate()
432 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; in tsnep_tx_activate()
439 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_tx_activate()
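
tsnep_tx_activate() maintains a per-ring owner counter that cycles through 1..3 and is advanced whenever the activated index hits increment_owner_counter, which itself walks backwards through the ring. The completion paths further down (tsnep_tx_poll(), tsnep_tx_pending()) compare the owner bits written back by the hardware against the value stored in entry->properties to tell finished descriptors from stale ones. A self-contained sketch of that rotation; RING_SIZE is an assumption, the driver's TSNEP_RING_SIZE may differ.

#include <stdio.h>

#define RING_SIZE 256                /* assumption */

struct ring_state {
        int owner_counter;           /* cycles 1..3 */
        int increment_owner_counter; /* index at which to advance it */
};

static int current_owner_counter(struct ring_state *s, int index)
{
        if (index == s->increment_owner_counter) {
                s->owner_counter++;
                if (s->owner_counter == 4)
                        s->owner_counter = 1;
                s->increment_owner_counter--;
                if (s->increment_owner_counter < 0)
                        s->increment_owner_counter = RING_SIZE - 1;
        }
        return s->owner_counter;
}

int main(void)
{
        struct ring_state s = {
                .owner_counter = 1,
                .increment_owner_counter = RING_SIZE - 1,
        };
        int lap, i;

        /* the counter advances once per traversal of the ring (every
         * RING_SIZE - 1 activations), so a write-back left over from
         * an earlier lap carries a different owner value
         */
        for (lap = 0; lap < 3; lap++)
                for (i = 0; i < RING_SIZE; i++)
                        current_owner_counter(&s, i);
        printf("owner counter after 3 laps: %d\n", s.owner_counter);
        return 0;
}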
444 if (tx->read <= tx->write) in tsnep_tx_desc_available()
445 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
447 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
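
tsnep_tx_desc_available() (and the RX counterpart tsnep_rx_desc_available() below) compute the number of free ring slots while always keeping one entry unused, so that read == write can only mean "empty" and never "full". A standalone sketch of that arithmetic with an assumed ring size:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256                /* assumption; TSNEP_RING_SIZE may differ */

static int ring_free(int read, int write)
{
        /* one entry is always left unused so that read == write
         * unambiguously means "empty" rather than "full"
         */
        if (read <= write)
                return RING_SIZE - write + read - 1;
        return read - write - 1;
}

int main(void)
{
        assert(ring_free(0, 0) == RING_SIZE - 1);    /* empty ring */
        assert(ring_free(10, 9) == 0);               /* completely full */
        assert(ring_free(5, 250) == 10);             /* wrapped producer */
        printf("free(read=5, write=250) = %d\n", ring_free(5, 250));
        return 0;
}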
460 return -ENOMEM; in tsnep_tx_map_frag()
461 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; in tsnep_tx_map_frag()
467 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
472 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
476 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; in tsnep_tx_map_frag()
485 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
493 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
498 dma = dma_map_single(dmadev, skb->data, len, in tsnep_tx_map()
501 return -ENOMEM; in tsnep_tx_map()
502 entry->type = TSNEP_TX_TYPE_SKB_MAP; in tsnep_tx_map()
505 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
506 entry->type = TSNEP_TX_TYPE_SKB_INLINE; in tsnep_tx_map()
510 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in tsnep_tx_map()
518 entry->len = len; in tsnep_tx_map()
521 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
532 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
538 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
540 if (entry->len) { in tsnep_tx_unmap()
541 if (entry->type & TSNEP_TX_TYPE_MAP) in tsnep_tx_unmap()
546 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) in tsnep_tx_unmap()
551 map_len += entry->len; in tsnep_tx_unmap()
552 entry->len = 0; in tsnep_tx_unmap()
568 if (skb_shinfo(skb)->nr_frags > 0) in tsnep_xmit_frame_ring()
569 count += skb_shinfo(skb)->nr_frags; in tsnep_xmit_frame_ring()
572 /* ring full, shall not happen because queue is stopped if full in tsnep_xmit_frame_ring()
575 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
580 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
581 entry->skb = skb; in tsnep_xmit_frame_ring()
585 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
586 dev_kfree_skb_any(entry->skb); in tsnep_xmit_frame_ring()
587 entry->skb = NULL; in tsnep_xmit_frame_ring()
589 tx->dropped++; in tsnep_xmit_frame_ring()
595 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) in tsnep_xmit_frame_ring()
596 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in tsnep_xmit_frame_ring()
599 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
600 i == count - 1); in tsnep_xmit_frame_ring()
601 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
608 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
612 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
621 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
632 len = xdpf->len; in tsnep_xdp_tx_map()
634 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
637 xdpf->data; in tsnep_xdp_tx_map()
640 return -ENOMEM; in tsnep_xdp_tx_map()
642 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; in tsnep_xdp_tx_map()
645 virt_to_page(xdpf->data); in tsnep_xdp_tx_map()
650 dma += sizeof(*xdpf) + xdpf->headroom; in tsnep_xdp_tx_map()
654 entry->type = TSNEP_TX_TYPE_XDP_TX; in tsnep_xdp_tx_map()
657 entry->len = len; in tsnep_xdp_tx_map()
660 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
665 frag = &shinfo->frags[i]; in tsnep_xdp_tx_map()
683 count += shinfo->nr_frags; in tsnep_xdp_xmit_frame_ring()
686 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
692 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
693 entry->xdpf = xdpf; in tsnep_xdp_xmit_frame_ring()
697 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
698 entry->xdpf = NULL; in tsnep_xdp_xmit_frame_ring()
700 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
707 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
708 i == count - 1); in tsnep_xdp_xmit_frame_ring()
709 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
719 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
744 /* Avoid transmit queue timeout since we share it with the slow path */ in tsnep_xdp_xmit_back()
758 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
759 entry->zc = true; in tsnep_xdp_tx_map_zc()
761 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
762 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
764 entry->type = TSNEP_TX_TYPE_XSK; in tsnep_xdp_tx_map_zc()
765 entry->len = xdpd->len; in tsnep_xdp_tx_map_zc()
767 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
769 return xdpd->len; in tsnep_xdp_tx_map_zc()
779 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
780 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
786 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
790 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
795 desc_available -= MAX_SKB_FRAGS + 1; in tsnep_xdp_xmit_zc()
797 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
820 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
824 if (tx->read == tx->write) in tsnep_tx_poll()
827 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
828 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
830 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_poll()
839 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
840 skb_shinfo(entry->skb)->nr_frags > 0) in tsnep_tx_poll()
841 count += skb_shinfo(entry->skb)->nr_frags; in tsnep_tx_poll()
842 else if ((entry->type & TSNEP_TX_TYPE_XDP) && in tsnep_tx_poll()
843 xdp_frame_has_frags(entry->xdpf)) in tsnep_tx_poll()
844 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; in tsnep_tx_poll()
846 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
848 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
849 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && in tsnep_tx_poll()
850 (__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
855 if (skb_shinfo(entry->skb)->tx_flags & in tsnep_tx_poll()
858 __le64_to_cpu(entry->desc_wb->counter); in tsnep_tx_poll()
861 __le64_to_cpu(entry->desc_wb->timestamp); in tsnep_tx_poll()
866 skb_tstamp_tx(entry->skb, &hwtstamps); in tsnep_tx_poll()
869 if (entry->type & TSNEP_TX_TYPE_SKB) in tsnep_tx_poll()
870 napi_consume_skb(entry->skb, napi_budget); in tsnep_tx_poll()
871 else if (entry->type & TSNEP_TX_TYPE_XDP) in tsnep_tx_poll()
872 xdp_return_frame_rx_napi(entry->xdpf); in tsnep_tx_poll()
876 entry->skb = NULL; in tsnep_tx_poll()
878 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
880 tx->packets++; in tsnep_tx_poll()
881 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
883 budget--; in tsnep_tx_poll()
886 if (tx->xsk_pool) { in tsnep_tx_poll()
888 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
889 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
890 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
910 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
913 if (tx->read != tx->write) { in tsnep_tx_pending()
914 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
915 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_pending()
917 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_pending()
944 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
946 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
951 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
952 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
953 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
955 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
956 xsk_buff_free(entry->xdp); in tsnep_rx_ring_cleanup()
958 entry->page = NULL; in tsnep_rx_ring_cleanup()
961 if (rx->page_pool) in tsnep_rx_ring_cleanup()
962 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
964 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
967 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
968 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
969 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
970 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
971 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
976 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
978 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
986 rx->page[i] = in tsnep_rx_ring_create()
987 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
989 if (!rx->page[i]) { in tsnep_rx_ring_create()
990 retval = -ENOMEM; in tsnep_rx_ring_create()
994 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
995 entry->desc_wb = (struct tsnep_rx_desc_wb *) in tsnep_rx_ring_create()
996 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
997 entry->desc = (struct tsnep_rx_desc *) in tsnep_rx_ring_create()
998 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_rx_ring_create()
999 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1011 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1012 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1013 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1014 rx->page_pool = NULL; in tsnep_rx_ring_create()
1019 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1020 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1021 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_rx_ring_create()
1027 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
1031 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1035 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1036 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1037 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1038 rx->write = 0; in tsnep_rx_init()
1039 rx->read = 0; in tsnep_rx_init()
1040 rx->owner_counter = 1; in tsnep_rx_init()
1041 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
1044 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1049 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1052 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1056 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1057 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
1062 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1064 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1065 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1067 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
1070 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1077 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1079 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1085 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1092 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { in tsnep_rx_alloc_page_buffer()
1093 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1094 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1095 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1097 return -ENOMEM; in tsnep_rx_alloc_page_buffer()
1104 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1107 entry->page = page; in tsnep_rx_set_page()
1108 entry->len = TSNEP_MAX_RX_BUF_SIZE; in tsnep_rx_set_page()
1109 entry->dma = page_pool_get_dma_addr(entry->page); in tsnep_rx_set_page()
1110 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1113 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1115 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1118 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1120 return -ENOMEM; in tsnep_rx_alloc_buffer()
1121 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1126 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1128 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1129 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1131 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1132 read->page = NULL; in tsnep_rx_reuse_buffer()
1135 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1137 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1140 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; in tsnep_rx_activate()
1141 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_rx_activate()
1142 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1143 rx->owner_counter++; in tsnep_rx_activate()
1144 if (rx->owner_counter == 4) in tsnep_rx_activate()
1145 rx->owner_counter = 1; in tsnep_rx_activate()
1146 rx->increment_owner_counter--; in tsnep_rx_activate()
1147 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1148 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1150 entry->properties |= in tsnep_rx_activate()
1151 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
1159 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_rx_activate()
1162 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1168 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1170 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1171 rx->alloc_failed++; in tsnep_rx_alloc()
1176 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1181 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1185 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1190 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1194 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1196 tsnep_rx_enable(rx); in tsnep_rx_refill()
1201 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1204 entry->xdp = xdp; in tsnep_rx_set_xdp()
1205 entry->len = TSNEP_XSK_RX_BUF_SIZE; in tsnep_rx_set_xdp()
1206 entry->dma = xsk_buff_xdp_get_dma(entry->xdp); in tsnep_rx_set_xdp()
1207 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1210 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1212 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1213 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1215 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1216 read->xdp = NULL; in tsnep_rx_reuse_buffer_zc()
1219 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1224 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1226 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1227 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1229 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1230 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1233 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1236 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1237 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1242 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1247 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1252 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1254 if (entry->xdp) in tsnep_rx_free_zc()
1255 xsk_buff_free(entry->xdp); in tsnep_rx_free_zc()
1256 entry->xdp = NULL; in tsnep_rx_free_zc()
1260 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1264 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1266 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1271 static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available) in tsnep_xsk_rx_need_wakeup() argument
1274 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
1276 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
1279 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1287 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; in tsnep_xdp_run_prog()
1294 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1299 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1304 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1308 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1314 sync = xdp->data_end - xdp->data_hard_start - in tsnep_xdp_run_prog()
1317 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1323 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1332 /* XDP_REDIRECT is the main action for zero-copy */ in tsnep_xdp_run_prog_zc()
1334 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1344 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1349 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1353 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1374 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1385 __skb_put(skb, length - ETH_FCS_LEN); in tsnep_build_skb()
1387 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1393 skb_shinfo(skb)->tx_flags |= in tsnep_build_skb()
1396 hwtstamps->netdev_data = rx_inline; in tsnep_build_skb()
1399 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1400 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1405 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1410 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1414 rx->packets++; in tsnep_rx_page()
1415 rx->bytes += length; in tsnep_rx_page()
1416 if (skb->pkt_type == PACKET_MULTICAST) in tsnep_rx_page()
1417 rx->multicast++; in tsnep_rx_page()
1421 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1423 rx->dropped++; in tsnep_rx_page()
1427 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1430 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1442 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1443 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1444 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1446 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1447 rx->tx_queue_index); in tsnep_rx_poll()
1448 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1450 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1453 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1454 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1455 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1457 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll()
1464 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1466 if (!entry->page) { in tsnep_rx_poll()
1468 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1469 * RX processing in tsnep_rx_poll()
1471 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1474 rx->dropped++; in tsnep_rx_poll()
1485 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); in tsnep_rx_poll()
1486 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1488 dma_sync_single_range_for_cpu(dmadev, entry->dma, in tsnep_rx_poll()
1491 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1493 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1496 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll()
1498 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1504 xdp_prepare_buff(&xdp, page_address(entry->page), in tsnep_rx_poll()
1506 length - ETH_FCS_LEN, false); in tsnep_rx_poll()
1508 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1511 rx->packets++; in tsnep_rx_poll()
1512 rx->bytes += length; in tsnep_rx_poll()
1514 entry->page = NULL; in tsnep_rx_poll()
1520 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1521 entry->page = NULL; in tsnep_rx_poll()
1525 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1528 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
1533 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1546 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1547 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1549 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1550 rx->tx_queue_index); in tsnep_rx_poll_zc()
1551 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1554 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1555 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1556 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1558 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll_zc()
1565 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1567 if (!entry->xdp) { in tsnep_rx_poll_zc()
1569 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1570 * RX processing in tsnep_rx_poll_zc()
1572 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1575 rx->dropped++; in tsnep_rx_poll_zc()
1586 prefetch(entry->xdp->data); in tsnep_rx_poll_zc()
1587 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1589 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); in tsnep_rx_poll_zc()
1590 xsk_buff_dma_sync_for_cpu(entry->xdp); in tsnep_rx_poll_zc()
1592 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1594 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1597 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1599 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1605 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1606 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1608 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1611 rx->packets++; in tsnep_rx_poll_zc()
1612 rx->bytes += length; in tsnep_rx_poll_zc()
1614 entry->xdp = NULL; in tsnep_rx_poll_zc()
1620 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1623 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, in tsnep_rx_poll_zc()
1625 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1627 rx->dropped++; in tsnep_rx_poll_zc()
1629 xsk_buff_free(entry->xdp); in tsnep_rx_poll_zc()
1630 entry->xdp = NULL; in tsnep_rx_poll_zc()
1634 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1637 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1639 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1640 tsnep_xsk_rx_need_wakeup(rx, desc_available); in tsnep_rx_poll_zc()
1648 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1652 if (rx->read != rx->write) { in tsnep_rx_pending()
1653 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1654 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_pending()
1656 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_pending()
1663 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1668 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1672 tsnep_rx_init(rx); in tsnep_rx_open()
1674 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1675 if (rx->xsk_pool) in tsnep_rx_open()
1676 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1678 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1680 retval = -ENOMEM; in tsnep_rx_open()
1688 if (rx->xsk_pool) { in tsnep_rx_open()
1689 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1697 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1701 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1703 if (rx->xsk_pool) in tsnep_rx_close()
1704 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1706 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1709 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1711 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1714 tsnep_rx_init(rx); in tsnep_rx_reopen()
1717 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1722 entry->desc->properties = 0; in tsnep_rx_reopen()
1723 entry->desc_wb->properties = 0; in tsnep_rx_reopen()
1727 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1728 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1729 rx->write++; in tsnep_rx_reopen()
1737 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1739 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1743 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1749 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1750 TSNEP_RING_SIZE - 1); in tsnep_rx_reopen_xsk()
1753 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1758 if (entry->page) { in tsnep_rx_reopen_xsk()
1759 *page = entry->page; in tsnep_rx_reopen_xsk()
1760 entry->page = NULL; in tsnep_rx_reopen_xsk()
1768 entry->desc->properties = 0; in tsnep_rx_reopen_xsk()
1769 entry->desc_wb->properties = 0; in tsnep_rx_reopen_xsk()
1772 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1773 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1774 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1775 rx->write++; in tsnep_rx_reopen_xsk()
1777 allocated--; in tsnep_rx_reopen_xsk()
1785 if (xsk_uses_need_wakeup(rx->xsk_pool)) in tsnep_rx_reopen_xsk()
1786 tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx)); in tsnep_rx_reopen_xsk()
1789 static bool tsnep_pending(struct tsnep_queue *queue) in tsnep_pending() argument
1791 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1794 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1802 struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, in tsnep_poll() local
1807 if (queue->tx) in tsnep_poll()
1808 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1814 if (queue->rx) { in tsnep_poll()
1815 done = queue->rx->xsk_pool ? in tsnep_poll()
1816 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1817 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1827 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1833 if (tsnep_pending(queue)) { in tsnep_poll()
1834 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1839 return min(done, budget - 1); in tsnep_poll()
1842 static int tsnep_request_irq(struct tsnep_queue *queue, bool first) in tsnep_request_irq() argument
1844 const char *name = netdev_name(queue->adapter->netdev); in tsnep_request_irq()
1850 sprintf(queue->name, "%s-mac", name); in tsnep_request_irq()
1852 dev = queue->adapter; in tsnep_request_irq()
1854 if (queue->tx && queue->rx) in tsnep_request_irq()
1855 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d", in tsnep_request_irq()
1856 name, queue->rx->queue_index); in tsnep_request_irq()
1857 else if (queue->tx) in tsnep_request_irq()
1858 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1859 name, queue->tx->queue_index); in tsnep_request_irq()
1861 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1862 name, queue->rx->queue_index); in tsnep_request_irq()
1864 dev = queue; in tsnep_request_irq()
1867 retval = request_irq(queue->irq, handler, 0, queue->name, dev); in tsnep_request_irq()
1870 memset(queue->name, 0, sizeof(queue->name)); in tsnep_request_irq()
1876 static void tsnep_free_irq(struct tsnep_queue *queue, bool first) in tsnep_free_irq() argument
1880 if (!strlen(queue->name)) in tsnep_free_irq()
1884 dev = queue->adapter; in tsnep_free_irq()
1886 dev = queue; in tsnep_free_irq()
1888 free_irq(queue->irq, dev); in tsnep_free_irq()
1889 memset(queue->name, 0, sizeof(queue->name)); in tsnep_free_irq()
1892 static void tsnep_queue_close(struct tsnep_queue *queue, bool first) in tsnep_queue_close() argument
1894 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1896 tsnep_free_irq(queue, first); in tsnep_queue_close()
1898 if (rx) { in tsnep_queue_close()
1899 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1900 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1901 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1902 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1905 netif_napi_del(&queue->napi); in tsnep_queue_close()
1909 struct tsnep_queue *queue, bool first) in tsnep_queue_open() argument
1911 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1912 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open()
1915 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); in tsnep_queue_open()
1917 if (rx) { in tsnep_queue_open()
1918 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1920 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1921 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1922 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1924 rx->tx_queue_index = 0; in tsnep_queue_open()
1930 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1931 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1934 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1936 rx->page_pool); in tsnep_queue_open()
1939 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1940 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1943 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1948 if (rx->xsk_pool) in tsnep_queue_open()
1949 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
1952 retval = tsnep_request_irq(queue, first); in tsnep_queue_open()
1954 netif_err(adapter, drv, adapter->netdev, in tsnep_queue_open()
1955 "can't get assigned irq %d.\n", queue->irq); in tsnep_queue_open()
1962 tsnep_queue_close(queue, first); in tsnep_queue_open()
1967 static void tsnep_queue_enable(struct tsnep_queue *queue) in tsnep_queue_enable() argument
1969 napi_enable(&queue->napi); in tsnep_queue_enable()
1970 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_enable()
1972 if (queue->tx) in tsnep_queue_enable()
1973 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1975 if (queue->rx) in tsnep_queue_enable()
1976 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1979 static void tsnep_queue_disable(struct tsnep_queue *queue) in tsnep_queue_disable() argument
1981 if (queue->tx) in tsnep_queue_disable()
1982 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
1984 napi_disable(&queue->napi); in tsnep_queue_disable()
1985 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_disable()
1987 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
1990 if (queue->rx) in tsnep_queue_disable()
1991 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
1999 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
2000 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2001 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2005 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2006 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2011 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); in tsnep_netdev_open()
2016 retval = netif_set_real_num_tx_queues(adapter->netdev, in tsnep_netdev_open()
2017 adapter->num_tx_queues); in tsnep_netdev_open()
2020 retval = netif_set_real_num_rx_queues(adapter->netdev, in tsnep_netdev_open()
2021 adapter->num_rx_queues); in tsnep_netdev_open()
2030 for (i = 0; i < adapter->num_queues; i++) in tsnep_netdev_open()
2031 tsnep_queue_enable(&adapter->queue[i]); in tsnep_netdev_open()
2038 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
2039 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_open()
2041 if (adapter->queue[i].rx) in tsnep_netdev_open()
2042 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2043 if (adapter->queue[i].tx) in tsnep_netdev_open()
2044 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2057 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_close()
2058 tsnep_queue_disable(&adapter->queue[i]); in tsnep_netdev_close()
2060 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_close()
2062 if (adapter->queue[i].rx) in tsnep_netdev_close()
2063 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2064 if (adapter->queue[i].tx) in tsnep_netdev_close()
2065 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2071 int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool) in tsnep_enable_xsk() argument
2073 bool running = netif_running(queue->adapter->netdev); in tsnep_enable_xsk()
2078 return -EOPNOTSUPP; in tsnep_enable_xsk()
2080 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2081 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2083 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2084 return -ENOMEM; in tsnep_enable_xsk()
2085 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2086 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2088 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2089 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2090 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2092 return -ENOMEM; in tsnep_enable_xsk()
2095 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2098 tsnep_queue_disable(queue); in tsnep_enable_xsk()
2100 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2101 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2104 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2105 tsnep_queue_enable(queue); in tsnep_enable_xsk()
2111 void tsnep_disable_xsk(struct tsnep_queue *queue) in tsnep_disable_xsk() argument
2113 bool running = netif_running(queue->adapter->netdev); in tsnep_disable_xsk()
2116 tsnep_queue_disable(queue); in tsnep_disable_xsk()
2118 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2120 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2121 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2124 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2125 tsnep_queue_enable(queue); in tsnep_disable_xsk()
2128 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2129 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2130 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2131 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2140 if (queue_mapping >= adapter->num_tx_queues) in tsnep_netdev_xmit_frame()
2143 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2150 return -EINVAL; in tsnep_netdev_ioctl()
2153 return phy_mii_ioctl(netdev->phydev, ifr, cmd); in tsnep_netdev_ioctl()
2163 if (netdev->flags & IFF_PROMISC) { in tsnep_netdev_set_multicast()
2166 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { in tsnep_netdev_set_multicast()
2169 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); in tsnep_netdev_set_multicast()
2180 for (i = 0; i < adapter->num_tx_queues; i++) { in tsnep_netdev_get_stats64()
2181 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2182 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2183 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2185 for (i = 0; i < adapter->num_rx_queues; i++) { in tsnep_netdev_get_stats64()
2186 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2187 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2188 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2189 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2191 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + in tsnep_netdev_get_stats64()
2195 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2198 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2201 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2202 stats->rx_fifo_errors += val; in tsnep_netdev_get_stats64()
2205 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2206 stats->rx_frame_errors += val; in tsnep_netdev_get_stats64()
2209 reg = ioread32(adapter->addr + ECM_STAT); in tsnep_netdev_get_stats64()
2211 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2213 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2214 stats->rx_crc_errors += val; in tsnep_netdev_get_stats64()
2216 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2221 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_set_address()
2223 adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_set_address()
2225 ether_addr_copy(adapter->mac_address, addr); in tsnep_mac_set_address()
2226 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", in tsnep_mac_set_address()
2239 eth_hw_addr_set(netdev, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2240 tsnep_mac_set_address(adapter, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2249 netdev_features_t changed = netdev->features ^ features; in tsnep_netdev_set_features()
2265 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data; in tsnep_netdev_get_tstamp()
2269 timestamp = __le64_to_cpu(rx_inline->counter); in tsnep_netdev_get_tstamp()
2271 timestamp = __le64_to_cpu(rx_inline->timestamp); in tsnep_netdev_get_tstamp()
2280 switch (bpf->command) { in tsnep_netdev_bpf()
2282 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); in tsnep_netdev_bpf()
2284 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool, in tsnep_netdev_bpf()
2285 bpf->xsk.queue_id); in tsnep_netdev_bpf()
2287 return -EOPNOTSUPP; in tsnep_netdev_bpf()
2294 cpu &= TSNEP_MAX_QUEUES - 1; in tsnep_xdp_get_tx()
2296 while (cpu >= adapter->num_tx_queues) in tsnep_xdp_get_tx()
2297 cpu -= adapter->num_tx_queues; in tsnep_xdp_get_tx()
2299 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
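
tsnep_xdp_get_tx() picks a TX queue for XDP transmissions based on the current CPU: the value is first bounded by the maximum queue count (a power of two, so a mask suffices) and then reduced into the valid range by repeated subtraction rather than a division. A hedged sketch of that selection; the maximum of 8 queues is an assumption, TSNEP_MAX_QUEUES may differ.

#include <stdio.h>

#define MAX_QUEUES 8                 /* assumption */

static int xdp_tx_queue_for_cpu(int cpu, int num_tx_queues)
{
        cpu &= MAX_QUEUES - 1;       /* cheap bound, no division */
        while (cpu >= num_tx_queues) /* reduce to a valid queue index */
                cpu -= num_tx_queues;
        return cpu;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 12; cpu++)
                printf("cpu %2d -> tx queue %d\n", cpu,
                       xdp_tx_queue_for_cpu(cpu, 3));
        return 0;
}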
2313 return -EINVAL; in tsnep_netdev_xdp_xmit()
2316 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2326 /* avoid transmit queue timeout since we share it with the slow in tsnep_netdev_xdp_xmit()
2344 struct tsnep_queue *queue; in tsnep_netdev_xsk_wakeup() local
2346 if (queue_id >= adapter->num_rx_queues || in tsnep_netdev_xsk_wakeup()
2347 queue_id >= adapter->num_tx_queues) in tsnep_netdev_xsk_wakeup()
2348 return -EINVAL; in tsnep_netdev_xsk_wakeup()
2350 queue = &adapter->queue[queue_id]; in tsnep_netdev_xsk_wakeup()
2352 if (!napi_if_scheduled_mark_missed(&queue->napi)) in tsnep_netdev_xsk_wakeup()
2353 napi_schedule(&queue->napi); in tsnep_netdev_xsk_wakeup()
2378 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2381 iowrite16(0, adapter->addr + TSNEP_RX_FILTER); in tsnep_mac_init()
2384 * - device tree in tsnep_mac_init()
2385 * - valid MAC address already set in tsnep_mac_init()
2386 * - MAC address register if valid in tsnep_mac_init()
2387 * - random MAC address in tsnep_mac_init()
2389 retval = of_get_mac_address(adapter->pdev->dev.of_node, in tsnep_mac_init()
2390 adapter->mac_address); in tsnep_mac_init()
2391 if (retval == -EPROBE_DEFER) in tsnep_mac_init()
2393 if (retval && !is_valid_ether_addr(adapter->mac_address)) { in tsnep_mac_init()
2394 *(u32 *)adapter->mac_address = in tsnep_mac_init()
2395 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_init()
2396 *(u16 *)(adapter->mac_address + sizeof(u32)) = in tsnep_mac_init()
2397 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_init()
2398 if (!is_valid_ether_addr(adapter->mac_address)) in tsnep_mac_init()
2399 eth_random_addr(adapter->mac_address); in tsnep_mac_init()
2402 tsnep_mac_set_address(adapter, adapter->mac_address); in tsnep_mac_init()
2403 eth_hw_addr_set(adapter->netdev, adapter->mac_address); in tsnep_mac_init()
2410 struct device_node *np = adapter->pdev->dev.of_node; in tsnep_mdio_init()
2418 adapter->suppress_preamble = in tsnep_mdio_init()
2419 of_property_read_bool(np, "suppress-preamble"); in tsnep_mdio_init()
2422 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); in tsnep_mdio_init()
2423 if (!adapter->mdiobus) { in tsnep_mdio_init()
2424 retval = -ENOMEM; in tsnep_mdio_init()
2429 adapter->mdiobus->priv = (void *)adapter; in tsnep_mdio_init()
2430 adapter->mdiobus->parent = &adapter->pdev->dev; in tsnep_mdio_init()
2431 adapter->mdiobus->read = tsnep_mdiobus_read; in tsnep_mdio_init()
2432 adapter->mdiobus->write = tsnep_mdiobus_write; in tsnep_mdio_init()
2433 adapter->mdiobus->name = TSNEP "-mdiobus"; in tsnep_mdio_init()
2434 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", in tsnep_mdio_init()
2435 adapter->pdev->name); in tsnep_mdio_init()
2438 adapter->mdiobus->phy_mask = 0x0000001; in tsnep_mdio_init()
2440 retval = of_mdiobus_register(adapter->mdiobus, np); in tsnep_mdio_init()
2453 retval = of_get_phy_mode(adapter->pdev->dev.of_node, in tsnep_phy_init()
2454 &adapter->phy_mode); in tsnep_phy_init()
2456 adapter->phy_mode = PHY_INTERFACE_MODE_GMII; in tsnep_phy_init()
2458 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", in tsnep_phy_init()
2460 adapter->phydev = of_phy_find_device(phy_node); in tsnep_phy_init()
2462 if (!adapter->phydev && adapter->mdiobus) in tsnep_phy_init()
2463 adapter->phydev = phy_find_first(adapter->mdiobus); in tsnep_phy_init()
2464 if (!adapter->phydev) in tsnep_phy_init()
2465 return -EIO; in tsnep_phy_init()
2477 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2478 if (platform_irq_count(adapter->pdev) == 1) in tsnep_queue_init()
2479 retval = platform_get_irq(adapter->pdev, 0); in tsnep_queue_init()
2481 retval = platform_get_irq_byname(adapter->pdev, "mac"); in tsnep_queue_init()
2484 adapter->num_tx_queues = 1; in tsnep_queue_init()
2485 adapter->num_rx_queues = 1; in tsnep_queue_init()
2486 adapter->num_queues = 1; in tsnep_queue_init()
2487 adapter->queue[0].adapter = adapter; in tsnep_queue_init()
2488 adapter->queue[0].irq = retval; in tsnep_queue_init()
2489 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2490 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2491 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2492 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2493 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2494 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2495 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2496 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2497 adapter->queue[0].irq_mask = irq_mask; in tsnep_queue_init()
2498 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY; in tsnep_queue_init()
2499 retval = tsnep_set_irq_coalesce(&adapter->queue[0], in tsnep_queue_init()
2504 adapter->netdev->irq = adapter->queue[0].irq; in tsnep_queue_init()
2506 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2510 sprintf(name, "txrx-%d", i); in tsnep_queue_init()
2511 retval = platform_get_irq_byname_optional(adapter->pdev, name); in tsnep_queue_init()
2515 adapter->num_tx_queues++; in tsnep_queue_init()
2516 adapter->num_rx_queues++; in tsnep_queue_init()
2517 adapter->num_queues++; in tsnep_queue_init()
2518 adapter->queue[i].adapter = adapter; in tsnep_queue_init()
2519 adapter->queue[i].irq = retval; in tsnep_queue_init()
2520 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2521 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2522 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2523 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
2524 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2525 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2526 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2527 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()
2528 adapter->queue[i].irq_mask = in tsnep_queue_init()
2530 adapter->queue[i].irq_delay_addr = in tsnep_queue_init()
2531 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i; in tsnep_queue_init()
2532 retval = tsnep_set_irq_coalesce(&adapter->queue[i], in tsnep_queue_init()
2552 netdev = devm_alloc_etherdev_mqs(&pdev->dev, in tsnep_probe()
2556 return -ENODEV; in tsnep_probe()
2557 SET_NETDEV_DEV(netdev, &pdev->dev); in tsnep_probe()
2560 adapter->pdev = pdev; in tsnep_probe()
2561 adapter->dmadev = &pdev->dev; in tsnep_probe()
2562 adapter->netdev = netdev; in tsnep_probe()
2563 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | in tsnep_probe()
2567 netdev->min_mtu = ETH_MIN_MTU; in tsnep_probe()
2568 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; in tsnep_probe()
2570 mutex_init(&adapter->gate_control_lock); in tsnep_probe()
2571 mutex_init(&adapter->rxnfc_lock); in tsnep_probe()
2572 INIT_LIST_HEAD(&adapter->rxnfc_rules); in tsnep_probe()
2574 adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io); in tsnep_probe()
2575 if (IS_ERR(adapter->addr)) in tsnep_probe()
2576 return PTR_ERR(adapter->addr); in tsnep_probe()
2577 netdev->mem_start = io->start; in tsnep_probe()
2578 netdev->mem_end = io->end; in tsnep_probe()
2580 type = ioread32(adapter->addr + ECM_TYPE); in tsnep_probe()
2584 adapter->gate_control = type & ECM_GATE_CONTROL; in tsnep_probe()
2585 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT; in tsnep_probe()
2593 retval = dma_set_mask_and_coherent(&adapter->pdev->dev, in tsnep_probe()
2596 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n"); in tsnep_probe()
2624 netdev->netdev_ops = &tsnep_netdev_ops; in tsnep_probe()
2625 netdev->ethtool_ops = &tsnep_ethtool_ops; in tsnep_probe()
2626 netdev->features = NETIF_F_SG; in tsnep_probe()
2627 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK; in tsnep_probe()
2629 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in tsnep_probe()
2641 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, in tsnep_probe()
2643 if (adapter->gate_control) in tsnep_probe()
2644 dev_info(&adapter->pdev->dev, "gate control detected\n"); in tsnep_probe()
2656 if (adapter->mdiobus) in tsnep_probe()
2657 mdiobus_unregister(adapter->mdiobus); in tsnep_probe()
2666 unregister_netdev(adapter->netdev); in tsnep_remove()
2674 if (adapter->mdiobus) in tsnep_remove()
2675 mdiobus_unregister(adapter->mdiobus); in tsnep_remove()
2696 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");