Lines matching refs: rx_swbd (RX software buffer descriptor) in the ENETC Ethernet driver
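
The fields dereferenced throughout this listing (dma, page, page_offset, dir, len) outline the RX software buffer descriptor itself. Below is a minimal sketch of the struct; the field set comes from the call sites in this listing, while the exact types and field order are assumptions, not copied from the driver header.

#include <linux/dma-mapping.h>	/* dma_addr_t, enum dma_data_direction */
#include <linux/mm_types.h>	/* struct page */

/* Hedged reconstruction; the real definition lives in the driver
 * header and may differ in order and types.
 */
struct enetc_rx_swbd {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;		/* backing page, NULL when the slot is empty */
	u16 page_offset;		/* half-page offset, toggled on reuse (line 1083) */
	enum dma_data_direction dir;	/* DMA_FROM_DEVICE, or BIDIRECTIONAL for XDP (line 925) */
	u16 len;			/* received length, kept for the XDP_TX path (line 1427) */
};
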
741 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
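
Line 741 is the heart of enetc_reuse_page(): the caller's software BD is parked in the slot at next_to_alloc, so its still-mapped page stays in the ring. A hedged sketch; the struct copy and the index-advance helper enetc_bdr_idx_inc() are assumptions not visible above.

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* advance next_to_alloc, wrapping at the ring size (assumed helper) */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy the whole software BD, keeping the DMA mapping alive */
	*new = *old;
}
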
779 struct enetc_rx_swbd rx_swbd = { in enetc_recycle_xdp_tx_buff() local
791 enetc_reuse_page(rx_ring, &rx_swbd); in enetc_recycle_xdp_tx_buff()
794 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
795 rx_swbd.page_offset, in enetc_recycle_xdp_tx_buff()
797 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
806 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
807 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
808 __free_page(rx_swbd.page); in enetc_recycle_xdp_tx_buff()
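
Lines 779-808 show both halves of XDP_TX buffer recycling: a temporary on-stack rx_swbd is rebuilt from the TX software BD, then either handed back to the RX ring or, when no slot is free, unmapped and freed. A hedged sketch; enetc_rx_ring_from_xdp_tx_ring(), enetc_swbd_unused() and the sync length (elided in the listing) are assumptions.

static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
				      struct enetc_tx_swbd *tx_swbd)
{
	struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); /* assumed */
	struct enetc_rx_swbd rx_swbd = {	/* rebuild the RX view of the buffer */
		.dma = tx_swbd->dma,
		.page = tx_swbd->page,
		.page_offset = tx_swbd->page_offset,
		.dir = tx_swbd->dir,
		.len = tx_swbd->len,
	};
	struct enetc_bdr *rx_ring;

	rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); /* assumed helper */

	if (likely(enetc_swbd_unused(rx_ring))) {	/* assumed: free RX slot? */
		/* hand the page back to the RX ring ... */
		enetc_reuse_page(rx_ring, &rx_swbd);

		/* ... and make it device-visible again; length assumed,
		 * the argument is not part of the listing
		 */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
						 rx_swbd.page_offset,
						 ENETC_RXB_DMA_SIZE_XDP,
						 rx_swbd.dir);
	} else {
		/* RX ring is full: nothing useful to do with the page */
		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}
}
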
914 struct enetc_rx_swbd *rx_swbd) in enetc_new_page() argument
925 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
927 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
934 rx_swbd->dma = addr; in enetc_new_page()
935 rx_swbd->page = page; in enetc_new_page()
936 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
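
Lines 914-936 cover page allocation: the whole page is DMA-mapped once, bidirectionally when an XDP program may transmit from it. A hedged sketch; dev_alloc_page(), the dma_mapping_error() check and the xdp test are assumptions filled in around the listed lines.

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!(rx_ring->xdp.prog);	/* assumed: XDP prog attached? */
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	/* XDP_TX may retransmit from this page, so map it both ways */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);
		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}
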
943 struct enetc_rx_swbd *rx_swbd; in enetc_refill_rx_ring() local
948 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
953 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
954 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
961 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
962 rx_swbd->page_offset); in enetc_refill_rx_ring()
967 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
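
Lines 943-967 show the refill loop: only slots whose page pointer is NULL need a fresh allocation, everything else was recycled in place, and each slot's DMA address is published to the hardware RxBD. A hedged sketch; enetc_rxbd()/enetc_rxbd_next() and the tail-pointer update (omitted here) are assumptions.

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);		/* assumed ring-walk helper */

	for (j = 0; j < buff_cnt; j++) {
		if (unlikely(!rx_swbd->page)) {		/* slot lost its page */
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd)))
				break;			/* allocation failed, stop early */
		}

		/* point the hardware descriptor at the half-page buffer */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);

		enetc_rxbd_next(rx_ring, &rxbd, &i);	/* assumed helper */
		rx_swbd = &rx_ring->rx_swbd[i];
	}

	/* next_to_use/tail register update elided in this sketch */
	return j;
}
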
1055 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff() local
1057 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
1058 rx_swbd->page_offset, in enetc_get_rx_buff()
1059 size, rx_swbd->dir); in enetc_get_rx_buff()
1060 return rx_swbd; in enetc_get_rx_buff()
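
Lines 1055-1060 are nearly the whole of enetc_get_rx_buff(): sync exactly the received size for CPU access before anyone reads the buffer. A sketch, with the signature inferred from the call sites at lines 1097, 1119, 1423 and 1437.

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	/* make the freshly received bytes visible to the CPU */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, rx_swbd->dir);
	return rx_swbd;
}
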
1065 struct enetc_rx_swbd *rx_swbd) in enetc_put_rx_buff() argument
1069 enetc_reuse_page(rx_ring, rx_swbd); in enetc_put_rx_buff()
1071 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
1072 rx_swbd->page_offset, in enetc_put_rx_buff()
1073 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
1075 rx_swbd->page = NULL; in enetc_put_rx_buff()
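
Lines 1065-1075 recycle a buffer whose contents were not handed to the stack: queue the page for reuse, sync it back to the device, and clear the page pointer so teardown cannot free it twice. A hedged sketch; the buffer_size computation is an assumption.

static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; /* assumed */

	enetc_reuse_page(rx_ring, rx_swbd);

	/* give the buffer back to the device */
	dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
					 rx_swbd->page_offset,
					 buffer_size, rx_swbd->dir);

	rx_swbd->page = NULL;	/* slot is now owned by next_to_alloc */
}
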
1080 struct enetc_rx_swbd *rx_swbd) in enetc_flip_rx_buff() argument
1082 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
1083 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
1084 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
1086 enetc_put_rx_buff(rx_ring, rx_swbd); in enetc_flip_rx_buff()
1088 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
1089 rx_swbd->dir); in enetc_flip_rx_buff()
1090 rx_swbd->page = NULL; in enetc_flip_rx_buff()
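
Lines 1080-1090 implement the half-page flip that makes the recycling work: each page holds two ENETC_RXB_TRUESIZE buffers, and XOR-ing the offset hands the other half back to the ring while the stack keeps the first. A hedged sketch; enetc_page_reusable() is an assumed helper matching the common pfmemalloc-and-refcount test.

static bool enetc_page_reusable(struct page *page)
{
	/* assumed: reusable iff not pfmemalloc and we hold the only ref */
	return !page_is_pfmemalloc(page) && page_ref_count(page) == 1;
}

static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
			       struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;	/* switch halves */
		page_ref_inc(rx_swbd->page);			/* ring keeps its own ref */

		enetc_put_rx_buff(rx_ring, rx_swbd);
	} else {
		/* page is in use elsewhere: release it to the stack */
		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		rx_swbd->page = NULL;
	}
}
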
1097 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_skb() local
1101 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
1111 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_map_rx_buff_to_skb()
1119 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_skb() local
1121 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
1122 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
1124 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_add_rx_buff_to_skb()
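
Lines 1097-1124 are the two skb construction paths: the first buffer becomes the skb head, subsequent buffers of a multi-BD frame are attached as page frags, and both paths end in enetc_flip_rx_buff() so the page can be reused. A hedged sketch; build_skb(), the headroom handling and the error path are assumptions.

static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); /* assumed */
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, rx_ring->buffer_offset);	/* assumed headroom */
	__skb_put(skb, size);

	enetc_flip_rx_buff(rx_ring, rx_swbd);
	return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	/* attach this buffer as a paged fragment of the existing skb */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_flip_rx_buff(rx_ring, rx_swbd);
}
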
1134 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1141 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
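
Lines 1134-1141 recycle buffers on the error path: when a BD reports an error, every buffer of the (possibly multi-BD) frame goes back through enetc_put_rx_buff() instead of into an skb. A hedged sketch; the status macros and the final-BD loop condition are assumptions.

static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
					      u32 bd_status,
					      union enetc_rx_bd **rxbd, int *i)
{
	if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)))) /* assumed */
		return false;	/* no error: let the caller process the frame */

	/* recycle the first buffer of the errored frame */
	enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
	enetc_rxbd_next(rx_ring, rxbd, i);	/* assumed helper */

	/* keep recycling until the final-BD bit is seen (assumed flag) */
	while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
		dma_rmb();	/* read the BD status before its payload */
		bd_status = le32_to_cpu((*rxbd)->r.lstatus);

		enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
		enetc_rxbd_next(rx_ring, rxbd, i);
	}

	return true;	/* frame consumed without building an skb */
}
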
1423 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_xdp() local
1424 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1427 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1437 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_xdp() local
1441 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1451 if (page_is_pfmemalloc(rx_swbd->page)) in enetc_add_rx_buff_to_xdp()
1455 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, in enetc_add_rx_buff_to_xdp()
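
Lines 1423-1455 build the xdp_buff: the first buffer becomes the linear head and later buffers become frags in the shared info area, with rx_swbd->len saved so XDP_TX can reuse the mapping as-is. A hedged sketch; the xdp_prepare_buff() arguments and the frag-count bookkeeping are assumptions, while the pfmemalloc test and the skb_frag_fill_page_desc() call are taken from the listing.

static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     struct xdp_buff *xdp_buff, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;

	rx_swbd->len = size;	/* remembered for XDP_TX */

	xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
			 rx_ring->buffer_offset, size, false); /* assumed args */
}

static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct xdp_buff *xdp_buff)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	skb_frag_t *frag;

	rx_swbd->len = size;	/* remembered for XDP_TX */

	if (!xdp_buff_has_frags(xdp_buff)) {	/* assumed bookkeeping */
		xdp_buff_set_frags_flag(xdp_buff);
		shinfo->nr_frags = 0;
	}

	if (page_is_pfmemalloc(rx_swbd->page))
		xdp_buff_set_frag_pfmemalloc(xdp_buff);

	frag = &shinfo->frags[shinfo->nr_frags];
	skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
				size);
	shinfo->nr_frags++;
}
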
1500 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd() local
1504 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1505 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1506 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1507 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1508 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
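
Lines 1500-1508 transfer buffer ownership for XDP_TX: the DMA mapping and page move field by field from the RX software BD to a TX software BD, avoiding an unmap/remap cycle. A hedged sketch; the loop bounds, return value and TX flagging are assumptions.

static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_bdr *rx_ring,
					struct enetc_tx_swbd *xdp_tx_arr,
					int rx_ring_first, int rx_ring_last)
{
	int n = 0;

	for (; rx_ring_first != rx_ring_last; n++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
		struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];

		/* transfer ownership of the mapping and the page */
		tx_swbd->dma = rx_swbd->dma;
		tx_swbd->dir = rx_swbd->dir;
		tx_swbd->page = rx_swbd->page;
		tx_swbd->page_offset = rx_swbd->page_offset;
		tx_swbd->len = rx_swbd->len;
		tx_swbd->is_xdp_tx = true;	/* assumed flag */

		enetc_bdr_idx_inc(rx_ring, &rx_ring_first); /* assumed helper */
	}

	return n;
}
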
1525 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
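
Line 1525 sits inside enetc_xdp_drop(), which recycles every buffer of a dropped XDP frame. A hedged sketch; the loop shape and enetc_bdr_idx_inc() are assumptions.

static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
			   int rx_ring_last)
{
	while (rx_ring_first != rx_ring_last) {
		/* buffer was never handed to the stack: recycle it */
		enetc_put_rx_buff(rx_ring,
				  &rx_ring->rx_swbd[rx_ring_first]);
		enetc_bdr_idx_inc(rx_ring, &rx_ring_first); /* assumed */
	}
}
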
1634 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1647 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
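
Lines 1634 and 1647 are the two outcomes after an XDP verdict hands buffers onward: on success the ring must forget the pages so enetc_refill_rx_ring() allocates fresh ones instead of reusing pages it no longer owns, on failure the buffer is recycled. A hedged sketch of the scrub loop; the surrounding control flow is an assumption.

/* after a successful handoff (e.g. XDP_TX enqueue), scrub the RX
 * software BDs; the pages now belong to the other ring or device
 */
while (orig_i != i) {
	rx_ring->rx_swbd[orig_i].page = NULL;	/* buffer is no longer ours */
	enetc_bdr_idx_inc(rx_ring, &orig_i);	/* assumed wrap helper */
}
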
1887 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd)); in enetc_alloc_rx_resource()
1888 if (!res->rx_swbd) in enetc_alloc_rx_resource()
1893 vfree(res->rx_swbd); in enetc_alloc_rx_resource()
1903 vfree(res->rx_swbd); in enetc_free_rx_resource()
1962 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; in enetc_assign_rx_resource()
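
Lines 1887-1962 manage the rx_swbd array's lifetime: one entry per hardware BD, allocated with vcalloc() because rings can be large, freed with vfree() on both the error path and teardown, and attached to the live ring in enetc_assign_rx_resource(). A hedged sketch with a hypothetical wrapper name, enetc_alloc_rx_swbd_array(), since only the three calls are shown above.

/* Hypothetical wrapper illustrating the allocation pattern at lines
 * 1887-1903; the real code performs this inside enetc_alloc_rx_resource().
 */
static int enetc_alloc_rx_swbd_array(struct enetc_bdr_resource *res,
				     size_t bd_count)
{
	res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
	if (!res->rx_swbd)
		return -ENOMEM;	/* caller unwinds with vfree() */

	return 0;
}
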
2013 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring() local
2015 if (!rx_swbd->page) in enetc_free_rx_ring()
2018 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
2019 rx_swbd->dir); in enetc_free_rx_ring()
2020 __free_page(rx_swbd->page); in enetc_free_rx_ring()
2021 rx_swbd->page = NULL; in enetc_free_rx_ring()
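
Lines 2013-2021 are the per-slot teardown: skip empty slots, unmap the full page, free it, and clear the pointer. A sketch; only the loop header is assumed.

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	for (i = 0; i < rx_ring->bd_count; i++) {	/* assumed bound */
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)	/* slot already empty */
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}
}
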