Lines Matching refs:tx_ring
8 static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_xdp_req_id() argument
12 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_xdp_req_id()
16 return handle_invalid_req_id(tx_ring, req_id, tx_info, true); in validate_xdp_req_id()
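
The validate_xdp_req_id() hits above show the check the driver runs on every TX completion: the req_id the device returns must point at a tx_buffer_info slot that still owns an xdp_frame, otherwise handle_invalid_req_id() escalates. A minimal user-space model of that invariant (RING_SIZE, the slot struct, and the error return are placeholders; the real driver schedules a reset instead):

#include <stdio.h>

#define RING_SIZE 8

struct tx_slot { void *xdpf; };                  /* stands in for ena_tx_buffer */
static struct tx_slot tx_buffer_info[RING_SIZE];

/* A req_id is valid only if its slot still holds a frame. */
static int validate_req_id(unsigned int req_id)
{
	if (req_id < RING_SIZE && tx_buffer_info[req_id].xdpf)
		return 0;
	fprintf(stderr, "invalid req_id %u\n", req_id); /* driver would reset here */
	return -1;
}

int main(void)
{
	int frame;

	tx_buffer_info[3].xdpf = &frame;
	printf("%d %d\n", validate_req_id(3), validate_req_id(5)); /* 0 -1 */
	return 0;
}
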
19 static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring, in ena_xdp_tx_map_frame() argument
24 struct ena_adapter *adapter = tx_ring->adapter; in ena_xdp_tx_map_frame()
35 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_xdp_tx_map_frame()
37 push_len = min_t(u32, size, tx_ring->tx_max_header_size); in ena_xdp_tx_map_frame()
48 dma = dma_map_single(tx_ring->dev, in ena_xdp_tx_map_frame()
52 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_xdp_tx_map_frame()
68 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_xdp_tx_map_frame()
69 &tx_ring->syncp); in ena_xdp_tx_map_frame()
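
The ena_xdp_tx_map_frame() hits cover two tx_ring uses: on LLQ devices (tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) up to tx_max_header_size bytes are pushed inline to the device and only the remainder is DMA-mapped, and a failed mapping bumps tx_stats.dma_mapping_err. A sketch of just the split arithmetic, with dma_map_single() reduced to a stub:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Stub for dma_map_single(); the real call can fail and must be checked. */
static int map_for_device(const uint8_t *data, uint32_t size)
{
	printf("mapping %u bytes\n", size);
	(void)data;
	return 0;
}

static int map_frame(const uint8_t *data, uint32_t size, uint32_t max_header_size)
{
	uint32_t push_len = MIN(size, max_header_size); /* inline (LLQ) portion */

	printf("pushing %u header bytes inline\n", push_len);
	if (size - push_len > 0 && map_for_device(data + push_len, size - push_len))
		return -1;	/* driver counts this in tx_stats.dma_mapping_err */
	return 0;
}

int main(void)
{
	uint8_t frame[1500] = { 0 };

	return map_frame(frame, sizeof(frame), 96); /* push 96, map 1404 */
}
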
75 int ena_xdp_xmit_frame(struct ena_ring *tx_ring, in ena_xdp_xmit_frame() argument
85 next_to_use = tx_ring->next_to_use; in ena_xdp_xmit_frame()
86 req_id = tx_ring->free_ids[next_to_use]; in ena_xdp_xmit_frame()
87 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_xdp_xmit_frame()
90 rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx); in ena_xdp_xmit_frame()
97 tx_ring, in ena_xdp_xmit_frame()
109 ena_ring_tx_doorbell(tx_ring); in ena_xdp_xmit_frame()
114 ena_unmap_tx_buff(tx_ring, tx_info); in ena_xdp_xmit_frame()
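
ena_xdp_xmit_frame() threads the ring state through the whole submit path: next_to_use picks the next spare request id out of free_ids, that id selects the tx_buffer_info slot, and a failed submit unwinds the DMA mapping via ena_unmap_tx_buff(). The free_ids indirection is easy to model in isolation (the ring size and frame type are placeholders):

#include <stdio.h>

#define RING_SIZE 4

static unsigned short free_ids[RING_SIZE] = { 0, 1, 2, 3 };
static void *tx_buffer_info[RING_SIZE];   /* req_id -> in-flight frame */
static unsigned int next_to_use;

/* Returns the req_id the device will echo back on completion. */
static int xmit_frame(void *frame)
{
	unsigned short req_id = free_ids[next_to_use];

	tx_buffer_info[req_id] = frame;    /* remembered for the cleanup path */
	next_to_use = (next_to_use + 1) % RING_SIZE;
	return req_id;
}

int main(void)
{
	int a, b;

	printf("req_id %d\n", xmit_frame(&a)); /* req_id 0 */
	printf("req_id %d\n", xmit_frame(&b)); /* req_id 1 */
	return 0;
}
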
125 struct ena_ring *tx_ring; in ena_xdp_xmit() local
140 tx_ring = &adapter->tx_ring[qid]; in ena_xdp_xmit()
143 spin_lock(&tx_ring->xdp_tx_lock); in ena_xdp_xmit()
146 if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0)) in ena_xdp_xmit()
153 ena_ring_tx_doorbell(tx_ring); in ena_xdp_xmit()
155 spin_unlock(&tx_ring->xdp_tx_lock); in ena_xdp_xmit()
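
ena_xdp_xmit() serializes concurrent senders with tx_ring->xdp_tx_lock, submits the whole frames[] burst, and (given XDP_XMIT_FLUSH) rings the doorbell once per batch rather than once per frame; the doorbell is an MMIO write, so batching it is the point. A user-space analogue, with pthread_mutex_t standing in for the spinlock and stubs for the submit and doorbell steps:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xdp_tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int xmit_one(void *frame) { (void)frame; return 0; } /* submit stub */
static void ring_doorbell(void) { puts("doorbell"); }       /* MMIO stub */

static int xdp_xmit(void **frames, int n)
{
	int i, nxmit = 0;

	pthread_mutex_lock(&xdp_tx_lock);  /* other CPUs may share this queue */
	for (i = 0; i < n; i++) {
		if (xmit_one(frames[i]))
			break;             /* stop on first failure */
		nxmit++;
	}
	ring_doorbell();                   /* one write for the whole batch */
	pthread_mutex_unlock(&xdp_tx_lock);
	return nxmit;                      /* caller owns the unsent tail */
}

int main(void)
{
	void *frames[3] = { frames, frames + 1, frames + 2 };

	printf("sent %d\n", xdp_xmit(frames, 3)); /* doorbell, then sent 3 */
	return 0;
}
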
367 static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_xdp_irq() argument
375 if (unlikely(!tx_ring)) in ena_clean_xdp_irq()
377 next_to_clean = tx_ring->next_to_clean; in ena_clean_xdp_irq()
383 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_xdp_irq()
387 handle_invalid_req_id(tx_ring, req_id, NULL, true); in ena_clean_xdp_irq()
392 rc = validate_xdp_req_id(tx_ring, req_id); in ena_clean_xdp_irq()
396 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_xdp_irq()
402 ena_unmap_tx_buff(tx_ring, tx_info); in ena_clean_xdp_irq()
407 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_xdp_irq()
409 tx_ring->ring_size); in ena_clean_xdp_irq()
411 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_xdp_irq()
412 "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id); in ena_clean_xdp_irq()
415 tx_ring->next_to_clean = next_to_clean; in ena_clean_xdp_irq()
416 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_xdp_irq()
418 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_xdp_irq()
420 tx_ring->qid, tx_pkts); in ena_clean_xdp_irq()
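
ena_clean_xdp_irq() is the reverse of the submit path: each completion's req_id (fetched via ena_com_tx_comp_req_id_get()) is validated, its buffer unmapped with ena_unmap_tx_buff(), and the id is recycled into free_ids at next_to_clean, which wraps around ring_size. A model of that recycling, assuming for brevity that ids complete in submission order:

#include <stdio.h>

#define RING_SIZE 4

static unsigned short free_ids[RING_SIZE] = { 0, 1, 2, 3 };
static void *tx_buffer_info[RING_SIZE];
static unsigned int next_to_clean;

/* Handle one completion: release the slot, recycle the id. */
static void clean_one(unsigned short req_id)
{
	if (!tx_buffer_info[req_id]) {     /* validate_xdp_req_id() analogue */
		fprintf(stderr, "stale req_id %u\n", req_id);
		return;
	}
	tx_buffer_info[req_id] = NULL;     /* unmap would happen here */
	free_ids[next_to_clean] = req_id;  /* id is spare again */
	next_to_clean = (next_to_clean + 1) % RING_SIZE;
}

int main(void)
{
	int frame;

	tx_buffer_info[0] = &frame;
	clean_one(0);                      /* ok: slot freed, id recycled */
	clean_one(0);                      /* stale: slot already empty */
	return 0;
}
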
431 struct ena_ring *tx_ring; in ena_xdp_io_poll() local
435 tx_ring = ena_napi->tx_ring; in ena_xdp_io_poll()
437 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_xdp_io_poll()
438 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_xdp_io_poll()
443 work_done = ena_clean_xdp_irq(tx_ring, budget); in ena_xdp_io_poll()
448 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) { in ena_xdp_io_poll()
452 ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1, in ena_xdp_io_poll()
453 &tx_ring->syncp); in ena_xdp_io_poll()
455 ena_unmask_interrupt(tx_ring, NULL); in ena_xdp_io_poll()
457 ena_update_ring_numa_node(tx_ring, NULL); in ena_xdp_io_poll()
463 u64_stats_update_begin(&tx_ring->syncp); in ena_xdp_io_poll()
464 tx_ring->tx_stats.tx_poll++; in ena_xdp_io_poll()
465 u64_stats_update_end(&tx_ring->syncp); in ena_xdp_io_poll()
466 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_xdp_io_poll()
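
ena_xdp_io_poll() follows the standard NAPI contract: clean at most `budget` completions via ena_clean_xdp_irq(); if the queue drained below budget, complete NAPI and unmask the interrupt (ena_unmask_interrupt()), otherwise return the full budget to stay in polling mode, with the per-poll stats (napi_comp, tx_poll, last_napi_jiffies) updated under the ring's u64_stats sync. A sketch of the budget logic alone (the pending counter is a demo stand-in for the completion queue):

#include <stdio.h>

static int pending = 5;                   /* completions waiting, for the demo */

static int clean_irq(int budget)          /* ena_clean_xdp_irq() stand-in */
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	return done;
}

/* Returning less than budget means "done for now, re-arm the IRQ". */
static int io_poll(int budget)
{
	int work_done = clean_irq(budget);

	if (work_done < budget)
		puts("napi_complete_done + unmask interrupt");
	else
		puts("stay in polling mode");
	return work_done;
}

int main(void)
{
	printf("%d\n", io_poll(4));        /* stay in polling mode, 4 */
	printf("%d\n", io_poll(4));        /* re-arm, 1 */
	return 0;
}
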