Lines matching refs: tx_ring

All hits below are evidently from a single source file of the Amazon ENA Ethernet driver (the ena_* functions). Each entry gives the source line number, the matching line, and the enclosing function; "local" and "argument" mark the lines where tx_ring is declared.
53 struct ena_ring *tx_ring; in ena_tx_timeout() local
62 tx_ring = &adapter->tx_ring[txqueue]; in ena_tx_timeout()
64 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in ena_tx_timeout()
65 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED); in ena_tx_timeout()
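
The two hits above are the interesting part of the timeout handler: when a TX queue times out, the driver reports how long ago the queue's NAPI last ran and whether that NAPI is still scheduled, which separates a stalled device from a starved poll loop. A minimal sketch of the same check, using a hypothetical reduced ring type (the demo_ names are invented here; the real struct ena_ring carries far more state):

        #include <linux/jiffies.h>
        #include <linux/netdevice.h>

        /* Hypothetical cut-down ring; only the fields lines 64-65 touch. */
        struct demo_ring {
                struct napi_struct *napi;
                unsigned long last_napi_jiffies; /* stamped at poll time, cf. line 1526 */
        };

        static void demo_tx_timeout_diag(const struct demo_ring *ring,
                                         u32 *time_since_last_napi_us,
                                         bool *napi_scheduled)
        {
                /* age of the last completed poll, in microseconds */
                *time_since_last_napi_us =
                        jiffies_to_usecs(jiffies - ring->last_napi_jiffies);
                /* NAPIF_STATE_SCHED is set while the poll is queued or running */
                *napi_scheduled = !!(ring->napi->state & NAPIF_STATE_SCHED);
        }
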
216 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
246 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
259 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
263 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
269 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
272 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
273 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
274 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
275 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
279 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
280 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
281 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
282 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
283 if (!tx_ring->free_ids) in ena_setup_tx_resources()
287 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
288 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
289 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
290 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
291 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
296 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
297 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
300 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
302 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
303 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
304 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
305 tx_ring->numa_node = node; in ena_setup_tx_resources()
309 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
310 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
312 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
313 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
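
Lines 272-291 repeat one allocation idiom three times: try vzalloc_node() on the queue's home NUMA node, and if that node cannot satisfy the request, retry with plain vzalloc() instead of failing queue setup; lines 296-297 then seed free_ids with the identity mapping so every descriptor id starts out available. A sketch of the idiom, with demo_ names standing in for the driver's:

        #include <linux/types.h>
        #include <linux/vmalloc.h>

        /* Prefer memory on the IRQ's NUMA node; fall back to any node.
         * Mirrors the pattern at lines 272-274, 280-282 and 288-290. */
        static void *demo_vzalloc_pref_node(size_t size, int node)
        {
                void *buf = vzalloc_node(size, node);

                if (!buf)
                        buf = vzalloc(size); /* any-node fallback */
                return buf;
        }

        /* Identity-seed a free-id ring as at lines 296-297. */
        static void demo_seed_free_ids(u16 *free_ids, u16 ring_size)
        {
                u16 i;

                for (i = 0; i < ring_size; i++)
                        free_ids[i] = i;
        }

The error path (lines 309-313) unwinds in reverse order and NULLs each pointer, so the later teardown in ena_free_tx_resources() (lines 328-335) always sees a consistent state.
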
326 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
328 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
329 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
331 vfree(tx_ring->free_ids); in ena_free_tx_resources()
332 tx_ring->free_ids = NULL; in ena_free_tx_resources()
334 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
335 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
685 void ena_unmap_tx_buff(struct ena_ring *tx_ring, in ena_unmap_tx_buff() argument
699 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_buff()
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_buff()
718 static void ena_free_tx_bufs(struct ena_ring *tx_ring) in ena_free_tx_bufs() argument
724 is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid); in ena_free_tx_bufs()
726 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
727 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
733 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
735 tx_ring->qid, i); in ena_free_tx_bufs()
738 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
740 tx_ring->qid, i); in ena_free_tx_bufs()
743 ena_unmap_tx_buff(tx_ring, tx_info); in ena_free_tx_bufs()
752 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
753 tx_ring->qid)); in ena_free_tx_bufs()
758 struct ena_ring *tx_ring; in ena_free_all_tx_bufs() local
762 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
763 ena_free_tx_bufs(tx_ring); in ena_free_all_tx_bufs()
819 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
823 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
827 return handle_invalid_req_id(tx_ring, req_id, tx_info, false); in validate_tx_req_id()
830 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_tx_irq() argument
841 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
842 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
848 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
852 handle_invalid_req_id(tx_ring, req_id, NULL, false); in ena_clean_tx_irq()
857 rc = validate_tx_req_id(tx_ring, req_id); in ena_clean_tx_irq()
861 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
870 ena_unmap_tx_buff(tx_ring, tx_info); in ena_clean_tx_irq()
872 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
873 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
881 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
883 tx_ring->ring_size); in ena_clean_tx_irq()
886 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
887 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
891 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
893 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
900 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
905 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
908 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
910 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_clean_tx_irq()
911 &tx_ring->syncp); in ena_clean_tx_irq()
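
The cleanup loop here pulls completed request ids from the completion queue, validates and unmaps each buffer, recycles the id back into free_ids at next_to_clean (lines 881-883), acks the total to the device (line 887), and wakes the netdev queue once enough SQ space is free (lines 900-911). The id-recycling step, sketched with invented demo_ names and a power-of-two ring size, which is what the driver's index masking implies:

        #include <linux/types.h>

        struct demo_txring {
                u16 *free_ids;
                u16 next_to_clean;
                u16 ring_size; /* assumed power of two */
        };

        /* Return a completed request id to the free-id ring, as at
         * lines 881-886; the AND is the wrap-around of the index. */
        static void demo_recycle_req_id(struct demo_txring *r, u16 req_id)
        {
                r->free_ids[r->next_to_clean] = req_id;
                r->next_to_clean = (r->next_to_clean + 1) & (r->ring_size - 1);
        }
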
1391 void ena_unmask_interrupt(struct ena_ring *tx_ring, in ena_unmask_interrupt() argument
1394 u32 rx_interval = tx_ring->smoothed_interval; in ena_unmask_interrupt()
1410 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1413 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, in ena_unmask_interrupt()
1414 &tx_ring->syncp); in ena_unmask_interrupt()
1421 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1424 void ena_update_ring_numa_node(struct ena_ring *tx_ring, in ena_update_ring_numa_node() argument
1431 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1434 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
1440 if (likely(tx_ring->numa_node == numa_node)) in ena_update_ring_numa_node()
1446 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1447 tx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1463 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1470 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1473 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1475 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1476 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1481 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); in ena_io_poll()
1491 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1492 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
1512 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1513 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1521 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1522 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1523 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1524 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
1526 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_io_poll()
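
The poll routine derives its TX budget from the ring size (line 1473), cleans TX first (line 1481), and accounts the poll under the writer side of the u64_stats seqcount (lines 1521-1524) before stamping last_napi_jiffies (line 1526) for the watchdog and timeout paths. The writer side of that stats pattern, as a sketch with an invented demo_ stats struct:

        #include <linux/types.h>
        #include <linux/u64_stats_sync.h>

        struct demo_poll_stats {
                struct u64_stats_sync syncp;
                u64 napi_comp;
                u64 tx_poll;
        };

        /* Writer side of the pattern at lines 1521-1524: free on 64-bit,
         * a seqcount bump on 32-bit so readers can detect torn values. */
        static void demo_poll_account(struct demo_poll_stats *s, u64 napi_comp_call)
        {
                u64_stats_update_begin(&s->syncp);
                s->napi_comp += napi_comp_call;
                s->tx_poll++;
                u64_stats_update_end(&s->syncp);
        }
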
1789 struct ena_ring *rx_ring, *tx_ring; in ena_init_napi_in_range() local
1794 tx_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
1805 napi->tx_ring = tx_ring; in ena_init_napi_in_range()
1889 struct ena_ring *tx_ring; in ena_create_io_tx_queue() local
1896 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1906 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
1907 ctx.numa_node = tx_ring->numa_node; in ena_create_io_tx_queue()
1918 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1919 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1928 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
2036 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2110 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
2190 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2468 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, in ena_check_and_linearize_skb() argument
2476 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2479 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2480 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2483 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); in ena_check_and_linearize_skb()
2487 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, in ena_check_and_linearize_skb()
2488 &tx_ring->syncp); in ena_check_and_linearize_skb()
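
The linearize check admits an skb outright when its fragment count is below the SQ's scatter-gather limit, and also when it sits exactly at the limit but the header is short enough to go into the device's separate header slot (lines 2476-2480); only otherwise does it pay for skb_linearize(), counting both the attempt and any failure (lines 2483-2488). A sketch of that logic, with sgl_size and tx_max_header_size passed in rather than read from the ring, and the stats bookkeeping omitted:

        #include <linux/skbuff.h>

        static int demo_check_and_linearize(struct sk_buff *skb,
                                            u16 sgl_size, u16 tx_max_header_size)
        {
                int num_frags = skb_shinfo(skb)->nr_frags;
                u32 header_len = skb_headlen(skb);

                if (num_frags < sgl_size)
                        return 0; /* fits the SG list as-is */

                /* at the limit is still fine if the header rides separately */
                if (num_frags == sgl_size && header_len < tx_max_header_size)
                        return 0;

                /* collapse all frags into the linear area; may fail under
                 * memory pressure, in which case the skb must be dropped */
                return skb_linearize(skb);
        }
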
2494 static int ena_tx_map_skb(struct ena_ring *tx_ring, in ena_tx_map_skb() argument
2500 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2512 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2523 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2525 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2528 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, in ena_tx_map_skb()
2529 &tx_ring->syncp); in ena_tx_map_skb()
2536 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2544 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2546 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2571 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
2573 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2586 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_tx_map_skb()
2587 &tx_ring->syncp); in ena_tx_map_skb()
2593 ena_unmap_tx_buff(tx_ring, tx_info); in ena_tx_map_skb()
2604 struct ena_ring *tx_ring; in ena_start_xmit() local
2613 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
2616 rc = ena_check_and_linearize_skb(tx_ring, skb); in ena_start_xmit()
2620 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
2621 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
2622 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
2627 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); in ena_start_xmit()
2639 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); in ena_start_xmit()
2642 tx_ring, in ena_start_xmit()
2656 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2657 tx_ring->sgl_size + 2))) { in ena_start_xmit()
2662 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, in ena_start_xmit()
2663 &tx_ring->syncp); in ena_start_xmit()
2675 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2678 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_start_xmit()
2679 &tx_ring->syncp); in ena_start_xmit()
2689 ena_ring_tx_doorbell(tx_ring); in ena_start_xmit()
2694 ena_unmap_tx_buff(tx_ring, tx_info); in ena_start_xmit()
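
The transmit path reserves room pessimistically: after queuing a packet it checks whether the SQ can still hold a worst-case packet (sgl_size + 2 descriptors, lines 2656-2657); if not, it stops the netdev queue, issues a full barrier, and re-checks before possibly waking itself (lines 2675-2679), so a completion racing with the stop cannot leave the queue stranded. The discipline, sketched with a stand-in have_room() callback for ena_com_sq_have_enough_space():

        #include <linux/netdevice.h>

        /* have_room(sq, need) stands in for ena_com_sq_have_enough_space();
         * stop_thresh is the worst-case descriptor count per packet and
         * wake_thresh the wakeup threshold (ENA_TX_WAKEUP_THRESH in the
         * real code). */
        static void demo_maybe_stop_queue(struct netdev_queue *txq,
                                          bool (*have_room)(void *sq, u16 need),
                                          void *sq, u16 stop_thresh, u16 wake_thresh)
        {
                if (likely(have_room(sq, stop_thresh)))
                        return;

                netif_tx_stop_queue(txq);
                /* Pairs with the barrier on the completion path: either this
                 * CPU sees the space a completion freed, or the completion
                 * path sees the stopped queue and wakes it (cf. lines
                 * 900-911 in ena_clean_tx_irq()). */
                smp_mb();
                if (have_room(sq, wake_thresh))
                        netif_tx_wake_queue(txq);
        }
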
2805 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2818 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
2821 start = u64_stats_fetch_begin(&tx_ring->syncp); in ena_get_stats64()
2822 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
2823 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
2824 } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); in ena_get_stats64()
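
ena_get_stats64() is the reader side of the same u64_stats seqcount: it snapshots cnt and bytes and retries if a writer overlapped (lines 2821-2824), which costs nothing on 64-bit builds. A matching reader sketch, again with an invented demo_ struct:

        #include <linux/types.h>
        #include <linux/u64_stats_sync.h>

        struct demo_tx_stats {
                struct u64_stats_sync syncp;
                u64 cnt;
                u64 bytes;
        };

        /* Lockless snapshot, as at lines 2821-2824: loop until no writer
         * raced the read (the retry only ever triggers on 32-bit SMP). */
        static void demo_fetch_stats(const struct demo_tx_stats *s,
                                     u64 *packets, u64 *bytes)
        {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        *packets = s->cnt;
                        *bytes = s->bytes;
                } while (u64_stats_fetch_retry(&s->syncp, start));
        }
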
2895 if (adapter->tx_ring->ring_size) in ena_calc_io_queue_size()
2896 tx_queue_size = adapter->tx_ring->ring_size; in ena_calc_io_queue_size()
3304 txr = &adapter->tx_ring[i]; in ena_restore_device()
3403 struct ena_ring *tx_ring) in check_missing_comp_in_tx_queue() argument
3405 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); in check_missing_comp_in_tx_queue()
3418 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3419 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3435 tx_ring->qid); in check_missing_comp_in_tx_queue()
3445 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in check_missing_comp_in_tx_queue()
3467 tx_ring->qid, i, time_since_last_napi, napi_scheduled); in check_missing_comp_in_tx_queue()
3486 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, in check_missing_comp_in_tx_queue()
3487 &tx_ring->syncp); in check_missing_comp_in_tx_queue()
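
The watchdog walks every tx_buffer_info slot (lines 3418-3419) looking for packets that were posted but never completed within the missing-completion timeout, logs the queue and NAPI staleness (lines 3445-3467), and feeds the count into the missed_tx stat (lines 3486-3487). A hypothetical, heavily reduced version of that scan; the real per-buffer struct and timeout policy are richer:

        #include <linux/jiffies.h>
        #include <linux/skbuff.h>

        /* Invented cut-down view of one TX bookkeeping slot. */
        struct demo_tx_buf {
                struct sk_buff *skb;        /* NULL once completed and freed */
                unsigned long sent_jiffies; /* stamped when posted */
        };

        static u32 demo_count_missed_tx(const struct demo_tx_buf *bufs,
                                        u16 ring_size,
                                        unsigned long comp_timeout /* jiffies */)
        {
                u32 missed = 0;
                u16 i;

                for (i = 0; i < ring_size; i++) {
                        const struct demo_tx_buf *b = &bufs[i];

                        /* posted long enough ago that a completion is overdue */
                        if (b->skb &&
                            time_is_before_jiffies(b->sent_jiffies + comp_timeout))
                                missed++;
                }
                return missed;
        }
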
3494 struct ena_ring *tx_ring; in check_for_missing_completions() local
3520 tx_ring = &adapter->tx_ring[qid]; in check_for_missing_completions()
3523 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); in check_for_missing_completions()