Lines matching references to tx in the gve driver's DQO TX path. Each entry shows the source line number, the matching source line, and the enclosing function; "argument" or "local" marks lines where tx is declared as a function parameter or a local variable.

17 static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)  in gve_has_free_tx_qpl_bufs()  argument
21 if (!tx->dqo.qpl) in gve_has_free_tx_qpl_bufs()
24 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs()
25 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs()
26 tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
32 tx->dqo_tx.free_tx_qpl_buf_cnt = in gve_has_free_tx_qpl_bufs()
33 atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
35 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs()
36 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs()
37 tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
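The gve_has_free_tx_qpl_bufs() lines above show a two-counter availability check: the TX path counts allocations privately, caches the completion path's free count, and only re-reads the shared atomic (with acquire ordering) when the cached view says there is not enough room. A minimal user-space sketch of that check, with hypothetical names (qpl_buf_state, has_free_bufs):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct qpl_buf_state {
	uint32_t num_bufs;         /* total buffers backing the ring */
	uint32_t alloc_cnt;        /* TX-path private allocation counter */
	uint32_t cached_free_cnt;  /* TX-path cache of the atomic below */
	_Atomic uint32_t free_cnt; /* published by the completion path */
};

static bool has_free_bufs(struct qpl_buf_state *s, uint32_t count)
{
	/* Both counters may wrap; the unsigned difference stays correct. */
	uint32_t avail = s->num_bufs - (s->alloc_cnt - s->cached_free_cnt);

	if (avail >= count)
		return true;

	/* Slow path: re-read the completion side's counter (acquire pairs
	 * with the release done when buffers are returned).
	 */
	s->cached_free_cnt = atomic_load_explicit(&s->free_cnt,
						  memory_order_acquire);
	avail = s->num_bufs - (s->alloc_cnt - s->cached_free_cnt);
	return avail >= count;
}
```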
43 gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) in gve_alloc_tx_qpl_buf() argument
47 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
53 tx->dqo_tx.free_tx_qpl_buf_head = in gve_alloc_tx_qpl_buf()
54 atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_alloc_tx_qpl_buf()
55 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
62 tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; in gve_alloc_tx_qpl_buf()
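gve_alloc_tx_qpl_buf() above pops from an index-linked free list with two heads: a private TX-path head and an atomic head refilled by the completion path. When the private list runs dry, the entire completion-side list is taken over in a single atomic exchange against -1. A standalone sketch of that pop, with assumed names (buf_freelist, buf_alloc):

```c
#include <stdatomic.h>
#include <stdint.h>

struct buf_freelist {
	int16_t *next;              /* next[i]: next free index, -1 ends the chain */
	int16_t tx_head;            /* private to the TX path */
	_Atomic int16_t compl_head; /* refilled by the completion path */
};

static int16_t buf_alloc(struct buf_freelist *fl)
{
	int16_t index = fl->tx_head;

	if (index == -1) {
		/* Private list empty: steal, in one atomic exchange, every
		 * buffer the completion path has freed so far.
		 */
		fl->tx_head = atomic_exchange(&fl->compl_head, -1);
		index = fl->tx_head;
		if (index == -1)
			return -1;  /* genuinely out of buffers */
	}

	fl->tx_head = fl->next[index];  /* pop the head of the chain */
	return index;
}
```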
68 gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, in gve_free_tx_qpl_bufs() argument
80 tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; in gve_free_tx_qpl_bufs()
85 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); in gve_free_tx_qpl_bufs()
87 tx->dqo.tx_qpl_buf_next[index] = old_head; in gve_free_tx_qpl_bufs()
88 if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, in gve_free_tx_qpl_bufs()
95 atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_free_tx_qpl_bufs()
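gve_free_tx_qpl_bufs() is the other half: the packet's buffers are first chained through tx_qpl_buf_next[], the chain is pushed onto the shared head with a compare-and-swap retry loop, and the free counter is bumped so the availability check can observe it. A hedged sketch of that push (hypothetical bufs_free(), raw arrays instead of the ring struct):

```c
#include <stdatomic.h>
#include <stdint.h>

/* Push an already-chained run of buffer indices (first..last linked through
 * next[]) onto the completion-side head, then publish the count.
 */
static void bufs_free(int16_t *next, _Atomic int16_t *compl_head,
		      _Atomic uint32_t *free_cnt,
		      int16_t first, int16_t last, uint32_t n)
{
	for (;;) {
		int16_t old_head = atomic_load_explicit(compl_head,
							memory_order_acquire);

		/* Splice the current free list behind our chain, then try to
		 * publish our first index as the new head.
		 */
		next[last] = old_head;
		if (atomic_compare_exchange_strong(compl_head, &old_head, first))
			break;
		/* Another CPU pushed concurrently; retry against the new head. */
	}

	/* Make the returned buffers visible to the availability fast path. */
	atomic_fetch_add(free_cnt, n);
}
```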
100 static bool gve_has_pending_packet(struct gve_tx_ring *tx) in gve_has_pending_packet() argument
103 if (tx->dqo_tx.free_pending_packets != -1) in gve_has_pending_packet()
107 if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1) in gve_has_pending_packet()
114 gve_alloc_pending_packet(struct gve_tx_ring *tx) in gve_alloc_pending_packet() argument
119 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
125 tx->dqo_tx.free_pending_packets = in gve_alloc_pending_packet()
126 atomic_xchg(&tx->dqo_compl.free_pending_packets, -1); in gve_alloc_pending_packet()
127 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
133 pending_packet = &tx->dqo.pending_packets[index]; in gve_alloc_pending_packet()
136 tx->dqo_tx.free_pending_packets = pending_packet->next; in gve_alloc_pending_packet()
143 gve_free_pending_packet(struct gve_tx_ring *tx, in gve_free_pending_packet() argument
146 s16 index = pending_packet - tx->dqo.pending_packets; in gve_free_pending_packet()
150 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets); in gve_free_pending_packet()
153 if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets, in gve_free_pending_packet()
162 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) in gve_tx_clean_pending_packets() argument
166 for (i = 0; i < tx->dqo.num_pending_packets; i++) { in gve_tx_clean_pending_packets()
168 &tx->dqo.pending_packets[i]; in gve_tx_clean_pending_packets()
173 dma_unmap_single(tx->dev, in gve_tx_clean_pending_packets()
178 dma_unmap_page(tx->dev, in gve_tx_clean_pending_packets()
194 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_stop_ring_dqo() local
200 gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL); in gve_tx_stop_ring_dqo()
201 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_stop_ring_dqo()
202 gve_tx_clean_pending_packets(tx); in gve_tx_stop_ring_dqo()
206 static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_free_ring_dqo() argument
210 int idx = tx->q_num; in gve_tx_free_ring_dqo()
214 if (tx->q_resources) { in gve_tx_free_ring_dqo()
215 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring_dqo()
216 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_dqo()
217 tx->q_resources = NULL; in gve_tx_free_ring_dqo()
220 if (tx->dqo.compl_ring) { in gve_tx_free_ring_dqo()
221 bytes = sizeof(tx->dqo.compl_ring[0]) * in gve_tx_free_ring_dqo()
222 (tx->dqo.complq_mask + 1); in gve_tx_free_ring_dqo()
223 dma_free_coherent(hdev, bytes, tx->dqo.compl_ring, in gve_tx_free_ring_dqo()
224 tx->complq_bus_dqo); in gve_tx_free_ring_dqo()
225 tx->dqo.compl_ring = NULL; in gve_tx_free_ring_dqo()
228 if (tx->dqo.tx_ring) { in gve_tx_free_ring_dqo()
229 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_free_ring_dqo()
230 dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus); in gve_tx_free_ring_dqo()
231 tx->dqo.tx_ring = NULL; in gve_tx_free_ring_dqo()
234 kvfree(tx->dqo.pending_packets); in gve_tx_free_ring_dqo()
235 tx->dqo.pending_packets = NULL; in gve_tx_free_ring_dqo()
237 kvfree(tx->dqo.tx_qpl_buf_next); in gve_tx_free_ring_dqo()
238 tx->dqo.tx_qpl_buf_next = NULL; in gve_tx_free_ring_dqo()
240 if (tx->dqo.qpl) { in gve_tx_free_ring_dqo()
241 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_free_ring_dqo()
242 gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id); in gve_tx_free_ring_dqo()
243 tx->dqo.qpl = NULL; in gve_tx_free_ring_dqo()
249 static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) in gve_tx_qpl_buf_init() argument
252 tx->dqo.qpl->num_entries; in gve_tx_qpl_buf_init()
255 tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, in gve_tx_qpl_buf_init()
256 sizeof(tx->dqo.tx_qpl_buf_next[0]), in gve_tx_qpl_buf_init()
258 if (!tx->dqo.tx_qpl_buf_next) in gve_tx_qpl_buf_init()
261 tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; in gve_tx_qpl_buf_init()
265 tx->dqo.tx_qpl_buf_next[i] = i + 1; in gve_tx_qpl_buf_init()
266 tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; in gve_tx_qpl_buf_init()
268 atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_tx_qpl_buf_init()
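gve_tx_qpl_buf_init() above seeds that free list. Roughly, and with hypothetical names:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Chain every buffer index to its successor and terminate with -1. The
 * completion-side head starts empty; the TX-path head is 0 because the ring
 * struct was zeroed in gve_tx_alloc_ring_dqo(), so the TX path initially
 * owns the whole chain.
 */
static void freelist_init(int16_t *next, int num_bufs,
			  _Atomic int16_t *compl_head)
{
	for (int i = 0; i < num_bufs - 1; i++)
		next[i] = i + 1;
	next[num_bufs - 1] = -1;

	atomic_store_explicit(compl_head, -1, memory_order_release);
}
```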
275 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_start_ring_dqo() local
279 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_start_ring_dqo()
285 struct gve_tx_ring *tx, in gve_tx_alloc_ring_dqo() argument
295 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_dqo()
296 tx->q_num = idx; in gve_tx_alloc_ring_dqo()
297 tx->dev = hdev; in gve_tx_alloc_ring_dqo()
298 atomic_set_release(&tx->dqo_compl.hw_tx_head, 0); in gve_tx_alloc_ring_dqo()
301 tx->mask = cfg->ring_size - 1; in gve_tx_alloc_ring_dqo()
302 tx->dqo.complq_mask = tx->mask; in gve_tx_alloc_ring_dqo()
310 num_pending_packets = tx->dqo.complq_mask + 1; in gve_tx_alloc_ring_dqo()
316 (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL; in gve_tx_alloc_ring_dqo()
323 tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX); in gve_tx_alloc_ring_dqo()
324 tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets, in gve_tx_alloc_ring_dqo()
325 sizeof(tx->dqo.pending_packets[0]), in gve_tx_alloc_ring_dqo()
327 if (!tx->dqo.pending_packets) in gve_tx_alloc_ring_dqo()
331 for (i = 0; i < tx->dqo.num_pending_packets - 1; i++) in gve_tx_alloc_ring_dqo()
332 tx->dqo.pending_packets[i].next = i + 1; in gve_tx_alloc_ring_dqo()
334 tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1; in gve_tx_alloc_ring_dqo()
335 atomic_set_release(&tx->dqo_compl.free_pending_packets, -1); in gve_tx_alloc_ring_dqo()
336 tx->dqo_compl.miss_completions.head = -1; in gve_tx_alloc_ring_dqo()
337 tx->dqo_compl.miss_completions.tail = -1; in gve_tx_alloc_ring_dqo()
338 tx->dqo_compl.timed_out_completions.head = -1; in gve_tx_alloc_ring_dqo()
339 tx->dqo_compl.timed_out_completions.tail = -1; in gve_tx_alloc_ring_dqo()
341 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_alloc_ring_dqo()
342 tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
343 if (!tx->dqo.tx_ring) in gve_tx_alloc_ring_dqo()
346 bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1); in gve_tx_alloc_ring_dqo()
347 tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes, in gve_tx_alloc_ring_dqo()
348 &tx->complq_bus_dqo, in gve_tx_alloc_ring_dqo()
350 if (!tx->dqo.compl_ring) in gve_tx_alloc_ring_dqo()
353 tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_alloc_ring_dqo()
354 &tx->q_resources_bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
355 if (!tx->q_resources) in gve_tx_alloc_ring_dqo()
359 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_alloc_ring_dqo()
362 tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_tx_alloc_ring_dqo()
364 if (!tx->dqo.qpl) in gve_tx_alloc_ring_dqo()
367 if (gve_tx_qpl_buf_init(tx)) in gve_tx_alloc_ring_dqo()
374 gve_tx_free_ring_dqo(priv, tx, cfg); in gve_tx_alloc_ring_dqo()
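The gve_tx_alloc_ring_dqo() lines above size the pending-packet array from the completion-queue size before allocating the descriptor ring, completion ring, queue resources, and (for QPL mode) the queue page list. A rough sketch of that sizing arithmetic, with re_interval standing in for GVE_TX_MIN_RE_INTERVAL and further driver-specific derating not visible in the matched lines omitted:

```c
#include <limits.h>

static int size_pending_packets(unsigned int complq_mask,
				unsigned int re_interval)
{
	int n = complq_mask + 1;	/* one slot per completion-queue entry */

	/* Reserve room for report-event descriptor completions, which can
	 * arrive at most once every re_interval packets.
	 */
	n -= (complq_mask + 1) / re_interval;

	/* Completion tags and free-list links are s16, so cap the total. */
	return n < SHRT_MAX ? n : SHRT_MAX;
}
```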
381 struct gve_tx_ring *tx = cfg->tx; in gve_tx_alloc_rings_dqo() local
392 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_dqo()
394 if (!tx) in gve_tx_alloc_rings_dqo()
396 } else if (!tx) { in gve_tx_alloc_rings_dqo()
403 err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i); in gve_tx_alloc_rings_dqo()
412 cfg->tx = tx; in gve_tx_alloc_rings_dqo()
417 gve_tx_free_ring_dqo(priv, &tx[j], cfg); in gve_tx_alloc_rings_dqo()
419 kvfree(tx); in gve_tx_alloc_rings_dqo()
426 struct gve_tx_ring *tx = cfg->tx; in gve_tx_free_rings_dqo() local
429 if (!tx) in gve_tx_free_rings_dqo()
433 gve_tx_free_ring_dqo(priv, &tx[i], cfg); in gve_tx_free_rings_dqo()
436 kvfree(tx); in gve_tx_free_rings_dqo()
437 cfg->tx = NULL; in gve_tx_free_rings_dqo()
442 static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) in num_avail_tx_slots() argument
444 u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask; in num_avail_tx_slots()
446 return tx->mask - num_used; in num_avail_tx_slots()
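num_avail_tx_slots() relies on the ring size being a power of two (tx->mask = size - 1): occupancy is the wrapped distance from head to tail, and returning mask rather than size minus that keeps one slot unused so a full ring is distinguishable from an empty one. As a standalone sketch:

```c
static unsigned int avail_slots(unsigned int tail, unsigned int head,
				unsigned int mask)
{
	/* Wrapped number of in-flight descriptors. */
	unsigned int used = (tail - head) & mask;

	/* One slot stays reserved, so "mask" rather than "mask + 1". */
	return mask - used;
}
```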
449 static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, in gve_has_avail_slots_tx_dqo() argument
452 return gve_has_pending_packet(tx) && in gve_has_avail_slots_tx_dqo()
453 num_avail_tx_slots(tx) >= desc_count && in gve_has_avail_slots_tx_dqo()
454 gve_has_free_tx_qpl_bufs(tx, buf_count); in gve_has_avail_slots_tx_dqo()
460 static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, in gve_maybe_stop_tx_dqo() argument
463 if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
467 tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); in gve_maybe_stop_tx_dqo()
469 if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
473 tx->stop_queue++; in gve_maybe_stop_tx_dqo()
474 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
482 tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); in gve_maybe_stop_tx_dqo()
484 if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
487 netif_tx_start_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
488 tx->wake_queue++; in gve_maybe_stop_tx_dqo()
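gve_maybe_stop_tx_dqo() above follows the usual stop-then-recheck pattern: try the cached head, refresh it from the completion path's hw_tx_head, and only then stop the queue; after stopping, the head is read once more so a completion that raced in does not leave the queue stopped while room is available. A self-contained sketch of that shape, with an explicit fence standing in for whatever ordering the real driver uses between the stop and the re-read (hypothetical txq/maybe_stop names):

```c
#include <stdatomic.h>
#include <stdbool.h>

struct txq {
	unsigned int tail, cached_head, mask;
	_Atomic unsigned int hw_head;   /* advanced by the completion path */
	bool stopped;
};

static bool has_room(struct txq *q, unsigned int need)
{
	unsigned int used = (q->tail - q->cached_head) & q->mask;

	return q->mask - used >= need;
}

static int maybe_stop(struct txq *q, unsigned int need)
{
	if (has_room(q, need))
		return 0;

	/* Refresh the cached head before giving up. */
	q->cached_head = atomic_load_explicit(&q->hw_head,
					      memory_order_acquire);
	if (has_room(q, need))
		return 0;

	q->stopped = true;                         /* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst); /* order the stop vs. the re-read */

	/* Re-check after stopping so a completion that raced in between does
	 * not leave the queue stopped while space is available.
	 */
	q->cached_head = atomic_load_explicit(&q->hw_head,
					      memory_order_acquire);
	if (!has_room(q, need))
		return -1;                         /* caller reports BUSY */

	q->stopped = false;                        /* netif_tx_start_queue() */
	return 0;
}
```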
509 static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx, in gve_tx_fill_pkt_desc_dqo() argument
517 &tx->dqo.tx_ring[*desc_idx].pkt; in gve_tx_fill_pkt_desc_dqo()
532 *desc_idx = (*desc_idx + 1) & tx->mask; in gve_tx_fill_pkt_desc_dqo()
622 static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy_dqo() argument
646 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
647 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
654 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, in gve_tx_add_skb_no_copy_dqo()
665 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
666 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
673 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, in gve_tx_add_skb_no_copy_dqo()
681 dma_unmap_single(tx->dev, in gve_tx_add_skb_no_copy_dqo()
686 dma_unmap_page(tx->dev, in gve_tx_add_skb_no_copy_dqo()
700 static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, in gve_tx_buf_get_addr() argument
707 *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; in gve_tx_buf_get_addr()
708 *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; in gve_tx_buf_get_addr()
711 static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_copy_dqo() argument
727 index = gve_alloc_tx_qpl_buf(tx); in gve_tx_add_skb_copy_dqo()
731 gve_tx_buf_get_addr(tx, index, &va, &dma_addr); in gve_tx_add_skb_copy_dqo()
737 dma_sync_single_for_device(tx->dev, dma_addr, in gve_tx_add_skb_copy_dqo()
739 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, in gve_tx_add_skb_copy_dqo()
747 ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; in gve_tx_add_skb_copy_dqo()
754 gve_free_tx_qpl_bufs(tx, pkt); in gve_tx_add_skb_copy_dqo()
763 static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_dqo() argument
767 u32 desc_idx = tx->dqo_tx.tail; in gve_tx_add_skb_dqo()
772 pkt = gve_alloc_pending_packet(tx); in gve_tx_add_skb_dqo()
774 completion_tag = pkt - tx->dqo.pending_packets; in gve_tx_add_skb_dqo()
783 gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, in gve_tx_add_skb_dqo()
785 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
788 gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, in gve_tx_add_skb_dqo()
790 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
792 if (tx->dqo.qpl) { in gve_tx_add_skb_dqo()
793 if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
798 if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
804 tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; in gve_tx_add_skb_dqo()
807 tx->dqo_tx.tail = desc_idx; in gve_tx_add_skb_dqo()
813 u32 last_desc_idx = (desc_idx - 1) & tx->mask; in gve_tx_add_skb_dqo()
815 (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask; in gve_tx_add_skb_dqo()
819 tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true; in gve_tx_add_skb_dqo()
820 tx->dqo_tx.last_re_idx = last_desc_idx; in gve_tx_add_skb_dqo()
828 gve_free_pending_packet(tx, pkt); in gve_tx_add_skb_dqo()
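At the end of gve_tx_add_skb_dqo(), the last descriptor written may get its report_event bit set, but only when at least GVE_TX_MIN_RE_INTERVAL descriptors have passed since the previous report-event descriptor, which limits how often descriptor completions land on the completion ring. The interval test reduces to:

```c
#include <stdbool.h>

static bool should_set_report_event(unsigned int last_desc_idx,
				    unsigned int last_re_idx,
				    unsigned int mask,
				    unsigned int min_interval)
{
	/* Wrapped distance since the last descriptor with report_event set. */
	return ((last_desc_idx - last_re_idx) & mask) >= min_interval;
}
```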
925 static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_try_tx_skb() argument
934 if (tx->dqo.qpl) { in gve_try_tx_skb()
957 if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs + in gve_try_tx_skb()
963 if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) in gve_try_tx_skb()
966 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_try_tx_skb()
971 tx->dropped_pkt++; in gve_try_tx_skb()
980 struct gve_tx_ring *tx; in gve_tx_dqo() local
982 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx_dqo()
983 if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) { in gve_tx_dqo()
988 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
992 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx_dqo()
995 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
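gve_tx_dqo() batches doorbells: if posting failed (the queue was stopped for lack of resources), the doorbell is rung anyway so descriptors posted by earlier calls still reach the NIC; otherwise it is deferred while netdev_xmit_more() promises more packets and the queue has not been stopped. The decision boils down to something like this hypothetical helper:

```c
#include <stdbool.h>

static bool should_ring_doorbell(bool post_failed, bool queue_stopped,
				 bool xmit_more)
{
	if (post_failed)
		return true;  /* flush descriptors left by earlier deferred calls */

	/* Defer while the stack promises more packets and the queue is live. */
	return queue_stopped || !xmit_more;
}
```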
999 static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, in add_to_list() argument
1004 index = pending_packet - tx->dqo.pending_packets; in add_to_list()
1010 tx->dqo.pending_packets[old_tail].next = index; in add_to_list()
1016 static void remove_from_list(struct gve_tx_ring *tx, in remove_from_list() argument
1029 tx->dqo.pending_packets[prev_index].next = next_index; in remove_from_list()
1035 tx->dqo.pending_packets[next_index].prev = prev_index; in remove_from_list()
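add_to_list() and remove_from_list() maintain the miss and timed-out completion lists without pointers: nodes are slots in tx->dqo.pending_packets, linked by s16 indices with -1 as the terminator. A standalone sketch of the append case (hypothetical index_node/index_list types):

```c
#include <stdint.h>

struct index_node { int16_t next, prev; };
struct index_list { int16_t head, tail; };

static void list_append(struct index_list *list, struct index_node *nodes,
			int16_t index)
{
	int16_t old_tail = list->tail;

	list->tail = index;
	if (old_tail == -1)
		list->head = index;       /* list was empty */
	else
		nodes[old_tail].next = index;

	nodes[index].next = -1;
	nodes[index].prev = old_tail;
}
```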
1061 struct gve_tx_ring *tx, bool is_napi, in gve_handle_packet_completion() argument
1067 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_packet_completion()
1073 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_packet_completion()
1083 remove_from_list(tx, in gve_handle_packet_completion()
1084 &tx->dqo_compl.timed_out_completions, in gve_handle_packet_completion()
1086 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1100 remove_from_list(tx, &tx->dqo_compl.miss_completions, in gve_handle_packet_completion()
1111 tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; in gve_handle_packet_completion()
1112 if (tx->dqo.qpl) in gve_handle_packet_completion()
1113 gve_free_tx_qpl_bufs(tx, pending_packet); in gve_handle_packet_completion()
1115 gve_unmap_packet(tx->dev, pending_packet); in gve_handle_packet_completion()
1121 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1125 struct gve_tx_ring *tx, u16 compl_tag, in gve_handle_miss_completion() argument
1130 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_miss_completion()
1136 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_miss_completion()
1151 add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet); in gve_handle_miss_completion()
1158 struct gve_tx_ring *tx) in remove_miss_completions() argument
1163 next_index = tx->dqo_compl.miss_completions.head; in remove_miss_completions()
1165 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_miss_completions()
1171 remove_from_list(tx, &tx->dqo_compl.miss_completions, in remove_miss_completions()
1178 if (tx->dqo.qpl) in remove_miss_completions()
1179 gve_free_tx_qpl_bufs(tx, pending_packet); in remove_miss_completions()
1181 gve_unmap_packet(tx->dev, pending_packet); in remove_miss_completions()
1186 tx->dropped_pkt++; in remove_miss_completions()
1189 (int)(pending_packet - tx->dqo.pending_packets)); in remove_miss_completions()
1199 add_to_list(tx, &tx->dqo_compl.timed_out_completions, in remove_miss_completions()
1205 struct gve_tx_ring *tx) in remove_timed_out_completions() argument
1210 next_index = tx->dqo_compl.timed_out_completions.head; in remove_timed_out_completions()
1212 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_timed_out_completions()
1218 remove_from_list(tx, &tx->dqo_compl.timed_out_completions, in remove_timed_out_completions()
1220 gve_free_pending_packet(tx, pending_packet); in remove_timed_out_completions()
1224 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done_dqo() argument
1238 &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_clean_tx_done_dqo()
1241 if (compl_desc->generation == tx->dqo_compl.cur_gen_bit) in gve_clean_tx_done_dqo()
1245 prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) & in gve_clean_tx_done_dqo()
1246 tx->dqo.complq_mask]); in gve_clean_tx_done_dqo()
1256 atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head); in gve_clean_tx_done_dqo()
1261 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1265 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1274 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1280 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1287 tx->dqo_compl.head = in gve_clean_tx_done_dqo()
1288 (tx->dqo_compl.head + 1) & tx->dqo.complq_mask; in gve_clean_tx_done_dqo()
1290 tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0; in gve_clean_tx_done_dqo()
1294 netdev_tx_completed_queue(tx->netdev_txq, in gve_clean_tx_done_dqo()
1298 remove_miss_completions(priv, tx); in gve_clean_tx_done_dqo()
1299 remove_timed_out_completions(priv, tx); in gve_clean_tx_done_dqo()
1301 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done_dqo()
1302 tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes; in gve_clean_tx_done_dqo()
1303 tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts; in gve_clean_tx_done_dqo()
1304 u64_stats_update_end(&tx->statss); in gve_clean_tx_done_dqo()
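gve_clean_tx_done_dqo() above consumes the completion ring by generation bit rather than a producer index: an entry is valid while its generation differs from the consumer's current bit, and the consumer flips its bit each time head wraps to zero (gve_tx_poll_dqo() uses the same test to report pending work). A minimal sketch of that consumer, with hypothetical complq types:

```c
#include <stddef.h>
#include <stdint.h>

struct compl_entry {
	uint8_t generation;     /* toggled by the device on each pass */
	/* ... completion payload ... */
};

struct complq {
	struct compl_entry *ring;
	uint32_t head;
	uint32_t mask;          /* ring size - 1, power of two */
	uint8_t cur_gen_bit;
};

/* Return the next device-written entry, or NULL if none is ready. */
static struct compl_entry *complq_next(struct complq *cq)
{
	struct compl_entry *e = &cq->ring[cq->head];

	if (e->generation == cq->cur_gen_bit)
		return NULL;    /* still the stale value from the previous lap */

	/* Real hardware rings also need a read barrier here before the
	 * payload is used.
	 */
	cq->head = (cq->head + 1) & cq->mask;
	cq->cur_gen_bit ^= (cq->head == 0);
	return e;
}
```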
1311 struct gve_tx_ring *tx = block->tx; in gve_tx_poll_dqo() local
1315 int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx, in gve_tx_poll_dqo()
1321 if (netif_tx_queue_stopped(tx->netdev_txq) && in gve_tx_poll_dqo()
1323 tx->wake_queue++; in gve_tx_poll_dqo()
1324 netif_tx_wake_queue(tx->netdev_txq); in gve_tx_poll_dqo()
1329 compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_tx_poll_dqo()
1330 return compl_desc->generation != tx->dqo_compl.cur_gen_bit; in gve_tx_poll_dqo()