Lines matching refs: tx_queue
(entries are grouped by function below; the leading number on each entry is the source line)
In efx_tx_get_copy_buffer() (tx_queue is an argument):
   26  static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
   29  unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
   31  &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
   36  efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
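The shift on line 31 packs several fixed-size copy buffers into each backing page: the insert index, divided by the number of buffers per page, selects a cb_page[] slot. A minimal userspace sketch of that indexing, assuming 4 KiB pages and 128-byte (order-7) copy buffers; the macro names here are hypothetical, not the driver's:

    /* Hypothetical userspace analogue of the cb_page[] indexing above. */
    #include <stdio.h>

    #define MY_PAGE_SHIFT 12                  /* 4 KiB pages (assumed)           */
    #define MY_CB_ORDER   7                   /* 128-byte copy buffers (assumed) */
    #define CB_PER_PAGE   (1u << (MY_PAGE_SHIFT - MY_CB_ORDER))

    int main(void)
    {
        for (unsigned int index = 30; index <= 33; index++) {
            unsigned int page   = index >> (MY_PAGE_SHIFT - MY_CB_ORDER);
            unsigned int offset = (index & (CB_PER_PAGE - 1)) << MY_CB_ORDER;
            printf("index %u -> cb_page[%u], byte offset %u\n",
                   index, page, offset);
        }
        return 0;
    }

With these assumed sizes, 32 buffers share each page, so index 32 is the first buffer of cb_page[1].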
In efx_enqueue_skb_copy() (tx_queue is an argument):
   83  static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
   93  buffer = efx_tx_queue_get_insert_buffer(tx_queue);
   95  copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
  106  ++tx_queue->insert_count;
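This is the copy path for small packets: the payload is memcpy'd into a preallocated copy buffer and the producer index advances only after the copy completes (line 106). A compilable sketch of the pattern, with illustrative names and sizes that are not taken from the driver:

    /* Illustrative copy-path enqueue; all names and sizes are assumptions. */
    #include <string.h>

    #define RING_SIZE 256u                    /* power of two (assumed) */
    #define RING_MASK (RING_SIZE - 1)

    struct copy_buf { unsigned char data[128]; unsigned int len; };

    struct txq {
        struct copy_buf ring[RING_SIZE];
        unsigned int insert_count;            /* producer index, free-running */
    };

    static int enqueue_copy(struct txq *q, const void *pkt, unsigned int len)
    {
        struct copy_buf *b = &q->ring[q->insert_count & RING_MASK];

        if (len > sizeof(b->data))
            return -1;                        /* too big for the copy path */
        memcpy(b->data, pkt, len);
        b->len = len;
        ++q->insert_count;                    /* publish only after the copy */
        return 0;
    }

    int main(void)
    {
        static struct txq q;                  /* zero-initialised ring */
        const char pkt[] = "hello";

        return enqueue_copy(&q, pkt, sizeof(pkt)) ? 1 : 0;
    }

Advancing insert_count only after the buffer is filled keeps partially built entries invisible to the rest of the path.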
In __efx_siena_enqueue_skb() (tx_queue is an argument):
  139  netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
  142  unsigned int old_insert_count = tx_queue->insert_count;
  159  rc = efx_siena_tx_tso_fallback(tx_queue, skb);
  160  tx_queue->tso_fallbacks++;
  166  if (efx_enqueue_skb_copy(tx_queue, skb))
  168  tx_queue->cb_packets++;
  173  if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
  176  efx_tx_maybe_stop_queue(tx_queue);
  178  tx_queue->xmit_pending = true;
  181  if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
  182  efx_tx_send_pending(tx_queue->channel);
  184  tx_queue->tx_packets++;
  189  efx_siena_enqueue_unwind(tx_queue, old_insert_count);
  197  efx_tx_send_pending(tx_queue->channel);
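These references show the enqueue skeleton: the producer index is saved up front (line 142), descriptors are built by the TSO-fallback, copy, or mapping path, and on failure the queue is unwound to the saved index (line 189); the doorbell is deferred while the stack signals that more packets follow (xmit_more, lines 181-182). A self-contained sketch of the save/unwind and deferred-doorbell pattern, with stubbed helpers standing in for the real mapping and hardware push; none of these names are the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    struct txq { unsigned int insert_count; bool xmit_pending; };

    static int map_packet(struct txq *q, unsigned int len)
    {
        ++q->insert_count;                 /* header descriptor */
        if (len > 1500)
            return -1;                     /* mapping failed partway through */
        ++q->insert_count;                 /* payload descriptor */
        return 0;
    }

    static void unwind(struct txq *q, unsigned int old)
    {
        q->insert_count = old;             /* discard partially built descriptors */
    }

    static void push_doorbell(struct txq *q)
    {
        printf("doorbell at insert_count %u\n", q->insert_count);
        q->xmit_pending = false;
    }

    static int enqueue(struct txq *q, unsigned int len, bool xmit_more)
    {
        unsigned int old_insert_count = q->insert_count;

        if (map_packet(q, len)) {
            unwind(q, old_insert_count);   /* leave the ring as we found it */
            return -1;
        }
        q->xmit_pending = true;
        if (!xmit_more)                    /* batch doorbells across a burst */
            push_doorbell(q);
        return 0;
    }

    int main(void)
    {
        struct txq q = { 0, false };

        enqueue(&q, 100, true);            /* more coming: no doorbell yet     */
        enqueue(&q, 100, false);           /* end of burst: ring once for both */
        enqueue(&q, 9000, false);          /* fails: insert_count rolled back  */
        return 0;
    }

The xmit_more check mirrors the intent of lines 181-182: the hardware is only prodded once per burst of back-to-back packets.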
In efx_siena_xdp_tx_buffers() (tx_queue is a local):
  212  struct efx_tx_queue *tx_queue;
  229  tx_queue = efx->xdp_tx_queues[cpu];
  230  if (unlikely(!tx_queue))
  233  if (!tx_queue->initialised)
  237  HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
  243  if (netif_tx_queue_stopped(tx_queue->core_txq))
  245  efx_tx_maybe_stop_queue(tx_queue);
  252  tx_queue->read_count - tx_queue->insert_count;
  261  prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
  273  tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
  279  tx_queue->tx_packets++;
  284  efx_nic_push_buffers(tx_queue);
  288  HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
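Line 252 belongs to the free-space check: insert_count and read_count are free-running counters, so their unsigned difference is the ring fill level and remains correct across 32-bit wraparound. A small self-contained sketch, with a hypothetical power-of-two ring size:

    /* Free-running producer/consumer counters; the subtraction below is
     * deliberately unsigned so it survives counter wraparound. */
    #include <assert.h>

    #define RING_SIZE 512u                 /* power of two (assumed) */

    static unsigned int space(unsigned int read_count, unsigned int insert_count)
    {
        unsigned int fill = insert_count - read_count;  /* entries in flight */
        return RING_SIZE - fill;
    }

    int main(void)
    {
        assert(space(0, 0) == RING_SIZE);
        assert(space(10, 14) == RING_SIZE - 4);
        /* counters have wrapped past UINT_MAX: result is still correct */
        assert(space(0xfffffffeu, 2u) == RING_SIZE - 4);
        return 0;
    }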
In efx_siena_hard_start_xmit() (tx_queue is a local):
  303  struct efx_tx_queue *tx_queue;
  327  tx_queue = efx_get_tx_queue(efx, index, type);
  328  if (WARN_ON_ONCE(!tx_queue)) {
  343  return __efx_siena_enqueue_skb(tx_queue, skb);
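The xmit entry point looks up the queue for this packet's index and type, then bails out via WARN_ON_ONCE rather than dereferencing a NULL queue (line 328). A sketch of that lookup-then-dispatch shape; the 2-D table and every name below are assumptions for illustration:

    #include <stdio.h>

    enum txq_type { TXQ_LOPRI, TXQ_HIPRI, TXQ_NTYPES };

    struct txq { unsigned int channel; };

    #define N_CHANNELS 4u
    static struct txq *txq_table[N_CHANNELS][TXQ_NTYPES];  /* assumed layout */

    static struct txq *get_tx_queue(unsigned int index, enum txq_type type)
    {
        return index < N_CHANNELS ? txq_table[index][type] : NULL;
    }

    static int hard_start_xmit(unsigned int index, enum txq_type type)
    {
        struct txq *q = get_tx_queue(index, type);

        if (!q) {                          /* analogue of the WARN_ON_ONCE path */
            fprintf(stderr, "no TX queue for index %u\n", index);
            return -1;                     /* drop instead of dereferencing NULL */
        }
        /* ... hand the packet to the enqueue routine for q ... */
        return 0;
    }

    int main(void)
    {
        static struct txq q0 = { 0 };

        txq_table[0][TXQ_LOPRI] = &q0;
        hard_start_xmit(0, TXQ_LOPRI);     /* dispatches to q0 */
        hard_start_xmit(7, TXQ_HIPRI);     /* out of range: dropped with a warning */
        return 0;
    }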
In efx_siena_init_tx_queue_core_txq() (tx_queue is an argument):
  346  void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
  348  struct efx_nic *efx = tx_queue->efx;
  351  tx_queue->core_txq =
  353  tx_queue->channel->channel +
  354  ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
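Lines 351-354 derive which stack (netdev) TX queue backs this hardware queue: the channel number plus an offset when the queue is the high-priority type. The listing truncates before the offset operand, so the sketch below assumes high-priority queues occupy a second block of one queue per channel; the function name and values are illustrative:

    /* Assumed mapping: low-priority queues fill indices [0, n_channels),
     * high-priority queues the next block of the same size. */
    #include <stdio.h>

    #define TXQ_TYPE_HIGHPRI 0x1

    static unsigned int core_txq_index(unsigned int channel, unsigned int type,
                                       unsigned int n_channels)
    {
        return channel + ((type & TXQ_TYPE_HIGHPRI) ? n_channels : 0);
    }

    int main(void)
    {
        unsigned int n_channels = 4;       /* assumed channel count */

        for (unsigned int ch = 0; ch < n_channels; ch++)
            printf("channel %u: lopri -> %u, hipri -> %u\n", ch,
                   core_txq_index(ch, 0, n_channels),
                   core_txq_index(ch, TXQ_TYPE_HIGHPRI, n_channels));
        return 0;
    }

Whatever the real offset is, it must be the inverse of the queue lookup in efx_siena_hard_start_xmit() (line 327) so that a packet sent on a given stack queue completes on the same hardware queue's core_txq.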