Lines matching refs: tx_queue
23 int ef100_tx_probe(struct efx_tx_queue *tx_queue) in ef100_tx_probe() argument
26 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, in ef100_tx_probe()
27 (tx_queue->ptr_mask + 2) * in ef100_tx_probe()
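
A minimal user-space sketch of the buffer-size arithmetic above, assuming ptr_mask is ring_size - 1 for a power-of-two ring; the 16-byte descriptor width stands in for the sizeof() factor on the continuation lines that the listing does not show, so it is an assumption here:

    /* Sketch of the sizing in ef100_tx_probe(): for a power-of-two ring,
     * ptr_mask == ring_size - 1, so (ptr_mask + 2) entries means one
     * descriptor beyond the ring itself.  The 16-byte width is assumed.
     */
    #include <stdio.h>

    #define DESC_SIZE 16u   /* assumed descriptor width */

    static unsigned int txd_buffer_bytes(unsigned int ptr_mask)
    {
            return (ptr_mask + 2) * DESC_SIZE;
    }

    int main(void)
    {
            unsigned int ptr_mask = 1024 - 1;   /* 1024-entry ring */

            printf("ring entries: %u, buffer bytes: %u\n",
                   ptr_mask + 1, txd_buffer_bytes(ptr_mask));
            return 0;
    }
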
32 void ef100_tx_init(struct efx_tx_queue *tx_queue) in ef100_tx_init() argument
35 tx_queue->core_txq = in ef100_tx_init()
36 netdev_get_tx_queue(tx_queue->efx->net_dev, in ef100_tx_init()
37 tx_queue->channel->channel - in ef100_tx_init()
38 tx_queue->efx->tx_channel_offset); in ef100_tx_init()
46 tx_queue->tso_version = 3; in ef100_tx_init()
47 if (efx_mcdi_tx_init(tx_queue)) in ef100_tx_init()
48 netdev_WARN(tx_queue->efx->net_dev, in ef100_tx_init()
49 "failed to initialise TXQ %d\n", tx_queue->queue); in ef100_tx_init()
52 static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in ef100_tx_can_tso() argument
54 struct efx_nic *efx = tx_queue->efx; in ef100_tx_can_tso()
93 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in ef100_tx_can_tso()
98 ++tx_queue->insert_count; in ef100_tx_can_tso()
102 static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) in ef100_tx_desc() argument
104 if (likely(tx_queue->txd.addr)) in ef100_tx_desc()
105 return ((efx_oword_t *)tx_queue->txd.addr) + index; in ef100_tx_desc()
110 static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) in ef100_notify_tx_desc() argument
115 tx_queue->xmit_pending = false; in ef100_notify_tx_desc()
117 if (unlikely(tx_queue->notify_count == tx_queue->write_count)) in ef100_notify_tx_desc()
120 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in ef100_notify_tx_desc()
123 		efx_writed_page(tx_queue->efx, &reg, in ef100_notify_tx_desc()
124 ER_GZ_TX_RING_DOORBELL, tx_queue->queue); in ef100_notify_tx_desc()
125 tx_queue->notify_count = tx_queue->write_count; in ef100_notify_tx_desc()
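
The ef100_notify_tx_desc() lines above show the doorbell bookkeeping: do nothing if notify_count already equals write_count, otherwise mask write_count into a ring offset, ring the doorbell, and catch notify_count up. A user-space model of that flow with the hardware write stubbed out; only the register name ER_GZ_TX_RING_DOORBELL comes from the listing, everything else is illustrative:

    /* Model of the notify path: counts are free-running, the ring offset
     * is write_count & ptr_mask, and notify_count tracks what the NIC
     * has been told about.  model_doorbell() stands in for the MMIO write.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct model_txq {
            unsigned int write_count;
            unsigned int notify_count;
            unsigned int ptr_mask;
            bool xmit_pending;
    };

    static void model_doorbell(unsigned int write_ptr)
    {
            /* stands in for the ER_GZ_TX_RING_DOORBELL page write */
            printf("doorbell: write_ptr=%u\n", write_ptr);
    }

    static void model_notify(struct model_txq *txq)
    {
            unsigned int write_ptr;

            txq->xmit_pending = false;
            if (txq->notify_count == txq->write_count)
                    return;                 /* nothing new to tell the NIC */

            write_ptr = txq->write_count & txq->ptr_mask;
            model_doorbell(write_ptr);
            txq->notify_count = txq->write_count;
    }

    int main(void)
    {
            struct model_txq txq = { .write_count = 5, .notify_count = 3,
                                     .ptr_mask = 511, .xmit_pending = true };

            model_notify(&txq);   /* rings the doorbell at offset 5 */
            model_notify(&txq);   /* no-op: notify_count == write_count */
            return 0;
    }
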
128 static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) in ef100_tx_push_buffers() argument
130 ef100_notify_tx_desc(tx_queue); in ef100_tx_push_buffers()
131 ++tx_queue->pushes; in ef100_tx_push_buffers()
255 static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, in ef100_tx_make_descriptors() argument
260 unsigned int old_write_count = tx_queue->write_count; in ef100_tx_make_descriptors()
266 unsigned int nr_descs = tx_queue->insert_count - old_write_count; in ef100_tx_make_descriptors()
278 write_ptr = new_write_count & tx_queue->ptr_mask; in ef100_tx_make_descriptors()
279 txd = ef100_tx_desc(tx_queue, write_ptr); in ef100_tx_make_descriptors()
282 tx_queue->packet_write_count = new_write_count; in ef100_tx_make_descriptors()
295 write_ptr = new_write_count & tx_queue->ptr_mask; in ef100_tx_make_descriptors()
296 buffer = &tx_queue->buffer[write_ptr]; in ef100_tx_make_descriptors()
297 txd = ef100_tx_desc(tx_queue, write_ptr); in ef100_tx_make_descriptors()
301 tx_queue->packet_write_count = new_write_count; in ef100_tx_make_descriptors()
305 ef100_make_send_desc(tx_queue->efx, skb, in ef100_tx_make_descriptors()
311 ef100_make_tso_desc(tx_queue->efx, skb, in ef100_tx_make_descriptors()
328 } while (new_write_count != tx_queue->insert_count); in ef100_tx_make_descriptors()
332 tx_queue->write_count = new_write_count; in ef100_tx_make_descriptors()
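
The ef100_tx_make_descriptors() lines above loop from the old write_count toward insert_count, masking the free-running count with ptr_mask to pick a ring slot on each pass. A small sketch of that loop shape; descriptor contents are omitted and the ring size is chosen only for the demo:

    /* Sketch of the fill loop: counts run freely and are only masked when
     * indexing the ring, so nr_descs is insert_count - old_write_count
     * even across wrap-around.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int ptr_mask = 7;          /* 8-entry ring for the demo */
            unsigned int old_write_count = 6;
            unsigned int insert_count = 10;     /* wraps past the ring end */
            unsigned int new_write_count = old_write_count;
            unsigned int nr_descs = insert_count - old_write_count;

            printf("descriptors to write: %u\n", nr_descs);
            do {
                    unsigned int write_ptr = new_write_count & ptr_mask;

                    printf("fill slot %u (count %u)\n",
                           write_ptr, new_write_count);
                    new_write_count++;
            } while (new_write_count != insert_count);
            return 0;
    }
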
343 void ef100_tx_write(struct efx_tx_queue *tx_queue) in ef100_tx_write() argument
345 ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL); in ef100_tx_write()
346 ef100_tx_push_buffers(tx_queue); in ef100_tx_write()
355 struct efx_tx_queue *tx_queue = in ef100_ev_tx() local
357 unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & in ef100_ev_tx()
358 tx_queue->ptr_mask; in ef100_ev_tx()
360 return efx_xmit_done(tx_queue, tx_index); in ef100_ev_tx()
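
ef100_ev_tx() turns the completion count carried by the event (tx_done) into the ring index of the last completed descriptor. A sketch of that arithmetic with made-up counts:

    /* Sketch of the completion index: the last completed slot is
     * (read_count + tx_done - 1) & ptr_mask, which wraps correctly
     * because the counts are free-running.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int ptr_mask = 511;
            unsigned int read_count = 510;   /* free-running, about to wrap */
            unsigned int tx_done = 4;        /* completions in the event */
            unsigned int tx_index = (read_count + tx_done - 1) & ptr_mask;

            printf("last completed slot: %u\n", tx_index);   /* 1, after wrap */
            return 0;
    }
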
370 netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, in ef100_enqueue_skb() argument
373 return __ef100_enqueue_skb(tx_queue, skb, NULL); in ef100_enqueue_skb()
376 int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, in __ef100_enqueue_skb() argument
379 unsigned int old_insert_count = tx_queue->insert_count; in __ef100_enqueue_skb()
380 struct efx_nic *efx = tx_queue->efx; in __ef100_enqueue_skb()
386 if (!tx_queue->buffer || !tx_queue->ptr_mask) { in __ef100_enqueue_skb()
395 if (segments && !ef100_tx_can_tso(tx_queue, skb)) { in __ef100_enqueue_skb()
396 rc = efx_tx_tso_fallback(tx_queue, skb); in __ef100_enqueue_skb()
397 tx_queue->tso_fallbacks++; in __ef100_enqueue_skb()
405 struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue); in __ef100_enqueue_skb()
413 if (netif_tx_queue_stopped(tx_queue->core_txq) || in __ef100_enqueue_skb()
425 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in __ef100_enqueue_skb()
431 efx_for_each_channel_tx_queue(txq2, tx_queue->channel) in __ef100_enqueue_skb()
434 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in __ef100_enqueue_skb()
444 tx_queue->insert_count++; in __ef100_enqueue_skb()
448 rc = efx_tx_map_data(tx_queue, skb, segments); in __ef100_enqueue_skb()
451 ef100_tx_make_descriptors(tx_queue, skb, segments, efv); in __ef100_enqueue_skb()
453 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in __ef100_enqueue_skb()
462 netif_tx_stop_queue(tx_queue->core_txq); in __ef100_enqueue_skb()
468 efx_for_each_channel_tx_queue(txq2, tx_queue->channel) in __ef100_enqueue_skb()
470 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in __ef100_enqueue_skb()
472 netif_tx_start_queue(tx_queue->core_txq); in __ef100_enqueue_skb()
475 tx_queue->xmit_pending = true; in __ef100_enqueue_skb()
485 __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) || in __ef100_enqueue_skb()
486 tx_queue->write_count - tx_queue->notify_count > 255) in __ef100_enqueue_skb()
487 ef100_tx_push_buffers(tx_queue); in __ef100_enqueue_skb()
490 tx_queue->tso_bursts++; in __ef100_enqueue_skb()
491 tx_queue->tso_packets += segments; in __ef100_enqueue_skb()
492 tx_queue->tx_packets += segments; in __ef100_enqueue_skb()
494 tx_queue->tx_packets++; in __ef100_enqueue_skb()
499 efx_enqueue_unwind(tx_queue, old_insert_count); in __ef100_enqueue_skb()
508 if (tx_queue->xmit_pending && !xmit_more) in __ef100_enqueue_skb()
509 ef100_tx_push_buffers(tx_queue); in __ef100_enqueue_skb()
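
The tail of __ef100_enqueue_skb() rings the doorbell either when the stack wants a flush (the __netdev_tx_sent_queue()/xmit_more path above) or when more than 255 descriptors have been written since the last notify. A user-space model of that batching decision; the helper name and sample values are illustrative only, with the 255 threshold taken from the line above:

    /* Model of the push decision: batch doorbells across xmit_more
     * packets, but never let more than 255 descriptors go unnotified.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool should_push(bool stack_wants_flush,
                            unsigned int write_count, unsigned int notify_count)
    {
            return stack_wants_flush || (write_count - notify_count > 255);
    }

    int main(void)
    {
            printf("%d\n", should_push(false, 100, 0));   /* 0: keep batching */
            printf("%d\n", should_push(false, 300, 0));   /* 1: too much unnotified */
            printf("%d\n", should_push(true, 10, 8));     /* 1: flush requested */
            return 0;
    }
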