Lines Matching +full:num +full:- +full:rxq
1 // SPDX-License-Identifier: GPL-2.0-only
15 #define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
22 * idpf_buf_lifo_push - push a buffer pointer onto stack
31 if (unlikely(stack->top == stack->size)) in idpf_buf_lifo_push()
32 return -ENOSPC; in idpf_buf_lifo_push()
34 stack->bufs[stack->top++] = buf; in idpf_buf_lifo_push()
40 * idpf_buf_lifo_pop - pop a buffer pointer from stack
45 if (unlikely(!stack->top)) in idpf_buf_lifo_pop()
48 return stack->bufs[--stack->top]; in idpf_buf_lifo_pop()
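The two fragments above (idpf_buf_lifo_push()/idpf_buf_lifo_pop()) implement a plain bounded LIFO that the driver uses to stash Tx buffer state for out-of-order completions. A minimal user-space sketch of the same pattern, using a hypothetical demo_lifo type rather than the driver's struct:

#include <errno.h>
#include <stddef.h>

struct demo_lifo {
	void **bufs;	/* backing array, allocated by the caller */
	int top;	/* number of valid entries */
	int size;	/* capacity */
};

/* Push fails with -ENOSPC once the stack is full, mirroring the check above. */
static int demo_lifo_push(struct demo_lifo *stack, void *buf)
{
	if (stack->top == stack->size)
		return -ENOSPC;

	stack->bufs[stack->top++] = buf;
	return 0;
}

/* Pop returns NULL on an empty stack, as the driver's pop does. */
static void *demo_lifo_pop(struct demo_lifo *stack)
{
	if (!stack->top)
		return NULL;

	return stack->bufs[--stack->top];
}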
52 * idpf_tx_timeout - Respond to a Tx Hang
60 adapter->tx_timeout_count++; in idpf_tx_timeout()
63 adapter->tx_timeout_count, txqueue); in idpf_tx_timeout()
65 set_bit(IDPF_HR_FUNC_RESET, adapter->flags); in idpf_tx_timeout()
66 queue_delayed_work(adapter->vc_event_wq, in idpf_tx_timeout()
67 &adapter->vc_event_task, in idpf_tx_timeout()
73 * idpf_tx_buf_rel_all - Free any empty Tx buffers
82 .dev = txq->dev, in idpf_tx_buf_rel_all()
89 if (!txq->tx_buf) in idpf_tx_buf_rel_all()
93 for (i = 0; i < txq->desc_count; i++) in idpf_tx_buf_rel_all()
94 libeth_tx_complete(&txq->tx_buf[i], &cp); in idpf_tx_buf_rel_all()
96 kfree(txq->tx_buf); in idpf_tx_buf_rel_all()
97 txq->tx_buf = NULL; in idpf_tx_buf_rel_all()
102 buf_stack = &txq->stash->buf_stack; in idpf_tx_buf_rel_all()
103 if (!buf_stack->bufs) in idpf_tx_buf_rel_all()
110 hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash, in idpf_tx_buf_rel_all()
115 libeth_tx_complete(&stash->buf, &cp); in idpf_tx_buf_rel_all()
116 hash_del(&stash->hlist); in idpf_tx_buf_rel_all()
120 for (i = 0; i < buf_stack->size; i++) in idpf_tx_buf_rel_all()
121 kfree(buf_stack->bufs[i]); in idpf_tx_buf_rel_all()
123 kfree(buf_stack->bufs); in idpf_tx_buf_rel_all()
124 buf_stack->bufs = NULL; in idpf_tx_buf_rel_all()
128 * idpf_tx_desc_rel - Free Tx resources per queue
136 netdev_tx_reset_subqueue(txq->netdev, txq->idx); in idpf_tx_desc_rel()
138 if (!txq->desc_ring) in idpf_tx_desc_rel()
141 dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); in idpf_tx_desc_rel()
142 txq->desc_ring = NULL; in idpf_tx_desc_rel()
143 txq->next_to_use = 0; in idpf_tx_desc_rel()
144 txq->next_to_clean = 0; in idpf_tx_desc_rel()
148 * idpf_compl_desc_rel - Free completion resources per queue
155 if (!complq->comp) in idpf_compl_desc_rel()
158 dma_free_coherent(complq->netdev->dev.parent, complq->size, in idpf_compl_desc_rel()
159 complq->comp, complq->dma); in idpf_compl_desc_rel()
160 complq->comp = NULL; in idpf_compl_desc_rel()
161 complq->next_to_use = 0; in idpf_compl_desc_rel()
162 complq->next_to_clean = 0; in idpf_compl_desc_rel()
166 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
175 if (!vport->txq_grps) in idpf_tx_desc_rel_all()
178 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_rel_all()
179 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_tx_desc_rel_all()
181 for (j = 0; j < txq_grp->num_txq; j++) in idpf_tx_desc_rel_all()
182 idpf_tx_desc_rel(txq_grp->txqs[j]); in idpf_tx_desc_rel_all()
184 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_rel_all()
185 idpf_compl_desc_rel(txq_grp->complq); in idpf_tx_desc_rel_all()
190 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
204 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; in idpf_tx_buf_alloc_all()
205 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); in idpf_tx_buf_alloc_all()
206 if (!tx_q->tx_buf) in idpf_tx_buf_alloc_all()
207 return -ENOMEM; in idpf_tx_buf_alloc_all()
212 buf_stack = &tx_q->stash->buf_stack; in idpf_tx_buf_alloc_all()
214 /* Initialize tx buf stack for out-of-order completions if in idpf_tx_buf_alloc_all()
217 buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), in idpf_tx_buf_alloc_all()
219 if (!buf_stack->bufs) in idpf_tx_buf_alloc_all()
220 return -ENOMEM; in idpf_tx_buf_alloc_all()
222 buf_stack->size = tx_q->desc_count; in idpf_tx_buf_alloc_all()
223 buf_stack->top = tx_q->desc_count; in idpf_tx_buf_alloc_all()
225 for (i = 0; i < tx_q->desc_count; i++) { in idpf_tx_buf_alloc_all()
226 buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), in idpf_tx_buf_alloc_all()
228 if (!buf_stack->bufs[i]) in idpf_tx_buf_alloc_all()
229 return -ENOMEM; in idpf_tx_buf_alloc_all()
236 * idpf_tx_desc_alloc - Allocate the Tx descriptors
245 struct device *dev = tx_q->dev; in idpf_tx_desc_alloc()
252 tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); in idpf_tx_desc_alloc()
255 tx_q->size = ALIGN(tx_q->size, 4096); in idpf_tx_desc_alloc()
256 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, in idpf_tx_desc_alloc()
258 if (!tx_q->desc_ring) { in idpf_tx_desc_alloc()
260 tx_q->size); in idpf_tx_desc_alloc()
261 err = -ENOMEM; in idpf_tx_desc_alloc()
265 tx_q->next_to_use = 0; in idpf_tx_desc_alloc()
266 tx_q->next_to_clean = 0; in idpf_tx_desc_alloc()
278 * idpf_compl_desc_alloc - allocate completion descriptors
282 * Return: 0 on success, -errno on failure.
287 complq->size = array_size(complq->desc_count, sizeof(*complq->comp)); in idpf_compl_desc_alloc()
289 complq->comp = dma_alloc_coherent(complq->netdev->dev.parent, in idpf_compl_desc_alloc()
290 complq->size, &complq->dma, in idpf_compl_desc_alloc()
292 if (!complq->comp) in idpf_compl_desc_alloc()
293 return -ENOMEM; in idpf_compl_desc_alloc()
295 complq->next_to_use = 0; in idpf_compl_desc_alloc()
296 complq->next_to_clean = 0; in idpf_compl_desc_alloc()
303 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
316 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_alloc_all()
317 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { in idpf_tx_desc_alloc_all()
318 struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; in idpf_tx_desc_alloc_all()
324 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
330 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_alloc_all()
333 txq->compl_tag_cur_gen = 0; in idpf_tx_desc_alloc_all()
339 bufidx_mask = txq->desc_count - 1; in idpf_tx_desc_alloc_all()
341 txq->compl_tag_gen_s++; in idpf_tx_desc_alloc_all()
344 txq->compl_tag_gen_s++; in idpf_tx_desc_alloc_all()
346 gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - in idpf_tx_desc_alloc_all()
347 txq->compl_tag_gen_s; in idpf_tx_desc_alloc_all()
348 txq->compl_tag_gen_max = GETMAXVAL(gen_bits); in idpf_tx_desc_alloc_all()
352 * ring size-1 since we can have size values in idpf_tx_desc_alloc_all()
355 txq->compl_tag_bufid_m = in idpf_tx_desc_alloc_all()
356 GETMAXVAL(txq->compl_tag_gen_s); in idpf_tx_desc_alloc_all()
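The arithmetic above splits the completion tag into a low buffer-index field and a high generation field: compl_tag_gen_s ends up as the number of bits needed to index desc_count buffers, and the remaining tag bits carry a generation counter (the tag is later built in idpf_tx_splitq_map() as (gen << gen_s) | index, visible further down in this listing). A hedged sketch of that layout for a power-of-two ring size; the 16-bit tag width stands in for the driver's IDPF_TX_SPLITQ_COMPL_TAG_WIDTH and is an assumption for the example:

#include <stdio.h>

#define DEMO_TAG_WIDTH	16	/* assumed total tag width for illustration */

static void demo_tag_layout(unsigned int desc_count)
{
	unsigned int gen_s = 0, mask = desc_count - 1;

	/* count the bits needed for the buffer index */
	while (mask) {
		gen_s++;
		mask >>= 1;
	}

	unsigned int bufid_m = (1u << gen_s) - 1;
	unsigned int gen_max = (1u << (DEMO_TAG_WIDTH - gen_s)) - 1;

	/* e.g. desc_count = 512 -> gen_s = 9, bufid_m = 0x1ff, gen_max = 0x7f */
	printf("gen_s=%u bufid_m=%#x gen_max=%#x\n", gen_s, bufid_m, gen_max);
}

int main(void)
{
	demo_tag_layout(512);
	return 0;
}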
359 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_alloc_all()
363 err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); in idpf_tx_desc_alloc_all()
365 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
380 * idpf_rx_page_rel - Release an rx buffer page
385 if (unlikely(!rx_buf->page)) in idpf_rx_page_rel()
388 page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); in idpf_rx_page_rel()
390 rx_buf->page = NULL; in idpf_rx_page_rel()
391 rx_buf->offset = 0; in idpf_rx_page_rel()
395 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
401 .fqes = bufq->hdr_buf, in idpf_rx_hdr_buf_rel_all()
402 .pp = bufq->hdr_pp, in idpf_rx_hdr_buf_rel_all()
405 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_hdr_buf_rel_all()
406 idpf_rx_page_rel(&bufq->hdr_buf[i]); in idpf_rx_hdr_buf_rel_all()
409 bufq->hdr_buf = NULL; in idpf_rx_hdr_buf_rel_all()
410 bufq->hdr_pp = NULL; in idpf_rx_hdr_buf_rel_all()
414 * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
420 .fqes = bufq->buf, in idpf_rx_buf_rel_bufq()
421 .pp = bufq->pp, in idpf_rx_buf_rel_bufq()
425 if (!bufq->buf) in idpf_rx_buf_rel_bufq()
429 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_buf_rel_bufq()
430 idpf_rx_page_rel(&bufq->buf[i]); in idpf_rx_buf_rel_bufq()
436 bufq->buf = NULL; in idpf_rx_buf_rel_bufq()
437 bufq->pp = NULL; in idpf_rx_buf_rel_bufq()
441 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
442 * @rxq: queue to be cleaned
444 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq) in idpf_rx_buf_rel_all() argument
447 .fqes = rxq->rx_buf, in idpf_rx_buf_rel_all()
448 .pp = rxq->pp, in idpf_rx_buf_rel_all()
451 if (!rxq->rx_buf) in idpf_rx_buf_rel_all()
454 for (u32 i = 0; i < rxq->desc_count; i++) in idpf_rx_buf_rel_all()
455 idpf_rx_page_rel(&rxq->rx_buf[i]); in idpf_rx_buf_rel_all()
458 rxq->rx_buf = NULL; in idpf_rx_buf_rel_all()
459 rxq->pp = NULL; in idpf_rx_buf_rel_all()
463 * idpf_rx_desc_rel - Free a specific Rx q resources
464 * @rxq: queue to clean the resources from
470 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev, in idpf_rx_desc_rel() argument
473 if (!rxq) in idpf_rx_desc_rel()
476 if (rxq->skb) { in idpf_rx_desc_rel()
477 dev_kfree_skb_any(rxq->skb); in idpf_rx_desc_rel()
478 rxq->skb = NULL; in idpf_rx_desc_rel()
482 idpf_rx_buf_rel_all(rxq); in idpf_rx_desc_rel()
484 rxq->next_to_alloc = 0; in idpf_rx_desc_rel()
485 rxq->next_to_clean = 0; in idpf_rx_desc_rel()
486 rxq->next_to_use = 0; in idpf_rx_desc_rel()
487 if (!rxq->desc_ring) in idpf_rx_desc_rel()
490 dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma); in idpf_rx_desc_rel()
491 rxq->desc_ring = NULL; in idpf_rx_desc_rel()
495 * idpf_rx_desc_rel_bufq - free buffer queue resources
507 bufq->next_to_alloc = 0; in idpf_rx_desc_rel_bufq()
508 bufq->next_to_clean = 0; in idpf_rx_desc_rel_bufq()
509 bufq->next_to_use = 0; in idpf_rx_desc_rel_bufq()
511 if (!bufq->split_buf) in idpf_rx_desc_rel_bufq()
514 dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); in idpf_rx_desc_rel_bufq()
515 bufq->split_buf = NULL; in idpf_rx_desc_rel_bufq()
519 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
526 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_rel_all()
531 if (!vport->rxq_grps) in idpf_rx_desc_rel_all()
534 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_rel_all()
535 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_rel_all()
537 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rx_desc_rel_all()
538 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) in idpf_rx_desc_rel_all()
539 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, in idpf_rx_desc_rel_all()
544 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rx_desc_rel_all()
546 idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, in idpf_rx_desc_rel_all()
549 if (!rx_qgrp->splitq.bufq_sets) in idpf_rx_desc_rel_all()
552 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_rel_all()
554 &rx_qgrp->splitq.bufq_sets[j]; in idpf_rx_desc_rel_all()
556 idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); in idpf_rx_desc_rel_all()
562 * idpf_rx_buf_hw_update - Store the new tail and head values
568 bufq->next_to_use = val; in idpf_rx_buf_hw_update()
570 if (unlikely(!bufq->tail)) in idpf_rx_buf_hw_update()
574 writel(val, bufq->tail); in idpf_rx_buf_hw_update()
578 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
586 .count = bufq->desc_count, in idpf_rx_hdr_buf_alloc_all()
588 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_hdr_buf_alloc_all()
592 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_hdr_buf_alloc_all()
596 bufq->hdr_pp = fq.pp; in idpf_rx_hdr_buf_alloc_all()
597 bufq->hdr_buf = fq.fqes; in idpf_rx_hdr_buf_alloc_all()
598 bufq->hdr_truesize = fq.truesize; in idpf_rx_hdr_buf_alloc_all()
599 bufq->rx_hbuf_size = fq.buf_len; in idpf_rx_hdr_buf_alloc_all()
605 * idpf_rx_post_buf_refill - Post buffer id to refill queue
611 u32 nta = refillq->next_to_use; in idpf_rx_post_buf_refill()
614 refillq->ring[nta] = in idpf_rx_post_buf_refill()
619 if (unlikely(++nta == refillq->desc_count)) { in idpf_rx_post_buf_refill()
624 refillq->next_to_use = nta; in idpf_rx_post_buf_refill()
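idpf_rx_post_buf_refill() above is a single-producer ring write: each refill entry records a buffer id plus the queue's current generation flag, and when next_to_use wraps the generation is toggled (the toggle itself sits on a line this search did not match) so the consumer can tell freshly posted ids from stale ones without a shared count. A simplified sketch of that pattern; the bit position and type names are illustrative, not the virtchnl definitions:

#define DEMO_GEN_BIT	0x8000u	/* assumed position of the generation flag */

struct demo_refillq {
	unsigned short *ring;
	unsigned int desc_count;
	unsigned int next_to_use;
	unsigned int gen;	/* current generation, 0 or 1 */
};

static void demo_refill_post(struct demo_refillq *q, unsigned short buf_id)
{
	unsigned int nta = q->next_to_use;

	q->ring[nta] = buf_id | (q->gen ? DEMO_GEN_BIT : 0);

	if (++nta == q->desc_count) {
		nta = 0;
		q->gen = !q->gen;	/* wrap: flip the generation flag */
	}

	q->next_to_use = nta;
}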
628 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
638 .count = bufq->desc_count, in idpf_rx_post_buf_desc()
640 u16 nta = bufq->next_to_alloc; in idpf_rx_post_buf_desc()
643 splitq_rx_desc = &bufq->split_buf[nta]; in idpf_rx_post_buf_desc()
646 fq.pp = bufq->hdr_pp; in idpf_rx_post_buf_desc()
647 fq.fqes = bufq->hdr_buf; in idpf_rx_post_buf_desc()
648 fq.truesize = bufq->hdr_truesize; in idpf_rx_post_buf_desc()
654 splitq_rx_desc->hdr_addr = cpu_to_le64(addr); in idpf_rx_post_buf_desc()
657 fq.pp = bufq->pp; in idpf_rx_post_buf_desc()
658 fq.fqes = bufq->buf; in idpf_rx_post_buf_desc()
659 fq.truesize = bufq->truesize; in idpf_rx_post_buf_desc()
665 splitq_rx_desc->pkt_addr = cpu_to_le64(addr); in idpf_rx_post_buf_desc()
666 splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); in idpf_rx_post_buf_desc()
669 if (unlikely(nta == bufq->desc_count)) in idpf_rx_post_buf_desc()
671 bufq->next_to_alloc = nta; in idpf_rx_post_buf_desc()
677 * idpf_rx_post_init_bufs - Post initial buffers to bufq
693 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, in idpf_rx_post_init_bufs()
700 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
701 * @rxq: queue for which the buffers are allocated
703 * Return: 0 on success, -ENOMEM on failure.
705 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq) in idpf_rx_buf_alloc_singleq() argument
707 if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1)) in idpf_rx_buf_alloc_singleq()
713 idpf_rx_buf_rel_all(rxq); in idpf_rx_buf_alloc_singleq()
715 return -ENOMEM; in idpf_rx_buf_alloc_singleq()
719 * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
720 * @rxq: buffer queue to create page pool for
722 * Return: 0 on success, -errno on failure.
724 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq) in idpf_rx_bufs_init_singleq() argument
727 .count = rxq->desc_count, in idpf_rx_bufs_init_singleq()
729 .nid = idpf_q_vector_to_mem(rxq->q_vector), in idpf_rx_bufs_init_singleq()
733 ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); in idpf_rx_bufs_init_singleq()
737 rxq->pp = fq.pp; in idpf_rx_bufs_init_singleq()
738 rxq->rx_buf = fq.fqes; in idpf_rx_bufs_init_singleq()
739 rxq->truesize = fq.truesize; in idpf_rx_bufs_init_singleq()
740 rxq->rx_buf_size = fq.buf_len; in idpf_rx_bufs_init_singleq()
742 return idpf_rx_buf_alloc_singleq(rxq); in idpf_rx_bufs_init_singleq()
746 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
763 err = -ENOMEM; in idpf_rx_buf_alloc_all()
773 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
783 .truesize = bufq->truesize, in idpf_rx_bufs_init()
784 .count = bufq->desc_count, in idpf_rx_bufs_init()
787 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_bufs_init()
791 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_bufs_init()
795 bufq->pp = fq.pp; in idpf_rx_bufs_init()
796 bufq->buf = fq.fqes; in idpf_rx_bufs_init()
797 bufq->truesize = fq.truesize; in idpf_rx_bufs_init()
798 bufq->rx_buf_size = fq.buf_len; in idpf_rx_bufs_init()
804 * idpf_rx_bufs_init_all - Initialize all RX bufs
811 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_rx_bufs_init_all()
814 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_bufs_init_all()
815 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_bufs_init_all()
818 /* Allocate bufs for the rxq itself in singleq */ in idpf_rx_bufs_init_all()
820 int num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rx_bufs_init_all()
825 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_bufs_init_all()
835 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_bufs_init_all()
839 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_bufs_init_all()
840 q->truesize = truesize; in idpf_rx_bufs_init_all()
848 truesize = q->truesize >> 1; in idpf_rx_bufs_init_all()
856 * idpf_rx_desc_alloc - Allocate queue Rx resources
858 * @rxq: Rx queue for which the resources are setup
863 struct idpf_rx_queue *rxq) in idpf_rx_desc_alloc() argument
865 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_alloc()
867 rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc); in idpf_rx_desc_alloc()
870 rxq->size = ALIGN(rxq->size, 4096); in idpf_rx_desc_alloc()
871 rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, in idpf_rx_desc_alloc()
872 &rxq->dma, GFP_KERNEL); in idpf_rx_desc_alloc()
873 if (!rxq->desc_ring) { in idpf_rx_desc_alloc()
875 rxq->size); in idpf_rx_desc_alloc()
876 return -ENOMEM; in idpf_rx_desc_alloc()
879 rxq->next_to_alloc = 0; in idpf_rx_desc_alloc()
880 rxq->next_to_clean = 0; in idpf_rx_desc_alloc()
881 rxq->next_to_use = 0; in idpf_rx_desc_alloc()
882 idpf_queue_set(GEN_CHK, rxq); in idpf_rx_desc_alloc()
888 * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
892 * Return: 0 on success, -ENOMEM on failure.
897 struct device *dev = &vport->adapter->pdev->dev; in idpf_bufq_desc_alloc()
899 bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); in idpf_bufq_desc_alloc()
901 bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, in idpf_bufq_desc_alloc()
903 if (!bufq->split_buf) in idpf_bufq_desc_alloc()
904 return -ENOMEM; in idpf_bufq_desc_alloc()
906 bufq->next_to_alloc = 0; in idpf_bufq_desc_alloc()
907 bufq->next_to_clean = 0; in idpf_bufq_desc_alloc()
908 bufq->next_to_use = 0; in idpf_bufq_desc_alloc()
916 * idpf_rx_desc_alloc_all - allocate all RX queues resources
927 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_alloc_all()
928 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_alloc_all()
929 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
930 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rx_desc_alloc_all()
932 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rx_desc_alloc_all()
937 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
938 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_rx_desc_alloc_all()
940 q = rx_qgrp->singleq.rxqs[j]; in idpf_rx_desc_alloc_all()
944 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
951 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
954 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_alloc_all()
957 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_desc_alloc_all()
961 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
978 * idpf_txq_group_rel - Release all resources for txq groups
986 if (!vport->txq_grps) in idpf_txq_group_rel()
989 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_rel()
990 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_rel()
993 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_rel()
994 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_txq_group_rel()
996 for (j = 0; j < txq_grp->num_txq; j++) { in idpf_txq_group_rel()
997 kfree(txq_grp->txqs[j]); in idpf_txq_group_rel()
998 txq_grp->txqs[j] = NULL; in idpf_txq_group_rel()
1004 kfree(txq_grp->complq); in idpf_txq_group_rel()
1005 txq_grp->complq = NULL; in idpf_txq_group_rel()
1008 kfree(txq_grp->stashes); in idpf_txq_group_rel()
1010 kfree(vport->txq_grps); in idpf_txq_group_rel()
1011 vport->txq_grps = NULL; in idpf_txq_group_rel()
1015 * idpf_rxq_sw_queue_rel - Release software queue resources
1022 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { in idpf_rxq_sw_queue_rel()
1023 struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; in idpf_rxq_sw_queue_rel()
1025 for (j = 0; j < bufq_set->num_refillqs; j++) { in idpf_rxq_sw_queue_rel()
1026 kfree(bufq_set->refillqs[j].ring); in idpf_rxq_sw_queue_rel()
1027 bufq_set->refillqs[j].ring = NULL; in idpf_rxq_sw_queue_rel()
1029 kfree(bufq_set->refillqs); in idpf_rxq_sw_queue_rel()
1030 bufq_set->refillqs = NULL; in idpf_rxq_sw_queue_rel()
1035 * idpf_rxq_group_rel - Release all resources for rxq groups
1036 * @vport: vport to release rxq groups on
1042 if (!vport->rxq_grps) in idpf_rxq_group_rel()
1045 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_rel()
1046 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_rel()
1050 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_rel()
1051 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_rxq_group_rel()
1053 kfree(rx_qgrp->splitq.rxq_sets[j]); in idpf_rxq_group_rel()
1054 rx_qgrp->splitq.rxq_sets[j] = NULL; in idpf_rxq_group_rel()
1058 kfree(rx_qgrp->splitq.bufq_sets); in idpf_rxq_group_rel()
1059 rx_qgrp->splitq.bufq_sets = NULL; in idpf_rxq_group_rel()
1061 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_rxq_group_rel()
1063 kfree(rx_qgrp->singleq.rxqs[j]); in idpf_rxq_group_rel()
1064 rx_qgrp->singleq.rxqs[j] = NULL; in idpf_rxq_group_rel()
1068 kfree(vport->rxq_grps); in idpf_rxq_group_rel()
1069 vport->rxq_grps = NULL; in idpf_rxq_group_rel()
1073 * idpf_vport_queue_grp_rel_all - Release all queue groups
1083 * idpf_vport_queues_rel - Free memory for all queues
1094 kfree(vport->txqs); in idpf_vport_queues_rel()
1095 vport->txqs = NULL; in idpf_vport_queues_rel()
1099 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1102 * We get a queue index from skb->queue_mapping and we need a fast way to
1112 vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), in idpf_vport_init_fast_path_txqs()
1115 if (!vport->txqs) in idpf_vport_init_fast_path_txqs()
1116 return -ENOMEM; in idpf_vport_init_fast_path_txqs()
1118 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_vport_init_fast_path_txqs()
1119 struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; in idpf_vport_init_fast_path_txqs()
1121 for (j = 0; j < tx_grp->num_txq; j++, k++) { in idpf_vport_init_fast_path_txqs()
1122 vport->txqs[k] = tx_grp->txqs[j]; in idpf_vport_init_fast_path_txqs()
1123 vport->txqs[k]->idx = k; in idpf_vport_init_fast_path_txqs()
1131 * idpf_vport_init_num_qs - Initialize number of queues
1139 u16 idx = vport->idx; in idpf_vport_init_num_qs()
1141 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_init_num_qs()
1142 vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_init_num_qs()
1143 vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_init_num_qs()
1147 if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) { in idpf_vport_init_num_qs()
1148 config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_init_num_qs()
1149 config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_init_num_qs()
1152 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_init_num_qs()
1153 vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); in idpf_vport_init_num_qs()
1154 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_init_num_qs()
1155 vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); in idpf_vport_init_num_qs()
1158 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_vport_init_num_qs()
1159 vport->num_bufqs_per_qgrp = 0; in idpf_vport_init_num_qs()
1164 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; in idpf_vport_init_num_qs()
1168 * idpf_vport_calc_num_q_desc - Calculate number of queue groups
1174 int num_bufqs = vport->num_bufqs_per_qgrp; in idpf_vport_calc_num_q_desc()
1176 u16 idx = vport->idx; in idpf_vport_calc_num_q_desc()
1179 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_calc_num_q_desc()
1180 num_req_txq_desc = config_data->num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1181 num_req_rxq_desc = config_data->num_req_rxq_desc; in idpf_vport_calc_num_q_desc()
1183 vport->complq_desc_count = 0; in idpf_vport_calc_num_q_desc()
1185 vport->txq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1186 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_vport_calc_num_q_desc()
1187 vport->complq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1188 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) in idpf_vport_calc_num_q_desc()
1189 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1193 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1194 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_desc()
1195 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1200 vport->rxq_desc_count = num_req_rxq_desc; in idpf_vport_calc_num_q_desc()
1202 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1205 if (!vport->bufq_desc_count[i]) in idpf_vport_calc_num_q_desc()
1206 vport->bufq_desc_count[i] = in idpf_vport_calc_num_q_desc()
1207 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, in idpf_vport_calc_num_q_desc()
1213 * idpf_vport_calc_total_qs - Calculate total number of queues
1232 vport_config = adapter->vport_config[vport_idx]; in idpf_vport_calc_total_qs()
1234 num_req_tx_qs = vport_config->user_config.num_req_tx_qs; in idpf_vport_calc_total_qs()
1235 num_req_rx_qs = vport_config->user_config.num_req_rx_qs; in idpf_vport_calc_total_qs()
1239 /* Restrict num of queues to cpus online as a default in idpf_vport_calc_total_qs()
1245 dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus); in idpf_vport_calc_total_qs()
1246 dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus); in idpf_vport_calc_total_qs()
1247 dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus); in idpf_vport_calc_total_qs()
1248 dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus); in idpf_vport_calc_total_qs()
1251 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) { in idpf_vport_calc_total_qs()
1253 vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps * in idpf_vport_calc_total_qs()
1255 vport_msg->num_tx_q = cpu_to_le16(num_txq_grps * in idpf_vport_calc_total_qs()
1261 vport_msg->num_tx_q = cpu_to_le16(num_qs); in idpf_vport_calc_total_qs()
1262 vport_msg->num_tx_complq = 0; in idpf_vport_calc_total_qs()
1264 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { in idpf_vport_calc_total_qs()
1266 vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * in idpf_vport_calc_total_qs()
1268 vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * in idpf_vport_calc_total_qs()
1274 vport_msg->num_rx_q = cpu_to_le16(num_qs); in idpf_vport_calc_total_qs()
1275 vport_msg->num_rx_bufq = 0; in idpf_vport_calc_total_qs()
1282 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1287 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_groups()
1288 vport->num_txq_grp = vport->num_txq; in idpf_vport_calc_num_q_groups()
1290 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
1292 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_num_q_groups()
1293 vport->num_rxq_grp = vport->num_rxq; in idpf_vport_calc_num_q_groups()
1295 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
1299 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1307 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_numq_per_grp()
1310 *num_txq = vport->num_txq; in idpf_vport_calc_numq_per_grp()
1312 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_numq_per_grp()
1315 *num_rxq = vport->num_rxq; in idpf_vport_calc_numq_per_grp()
1319 * idpf_rxq_set_descids - set the descids supported by this queue
1327 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_set_descids()
1328 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; in idpf_rxq_set_descids()
1330 if (vport->base_rxd) in idpf_rxq_set_descids()
1331 q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; in idpf_rxq_set_descids()
1333 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; in idpf_rxq_set_descids()
1338 * idpf_txq_group_alloc - Allocate all txq group resources
1349 vport->txq_grps = kcalloc(vport->num_txq_grp, in idpf_txq_group_alloc()
1350 sizeof(*vport->txq_grps), GFP_KERNEL); in idpf_txq_group_alloc()
1351 if (!vport->txq_grps) in idpf_txq_group_alloc()
1352 return -ENOMEM; in idpf_txq_group_alloc()
1354 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_alloc()
1355 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_alloc()
1358 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_alloc()
1359 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_txq_group_alloc()
1360 struct idpf_adapter *adapter = vport->adapter; in idpf_txq_group_alloc()
1364 tx_qgrp->vport = vport; in idpf_txq_group_alloc()
1365 tx_qgrp->num_txq = num_txq; in idpf_txq_group_alloc()
1367 for (j = 0; j < tx_qgrp->num_txq; j++) { in idpf_txq_group_alloc()
1368 tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), in idpf_txq_group_alloc()
1370 if (!tx_qgrp->txqs[j]) in idpf_txq_group_alloc()
1380 tx_qgrp->stashes = stashes; in idpf_txq_group_alloc()
1383 for (j = 0; j < tx_qgrp->num_txq; j++) { in idpf_txq_group_alloc()
1384 struct idpf_tx_queue *q = tx_qgrp->txqs[j]; in idpf_txq_group_alloc()
1386 q->dev = &adapter->pdev->dev; in idpf_txq_group_alloc()
1387 q->desc_count = vport->txq_desc_count; in idpf_txq_group_alloc()
1388 q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); in idpf_txq_group_alloc()
1389 q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); in idpf_txq_group_alloc()
1390 q->netdev = vport->netdev; in idpf_txq_group_alloc()
1391 q->txq_grp = tx_qgrp; in idpf_txq_group_alloc()
1394 q->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1396 vport->crc_enable); in idpf_txq_group_alloc()
1403 q->stash = &stashes[j]; in idpf_txq_group_alloc()
1404 hash_init(q->stash->sched_buf_hash); in idpf_txq_group_alloc()
1413 tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, in idpf_txq_group_alloc()
1414 sizeof(*tx_qgrp->complq), in idpf_txq_group_alloc()
1416 if (!tx_qgrp->complq) in idpf_txq_group_alloc()
1419 tx_qgrp->complq->desc_count = vport->complq_desc_count; in idpf_txq_group_alloc()
1420 tx_qgrp->complq->txq_grp = tx_qgrp; in idpf_txq_group_alloc()
1421 tx_qgrp->complq->netdev = vport->netdev; in idpf_txq_group_alloc()
1422 tx_qgrp->complq->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1425 idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq); in idpf_txq_group_alloc()
1433 return -ENOMEM; in idpf_txq_group_alloc()
1437 * idpf_rxq_group_alloc - Allocate all rxq group resources
1438 * @vport: vport to allocate rxq groups for
1448 vport->rxq_grps = kcalloc(vport->num_rxq_grp, in idpf_rxq_group_alloc()
1450 if (!vport->rxq_grps) in idpf_rxq_group_alloc()
1451 return -ENOMEM; in idpf_rxq_group_alloc()
1455 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_alloc()
1456 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_alloc()
1459 rx_qgrp->vport = vport; in idpf_rxq_group_alloc()
1460 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1461 rx_qgrp->singleq.num_rxq = num_rxq; in idpf_rxq_group_alloc()
1463 rx_qgrp->singleq.rxqs[j] = in idpf_rxq_group_alloc()
1464 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), in idpf_rxq_group_alloc()
1466 if (!rx_qgrp->singleq.rxqs[j]) { in idpf_rxq_group_alloc()
1467 err = -ENOMEM; in idpf_rxq_group_alloc()
1473 rx_qgrp->splitq.num_rxq_sets = num_rxq; in idpf_rxq_group_alloc()
1476 rx_qgrp->splitq.rxq_sets[j] = in idpf_rxq_group_alloc()
1479 if (!rx_qgrp->splitq.rxq_sets[j]) { in idpf_rxq_group_alloc()
1480 err = -ENOMEM; in idpf_rxq_group_alloc()
1485 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, in idpf_rxq_group_alloc()
1488 if (!rx_qgrp->splitq.bufq_sets) { in idpf_rxq_group_alloc()
1489 err = -ENOMEM; in idpf_rxq_group_alloc()
1493 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rxq_group_alloc()
1495 &rx_qgrp->splitq.bufq_sets[j]; in idpf_rxq_group_alloc()
1499 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rxq_group_alloc()
1500 q->desc_count = vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1501 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; in idpf_rxq_group_alloc()
1505 bufq_set->num_refillqs = num_rxq; in idpf_rxq_group_alloc()
1506 bufq_set->refillqs = kcalloc(num_rxq, swq_size, in idpf_rxq_group_alloc()
1508 if (!bufq_set->refillqs) { in idpf_rxq_group_alloc()
1509 err = -ENOMEM; in idpf_rxq_group_alloc()
1512 for (k = 0; k < bufq_set->num_refillqs; k++) { in idpf_rxq_group_alloc()
1514 &bufq_set->refillqs[k]; in idpf_rxq_group_alloc()
1516 refillq->desc_count = in idpf_rxq_group_alloc()
1517 vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1520 refillq->ring = kcalloc(refillq->desc_count, in idpf_rxq_group_alloc()
1521 sizeof(*refillq->ring), in idpf_rxq_group_alloc()
1523 if (!refillq->ring) { in idpf_rxq_group_alloc()
1524 err = -ENOMEM; in idpf_rxq_group_alloc()
1534 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1535 q = rx_qgrp->singleq.rxqs[j]; in idpf_rxq_group_alloc()
1538 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_rxq_group_alloc()
1539 rx_qgrp->splitq.rxq_sets[j]->refillq[0] = in idpf_rxq_group_alloc()
1540 &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; in idpf_rxq_group_alloc()
1541 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) in idpf_rxq_group_alloc()
1542 rx_qgrp->splitq.rxq_sets[j]->refillq[1] = in idpf_rxq_group_alloc()
1543 &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; in idpf_rxq_group_alloc()
1548 q->desc_count = vport->rxq_desc_count; in idpf_rxq_group_alloc()
1549 q->rx_ptype_lkup = vport->rx_ptype_lkup; in idpf_rxq_group_alloc()
1550 q->netdev = vport->netdev; in idpf_rxq_group_alloc()
1551 q->bufq_sets = rx_qgrp->splitq.bufq_sets; in idpf_rxq_group_alloc()
1552 q->idx = (i * num_rxq) + j; in idpf_rxq_group_alloc()
1553 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; in idpf_rxq_group_alloc()
1554 q->rx_max_pkt_size = vport->netdev->mtu + in idpf_rxq_group_alloc()
1568 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1597 * idpf_vport_queues_alloc - Allocate memory for all queues
1632 * idpf_tx_handle_sw_marker - Handle queue marker packet
1637 struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev); in idpf_tx_handle_sw_marker()
1638 struct idpf_vport *vport = priv->vport; in idpf_tx_handle_sw_marker()
1645 for (i = 0; i < vport->num_txq; i++) in idpf_tx_handle_sw_marker()
1649 if (idpf_queue_has(SW_MARKER, vport->txqs[i])) in idpf_tx_handle_sw_marker()
1653 set_bit(IDPF_VPORT_SW_MARKER, vport->flags); in idpf_tx_handle_sw_marker()
1654 wake_up(&vport->sw_marker_wq); in idpf_tx_handle_sw_marker()
1658 * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1673 .dev = txq->dev, in idpf_tx_clean_stashed_bufs()
1679 hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, in idpf_tx_clean_stashed_bufs()
1681 if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag)) in idpf_tx_clean_stashed_bufs()
1684 hash_del(&stash->hlist); in idpf_tx_clean_stashed_bufs()
1685 libeth_tx_complete(&stash->buf, &cp); in idpf_tx_clean_stashed_bufs()
1688 idpf_buf_lifo_push(&txq->stash->buf_stack, stash); in idpf_tx_clean_stashed_bufs()
1693 * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
1703 if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) in idpf_stash_flow_sch_buffers()
1706 stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); in idpf_stash_flow_sch_buffers()
1708 net_err_ratelimited("%s: No out-of-order TX buffers left!\n", in idpf_stash_flow_sch_buffers()
1709 netdev_name(txq->netdev)); in idpf_stash_flow_sch_buffers()
1711 return -ENOMEM; in idpf_stash_flow_sch_buffers()
1715 stash->buf.skb = tx_buf->skb; in idpf_stash_flow_sch_buffers()
1716 stash->buf.bytes = tx_buf->bytes; in idpf_stash_flow_sch_buffers()
1717 stash->buf.packets = tx_buf->packets; in idpf_stash_flow_sch_buffers()
1718 stash->buf.type = tx_buf->type; in idpf_stash_flow_sch_buffers()
1719 stash->buf.nr_frags = tx_buf->nr_frags; in idpf_stash_flow_sch_buffers()
1720 dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); in idpf_stash_flow_sch_buffers()
1721 dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); in idpf_stash_flow_sch_buffers()
1722 idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf); in idpf_stash_flow_sch_buffers()
1725 hash_add(txq->stash->sched_buf_hash, &stash->hlist, in idpf_stash_flow_sch_buffers()
1726 idpf_tx_buf_compl_tag(&stash->buf)); in idpf_stash_flow_sch_buffers()
1728 tx_buf->type = LIBETH_SQE_EMPTY; in idpf_stash_flow_sch_buffers()
1735 if (unlikely(++(ntc) == (txq)->desc_count)) { \
1737 buf = (txq)->tx_buf; \
1738 desc = &(txq)->flex_tx[0]; \
1746 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1751 * @descs_only: true if queue is using flow-based scheduling and should
1754 * Cleans the queue descriptor ring. If the queue is using queue-based
1756 * flow-based scheduling, only the descriptors are cleaned at this time.
1759 * this function when using flow-based scheduling.
1774 u32 ntc = tx_q->next_to_clean; in idpf_tx_splitq_clean()
1776 .dev = tx_q->dev, in idpf_tx_splitq_clean()
1783 tx_desc = &tx_q->flex_tx[ntc]; in idpf_tx_splitq_clean()
1784 next_pending_desc = &tx_q->flex_tx[end]; in idpf_tx_splitq_clean()
1785 tx_buf = &tx_q->tx_buf[ntc]; in idpf_tx_splitq_clean()
1794 if (tx_buf->type <= LIBETH_SQE_CTX) in idpf_tx_splitq_clean()
1797 if (unlikely(tx_buf->type != LIBETH_SQE_SKB)) in idpf_tx_splitq_clean()
1800 eop_idx = tx_buf->rs_idx; in idpf_tx_splitq_clean()
1803 if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) { in idpf_tx_splitq_clean()
1833 tx_q->next_to_clean = ntc; in idpf_tx_splitq_clean()
1842 if (unlikely((ntc) == (txq)->desc_count)) { \
1843 buf = (txq)->tx_buf; \
1849 * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1864 u16 idx = compl_tag & txq->compl_tag_bufid_m; in idpf_tx_clean_buf_ring()
1867 .dev = txq->dev, in idpf_tx_clean_buf_ring()
1873 tx_buf = &txq->tx_buf[idx]; in idpf_tx_clean_buf_ring()
1875 if (unlikely(tx_buf->type <= LIBETH_SQE_CTX || in idpf_tx_clean_buf_ring()
1879 if (tx_buf->type == LIBETH_SQE_SKB) in idpf_tx_clean_buf_ring()
1902 ntc = txq->next_to_clean; in idpf_tx_clean_buf_ring()
1903 tx_buf = &txq->tx_buf[ntc]; in idpf_tx_clean_buf_ring()
1905 if (tx_buf->type == LIBETH_SQE_CTX) in idpf_tx_clean_buf_ring()
1915 if (unlikely(tx_buf != &txq->tx_buf[orig_idx] && in idpf_tx_clean_buf_ring()
1924 txq->next_to_clean = idx; in idpf_tx_clean_buf_ring()
1930 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1948 u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); in idpf_tx_handle_rs_completion()
1954 compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); in idpf_tx_handle_rs_completion()
1964 * idpf_tx_clean_complq - Reclaim resources on completion queue
1975 s16 ntc = complq->next_to_clean; in idpf_tx_clean_complq()
1981 complq_budget = complq->clean_budget; in idpf_tx_clean_complq()
1982 tx_desc = &complq->comp[ntc]; in idpf_tx_clean_complq()
1983 ntc -= complq->desc_count; in idpf_tx_clean_complq()
1994 gen = le16_get_bits(tx_desc->qid_comptype_gen, in idpf_tx_clean_complq()
2000 rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen, in idpf_tx_clean_complq()
2002 if (rel_tx_qid >= complq->txq_grp->num_txq || in idpf_tx_clean_complq()
2003 !complq->txq_grp->txqs[rel_tx_qid]) { in idpf_tx_clean_complq()
2004 netdev_err(complq->netdev, "TxQ not found\n"); in idpf_tx_clean_complq()
2007 tx_q = complq->txq_grp->txqs[rel_tx_qid]; in idpf_tx_clean_complq()
2010 ctype = le16_get_bits(tx_desc->qid_comptype_gen, in idpf_tx_clean_complq()
2014 hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); in idpf_tx_clean_complq()
2027 netdev_err(tx_q->netdev, in idpf_tx_clean_complq()
2032 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_clean_complq()
2033 u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); in idpf_tx_clean_complq()
2034 u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); in idpf_tx_clean_complq()
2035 tx_q->cleaned_pkts += cleaned_stats.packets; in idpf_tx_clean_complq()
2036 tx_q->cleaned_bytes += cleaned_stats.bytes; in idpf_tx_clean_complq()
2037 complq->num_completions++; in idpf_tx_clean_complq()
2038 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_clean_complq()
2044 ntc -= complq->desc_count; in idpf_tx_clean_complq()
2045 tx_desc = &complq->comp[0]; in idpf_tx_clean_complq()
2052 complq_budget--; in idpf_tx_clean_complq()
2058 if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > in idpf_tx_clean_complq()
2062 np = netdev_priv(complq->netdev); in idpf_tx_clean_complq()
2063 for (i = 0; i < complq->txq_grp->num_txq; ++i) { in idpf_tx_clean_complq()
2064 struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; in idpf_tx_clean_complq()
2069 if (!tx_q->cleaned_bytes) in idpf_tx_clean_complq()
2072 *cleaned += tx_q->cleaned_pkts; in idpf_tx_clean_complq()
2075 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_clean_complq()
2078 np->state != __IDPF_VPORT_UP || in idpf_tx_clean_complq()
2079 !netif_carrier_ok(tx_q->netdev); in idpf_tx_clean_complq()
2081 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, in idpf_tx_clean_complq()
2088 tx_q->cleaned_bytes = 0; in idpf_tx_clean_complq()
2089 tx_q->cleaned_pkts = 0; in idpf_tx_clean_complq()
2092 ntc += complq->desc_count; in idpf_tx_clean_complq()
2093 complq->next_to_clean = ntc; in idpf_tx_clean_complq()
2099 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2110 desc->q.qw1.cmd_dtype = in idpf_tx_splitq_build_ctb()
2111 le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M); in idpf_tx_splitq_build_ctb()
2112 desc->q.qw1.cmd_dtype |= in idpf_tx_splitq_build_ctb()
2114 desc->q.qw1.buf_size = cpu_to_le16(size); in idpf_tx_splitq_build_ctb()
2115 desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); in idpf_tx_splitq_build_ctb()
2119 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2130 desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; in idpf_tx_splitq_build_flow_desc()
2131 desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); in idpf_tx_splitq_build_flow_desc()
2132 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); in idpf_tx_splitq_build_flow_desc()
2136 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2152 if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > in idpf_tx_maybe_stop_splitq()
2153 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) in idpf_tx_maybe_stop_splitq()
2165 netif_stop_subqueue(tx_q->netdev, tx_q->idx); in idpf_tx_maybe_stop_splitq()
2168 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
2169 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_maybe_stop_splitq()
2170 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_maybe_stop_splitq()
2172 return -EBUSY; in idpf_tx_maybe_stop_splitq()
2176 * idpf_tx_buf_hw_update - Store the new tail value
2190 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_buf_hw_update()
2191 tx_q->next_to_use = val; in idpf_tx_buf_hw_update()
2194 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_buf_hw_update()
2195 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_buf_hw_update()
2196 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_buf_hw_update()
2201 * applicable for weak-ordered memory model archs, in idpf_tx_buf_hw_update()
2202 * such as IA-64). in idpf_tx_buf_hw_update()
2208 writel(val, tx_q->tail); in idpf_tx_buf_hw_update()
2212 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2230 for (i = 0; i < shinfo->nr_frags; i++) { in idpf_tx_desc_count_required()
2233 size = skb_frag_size(&shinfo->frags[i]); in idpf_tx_desc_count_required()
2245 if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { in idpf_tx_desc_count_required()
2249 count = idpf_size_to_txd_count(skb->len); in idpf_tx_desc_count_required()
2250 u64_stats_update_begin(&txq->stats_sync); in idpf_tx_desc_count_required()
2251 u64_stats_inc(&txq->q_stats.linearize); in idpf_tx_desc_count_required()
2252 u64_stats_update_end(&txq->stats_sync); in idpf_tx_desc_count_required()
2259 * idpf_tx_dma_map_error - handle TX DMA map errors
2270 .dev = txq->dev, in idpf_tx_dma_map_error()
2274 u64_stats_update_begin(&txq->stats_sync); in idpf_tx_dma_map_error()
2275 u64_stats_inc(&txq->q_stats.dma_map_errs); in idpf_tx_dma_map_error()
2276 u64_stats_update_end(&txq->stats_sync); in idpf_tx_dma_map_error()
2282 tx_buf = &txq->tx_buf[idx]; in idpf_tx_dma_map_error()
2287 idx = txq->desc_count; in idpf_tx_dma_map_error()
2288 idx--; in idpf_tx_dma_map_error()
2298 tx_desc = &txq->flex_tx[idx]; in idpf_tx_dma_map_error()
2301 idx = txq->desc_count; in idpf_tx_dma_map_error()
2302 idx--; in idpf_tx_dma_map_error()
2310 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2318 if (ntu == txq->desc_count) { in idpf_tx_splitq_bump_ntu()
2320 txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); in idpf_tx_splitq_bump_ntu()
2327 * idpf_tx_splitq_map - Build the Tx flex descriptor
2343 u16 i = tx_q->next_to_use; in idpf_tx_splitq_map()
2350 skb = first->skb; in idpf_tx_splitq_map()
2352 td_cmd = params->offload.td_cmd; in idpf_tx_splitq_map()
2354 data_len = skb->data_len; in idpf_tx_splitq_map()
2357 tx_desc = &tx_q->flex_tx[i]; in idpf_tx_splitq_map()
2359 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); in idpf_tx_splitq_map()
2362 first->nr_frags = 0; in idpf_tx_splitq_map()
2364 params->compl_tag = in idpf_tx_splitq_map()
2365 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; in idpf_tx_splitq_map()
2367 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in idpf_tx_splitq_map()
2370 if (dma_mapping_error(tx_q->dev, dma)) in idpf_tx_splitq_map()
2373 first->nr_frags++; in idpf_tx_splitq_map()
2374 idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; in idpf_tx_splitq_map()
2375 tx_buf->type = LIBETH_SQE_FRAG; in idpf_tx_splitq_map()
2382 tx_desc->q.buf_addr = cpu_to_le64(dma); in idpf_tx_splitq_map()
2385 * single descriptor i.e. frag size > 16K-1. We will need to in idpf_tx_splitq_map()
2394 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2396 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2401 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2404 * ------------------------------------------------------------ in idpf_tx_splitq_map()
2410 * 4K - (DMA addr lower order bits) = in idpf_tx_splitq_map()
2415 * 13784 = 12K + (4096-2600) in idpf_tx_splitq_map()
2424 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); in idpf_tx_splitq_map()
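The comment block and the max_data adjustment above describe how an over-sized fragment is split so every chunk after the first starts on a 4 KiB boundary: the first descriptor carries the 12 KiB aligned maximum plus however many bytes remain to the next 4 KiB boundary of the DMA address. A worked form of that arithmetic, reusing the 12 KiB/4 KiB/2600 numbers from the comment above purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long dma_low = 2600;		/* example: low-order DMA address bits */
	unsigned int max_aligned = 12 * 1024;	/* max aligned bytes per descriptor */
	unsigned int read_req = 4096;		/* read-request alignment */

	/* -dma & (align - 1) is the distance to the next aligned boundary */
	unsigned int first = max_aligned + (unsigned int)(-dma_low & (read_req - 1));

	printf("first chunk: %u bytes\n", first);	/* 12288 + 1496 = 13784 */
	return 0;
}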
2429 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2430 tx_buf = tx_q->tx_buf; in idpf_tx_splitq_map()
2431 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2433 tx_q->compl_tag_cur_gen = in idpf_tx_splitq_map()
2450 tx_buf->type = LIBETH_SQE_EMPTY; in idpf_tx_splitq_map()
2454 * max_data will be >= 12K and <= 16K-1. On any in idpf_tx_splitq_map()
2459 size -= max_data; in idpf_tx_splitq_map()
2467 tx_desc->q.buf_addr = cpu_to_le64(dma); in idpf_tx_splitq_map()
2475 if (unlikely(++i == tx_q->desc_count)) { in idpf_tx_splitq_map()
2476 tx_buf = tx_q->tx_buf; in idpf_tx_splitq_map()
2477 tx_desc = &tx_q->flex_tx[0]; in idpf_tx_splitq_map()
2479 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); in idpf_tx_splitq_map()
2486 data_len -= size; in idpf_tx_splitq_map()
2488 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, in idpf_tx_splitq_map()
2495 first->type = LIBETH_SQE_SKB; in idpf_tx_splitq_map()
2498 first->rs_idx = i; in idpf_tx_splitq_map()
2499 td_cmd |= params->eop_cmd; in idpf_tx_splitq_map()
2503 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_map()
2506 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); in idpf_tx_splitq_map()
2507 netdev_tx_sent_queue(nq, first->bytes); in idpf_tx_splitq_map()
2513 * idpf_tso - computes mss and TSO length to prepare for TSO
2549 if (ip.v4->version == 4) { in idpf_tso()
2550 ip.v4->tot_len = 0; in idpf_tso()
2551 ip.v4->check = 0; in idpf_tso()
2552 } else if (ip.v6->version == 6) { in idpf_tso()
2553 ip.v6->payload_len = 0; in idpf_tso()
2559 paylen = skb->len - l4_start; in idpf_tso()
2561 switch (shinfo->gso_type & ~SKB_GSO_DODGY) { in idpf_tso()
2564 csum_replace_by_diff(&l4.tcp->check, in idpf_tso()
2566 off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; in idpf_tso()
2569 csum_replace_by_diff(&l4.udp->check, in idpf_tso()
2572 off->tso_hdr_len = sizeof(struct udphdr) + l4_start; in idpf_tso()
2573 l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); in idpf_tso()
2576 return -EINVAL; in idpf_tso()
2579 off->tso_len = skb->len - off->tso_hdr_len; in idpf_tso()
2580 off->mss = shinfo->gso_size; in idpf_tso()
2581 off->tso_segs = shinfo->gso_segs; in idpf_tso()
2583 off->tx_flags |= IDPF_TX_FLAGS_TSO; in idpf_tso()
2589 * __idpf_chk_linearize - Check skb is not using too many buffers
2594 * such we need to check cases where we have max_bufs-1 fragments or more as we
2596 * for the segment payload in the first descriptor, and another max_buf-1 for
2605 /* no need to check if number of frags is less than max_bufs - 1 */ in __idpf_chk_linearize()
2606 nr_frags = shinfo->nr_frags; in __idpf_chk_linearize()
2607 if (nr_frags < (max_bufs - 1)) in __idpf_chk_linearize()
2611 * of max_bufs-2 fragments totals at least gso_size. in __idpf_chk_linearize()
2613 nr_frags -= max_bufs - 2; in __idpf_chk_linearize()
2614 frag = &shinfo->frags[0]; in __idpf_chk_linearize()
2618 * provides one byte which is why we are limited to max_bufs-2 in __idpf_chk_linearize()
2622 sum = 1 - shinfo->gso_size; in __idpf_chk_linearize()
2634 for (stale = &shinfo->frags[0];; stale++) { in __idpf_chk_linearize()
2646 int align_pad = -(skb_frag_off(stale)) & in __idpf_chk_linearize()
2647 (IDPF_TX_MAX_READ_REQ_SIZE - 1); in __idpf_chk_linearize()
2649 sum -= align_pad; in __idpf_chk_linearize()
2650 stale_size -= align_pad; in __idpf_chk_linearize()
2653 sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED; in __idpf_chk_linearize()
2654 stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED; in __idpf_chk_linearize()
2662 if (!nr_frags--) in __idpf_chk_linearize()
2665 sum -= stale_size; in __idpf_chk_linearize()
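The __idpf_chk_linearize() fragments above implement a sliding-window test: starting from sum = 1 - gso_size, fragment sizes are added (with 4 KiB alignment padding subtracted) while stale fragments drop out of the window; if the running sum ever goes negative, no window of max_bufs - 2 fragments plus one byte from the fragment ahead can hold a full segment, so the skb must be linearized. A greatly simplified model of that check, ignoring the alignment corrections; the brute-force window walk and names here are mine, not the driver's:

#include <stdbool.h>
#include <stddef.h>

static bool demo_needs_linearize(const unsigned int *frag_sz, size_t nr_frags,
				 unsigned int max_bufs, unsigned int gso_size)
{
	size_t win = max_bufs - 2;

	/* few enough frags to always fit a single packet */
	if (nr_frags < (size_t)max_bufs - 1)
		return false;

	for (size_t start = 0; start + win <= nr_frags; start++) {
		unsigned long sum = 1;	/* one byte provided by the frag ahead */

		for (size_t i = start; i < start + win; i++)
			sum += frag_sz[i];

		if (sum < gso_size)
			return true;	/* this window cannot carry a segment */
	}

	return false;
}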
2672 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2678 * packet. We have to do some special checking around the boundary (max_bufs-1)
2695 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2705 int i = txq->next_to_use; in idpf_tx_splitq_get_ctx_desc()
2707 txq->tx_buf[i].type = LIBETH_SQE_CTX; in idpf_tx_splitq_get_ctx_desc()
2710 desc = &txq->flex_ctx[i]; in idpf_tx_splitq_get_ctx_desc()
2711 txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); in idpf_tx_splitq_get_ctx_desc()
2717 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2723 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_drop_skb()
2724 u64_stats_inc(&tx_q->q_stats.skb_drops); in idpf_tx_drop_skb()
2725 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_drop_skb()
2727 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_drop_skb()
2735 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2760 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_splitq_frame()
2770 ctx_desc->tso.qw1.cmd_dtype = in idpf_tx_splitq_frame()
2773 ctx_desc->tso.qw0.flex_tlen = in idpf_tx_splitq_frame()
2776 ctx_desc->tso.qw0.mss_rt = in idpf_tx_splitq_frame()
2779 ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; in idpf_tx_splitq_frame()
2781 u64_stats_update_begin(&tx_q->stats_sync); in idpf_tx_splitq_frame()
2782 u64_stats_inc(&tx_q->q_stats.lso_pkts); in idpf_tx_splitq_frame()
2783 u64_stats_update_end(&tx_q->stats_sync); in idpf_tx_splitq_frame()
2787 first = &tx_q->tx_buf[tx_q->next_to_use]; in idpf_tx_splitq_frame()
2788 first->skb = skb; in idpf_tx_splitq_frame()
2791 first->packets = tx_params.offload.tso_segs; in idpf_tx_splitq_frame()
2792 first->bytes = skb->len + in idpf_tx_splitq_frame()
2793 ((first->packets - 1) * tx_params.offload.tso_hdr_len); in idpf_tx_splitq_frame()
2795 first->packets = 1; in idpf_tx_splitq_frame()
2796 first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); in idpf_tx_splitq_frame()
2807 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { in idpf_tx_splitq_frame()
2809 tx_q->txq_grp->num_completions_pending++; in idpf_tx_splitq_frame()
2812 if (skb->ip_summed == CHECKSUM_PARTIAL) in idpf_tx_splitq_frame()
2819 if (skb->ip_summed == CHECKSUM_PARTIAL) in idpf_tx_splitq_frame()
2829 * idpf_tx_start - Selects the right Tx queue to send buffer
2840 if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { in idpf_tx_start()
2846 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; in idpf_tx_start()
2851 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { in idpf_tx_start()
2852 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); in idpf_tx_start()
2857 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_start()
2864 * idpf_rx_hash - set the hash value in the skb
2865 * @rxq: Rx descriptor ring packet is being transacted on
2871 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb, in idpf_rx_hash() argument
2877 if (!libeth_rx_pt_has_hash(rxq->netdev, decoded)) in idpf_rx_hash()
2880 hash = le16_to_cpu(rx_desc->hash1) | in idpf_rx_hash()
2881 (rx_desc->ff2_mirrid_hash2.hash2 << 16) | in idpf_rx_hash()
2882 (rx_desc->hash3 << 24); in idpf_rx_hash()
2888 * idpf_rx_csum - Indicate in skb if checksum is good
2889 * @rxq: Rx descriptor ring packet is being transacted on
2894 * skb->protocol must be set before this function is called
2896 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, in idpf_rx_csum() argument
2903 if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) in idpf_rx_csum()
2927 skb->ip_summed = CHECKSUM_UNNECESSARY; in idpf_rx_csum()
2931 skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum)); in idpf_rx_csum()
2932 skb->ip_summed = CHECKSUM_COMPLETE; in idpf_rx_csum()
2937 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_csum()
2938 u64_stats_inc(&rxq->q_stats.hw_csum_err); in idpf_rx_csum()
2939 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_csum()
2943 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2954 qword0 = rx_desc->status_err0_qw0; in idpf_rx_splitq_extract_csum_bits()
2955 qword1 = rx_desc->status_err0_qw1; in idpf_rx_splitq_extract_csum_bits()
2968 le16_get_bits(rx_desc->ptype_err_fflags0, in idpf_rx_splitq_extract_csum_bits()
2970 csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); in idpf_rx_splitq_extract_csum_bits()
2976 * idpf_rx_rsc - Set the RSC fields in the skb
2977 * @rxq : Rx descriptor ring packet is being transacted on
2987 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, in idpf_rx_rsc() argument
2997 return -EINVAL; in idpf_rx_rsc()
2999 rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); in idpf_rx_rsc()
3001 return -EINVAL; in idpf_rx_rsc()
3007 return -EINVAL; in idpf_rx_rsc()
3009 rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); in idpf_rx_rsc()
3013 NAPI_GRO_CB(skb)->count = rsc_segments; in idpf_rx_rsc()
3014 skb_shinfo(skb)->gso_size = rsc_seg_len; in idpf_rx_rsc()
3017 len = skb->len - skb_transport_offset(skb); in idpf_rx_rsc()
3022 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in idpf_rx_rsc()
3028 tcp_hdr(skb)->check = in idpf_rx_rsc()
3029 ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); in idpf_rx_rsc()
3033 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in idpf_rx_rsc()
3035 tcp_hdr(skb)->check = in idpf_rx_rsc()
3036 ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); in idpf_rx_rsc()
3041 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_rsc()
3042 u64_stats_inc(&rxq->q_stats.rsc_pkts); in idpf_rx_rsc()
3043 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_rsc()
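idpf_rx_rsc() above undoes hardware receive-segment coalescing for GRO: the descriptor reports the per-segment length, the segment count is recovered by rounding the payload length up, GRO is primed with that count and gso_size, and the TCP pseudo-header checksum is recomputed over the coalesced payload. The segment-count arithmetic shown standalone (helper name is mine, not the driver's):

/* DIV_ROUND_UP(data_len, seg_len) without the kernel macro */
static unsigned int demo_rsc_segments(unsigned int data_len,
				      unsigned int seg_len)
{
	return (data_len + seg_len - 1) / seg_len;
}

For example, a 9000-byte coalesced payload with 1448-byte segments yields demo_rsc_segments(9000, 1448) == 7.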
3049 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3050 * @rxq: Rx descriptor ring packet is being transacted on
3059 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, in idpf_rx_process_skb_fields() argument
3066 rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, in idpf_rx_process_skb_fields()
3068 decoded = rxq->rx_ptype_lkup[rx_ptype]; in idpf_rx_process_skb_fields()
3071 idpf_rx_hash(rxq, skb, rx_desc, decoded); in idpf_rx_process_skb_fields()
3073 skb->protocol = eth_type_trans(skb, rxq->netdev); in idpf_rx_process_skb_fields()
3075 if (le16_get_bits(rx_desc->hdrlen_flags, in idpf_rx_process_skb_fields()
3077 return idpf_rx_rsc(rxq, skb, rx_desc, decoded); in idpf_rx_process_skb_fields()
3080 idpf_rx_csum(rxq, skb, csum_bits, decoded); in idpf_rx_process_skb_fields()
3082 skb_record_rx_queue(skb, rxq->idx); in idpf_rx_process_skb_fields()
3088 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3093 * This function will add the data contained in rx_buf->page to the skb.
3100 u32 hr = rx_buf->page->pp->p.offset; in idpf_rx_add_frag()
3102 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, in idpf_rx_add_frag()
3103 rx_buf->offset + hr, size, rx_buf->truesize); in idpf_rx_add_frag()
3107 * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3115 * the header split is active since it doesn't reserve any head- or tailroom.
3132 dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; in idpf_rx_hsplit_wa()
3133 src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; in idpf_rx_hsplit_wa()
3136 buf->offset += copy; in idpf_rx_hsplit_wa()
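/*
 * Illustrative sketch only, not driver code: the header-split workaround
 * above copies the first bytes of the packet out of the data buffer into the
 * header buffer, then advances the data buffer's offset past what was copied
 * so the payload picks up where the copied header ends. A simplified,
 * pointer-only model with made-up names:
 */
#include <stddef.h>
#include <string.h>

struct sketch_buf {
	unsigned char *base;	/* page_address(page) plus page-pool headroom, simplified */
	size_t offset;		/* current offset into the buffer */
};

static size_t sketch_hsplit_wa(struct sketch_buf *hdr, struct sketch_buf *data,
			       size_t copy)
{
	memcpy(hdr->base + hdr->offset, data->base + data->offset, copy);
	data->offset += copy;	/* payload now starts after the copied header */
	return copy;
}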
3142 * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3151 u32 hr = buf->page->pp->p.offset; in idpf_rx_build_skb()
3155 va = page_address(buf->page) + buf->offset; in idpf_rx_build_skb()
3158 skb = napi_build_skb(va, buf->truesize); in idpf_rx_build_skb()
3171 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3184 * idpf_rx_splitq_is_eop - process handling of EOP buffers
3188 * otherwise return false indicating that this is in fact a non-EOP buffer.
3193 return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, in idpf_rx_splitq_is_eop()
3198 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3199 * @rxq: Rx descriptor queue to retrieve receive buffer queue
3209 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) in idpf_rx_splitq_clean()
3213 struct sk_buff *skb = rxq->skb; in idpf_rx_splitq_clean()
3214 u16 ntc = rxq->next_to_clean; in idpf_rx_splitq_clean()
3229 rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; in idpf_rx_splitq_clean()
3237 gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3240 if (idpf_queue_has(GEN_CHK, rxq) != gen_id) in idpf_rx_splitq_clean()
3244 rx_desc->rxdid_ucast); in idpf_rx_splitq_clean()
3246 IDPF_RX_BUMP_NTC(rxq, ntc); in idpf_rx_splitq_clean()
3247 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3248 u64_stats_inc(&rxq->q_stats.bad_descs); in idpf_rx_splitq_clean()
3249 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
3253 pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3256 bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, in idpf_rx_splitq_clean()
3259 rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); in idpf_rx_splitq_clean()
3260 refillq = rxq_set->refillq[bufq_id]; in idpf_rx_splitq_clean()
3262 /* retrieve buffer from the rxq */ in idpf_rx_splitq_clean()
3263 rx_bufq = &rxq->bufq_sets[bufq_id].bufq; in idpf_rx_splitq_clean()
3265 buf_id = le16_to_cpu(rx_desc->buf_id); in idpf_rx_splitq_clean()
3267 rx_buf = &rx_bufq->buf[buf_id]; in idpf_rx_splitq_clean()
3269 if (!rx_bufq->hdr_pp) in idpf_rx_splitq_clean()
3274 if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT))) in idpf_rx_splitq_clean()
3280 hdr_len = le16_get_bits(rx_desc->hdrlen_flags, in idpf_rx_splitq_clean()
3285 hdr = &rx_bufq->hdr_buf[buf_id]; in idpf_rx_splitq_clean()
3289 pkt_len -= hdr_len; in idpf_rx_splitq_clean()
3291 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3292 u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); in idpf_rx_splitq_clean()
3293 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
3301 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3302 u64_stats_inc(&rxq->q_stats.hsplit_pkts); in idpf_rx_splitq_clean()
3303 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
3306 hdr->page = NULL; in idpf_rx_splitq_clean()
3322 rx_buf->page = NULL; in idpf_rx_splitq_clean()
3325 IDPF_RX_BUMP_NTC(rxq, ntc); in idpf_rx_splitq_clean()
3338 total_rx_bytes += skb->len; in idpf_rx_splitq_clean()
3341 if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) { in idpf_rx_splitq_clean()
3348 napi_gro_receive(rxq->napi, skb); in idpf_rx_splitq_clean()
3355 rxq->next_to_clean = ntc; in idpf_rx_splitq_clean()
3357 rxq->skb = skb; in idpf_rx_splitq_clean()
3358 u64_stats_update_begin(&rxq->stats_sync); in idpf_rx_splitq_clean()
3359 u64_stats_add(&rxq->q_stats.packets, total_rx_pkts); in idpf_rx_splitq_clean()
3360 u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes); in idpf_rx_splitq_clean()
3361 u64_stats_update_end(&rxq->stats_sync); in idpf_rx_splitq_clean()
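/*
 * Illustrative sketch only, not driver code: the clean loop above stops when
 * a descriptor's generation bit no longer matches the software-tracked
 * generation (GEN_CHK), which flips every time next_to_clean wraps around the
 * ring. A simplified model of that bookkeeping, under the assumption that the
 * ring-bump helper flips the generation on wrap:
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_rxq {
	uint16_t ntc;		/* next_to_clean */
	uint16_t desc_count;
	bool gen;		/* software copy of the generation bit */
};

/* true when the descriptor at ntc has been written back by hardware */
static bool sketch_desc_done(const struct sketch_rxq *q, bool desc_gen_bit)
{
	return desc_gen_bit == q->gen;
}

static void sketch_bump_ntc(struct sketch_rxq *q)
{
	if (++q->ntc == q->desc_count) {
		q->ntc = 0;
		q->gen = !q->gen;	/* wrap: expect the opposite generation next pass */
	}
}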
3368 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3379 .pp = bufq->pp, in idpf_rx_update_bufq_desc()
3380 .fqes = bufq->buf, in idpf_rx_update_bufq_desc()
3381 .truesize = bufq->truesize, in idpf_rx_update_bufq_desc()
3382 .count = bufq->desc_count, in idpf_rx_update_bufq_desc()
3388 return -ENOMEM; in idpf_rx_update_bufq_desc()
3390 buf_desc->pkt_addr = cpu_to_le64(addr); in idpf_rx_update_bufq_desc()
3391 buf_desc->qword0.buf_id = cpu_to_le16(buf_id); in idpf_rx_update_bufq_desc()
3396 fq.pp = bufq->hdr_pp; in idpf_rx_update_bufq_desc()
3397 fq.fqes = bufq->hdr_buf; in idpf_rx_update_bufq_desc()
3398 fq.truesize = bufq->hdr_truesize; in idpf_rx_update_bufq_desc()
3402 return -ENOMEM; in idpf_rx_update_bufq_desc()
3404 buf_desc->hdr_addr = cpu_to_le64(addr); in idpf_rx_update_bufq_desc()
3410 * idpf_rx_clean_refillq - Clean refill queue buffers
3420 u16 bufq_nta = bufq->next_to_alloc; in idpf_rx_clean_refillq()
3421 u16 ntc = refillq->next_to_clean; in idpf_rx_clean_refillq()
3424 buf_desc = &bufq->split_buf[bufq_nta]; in idpf_rx_clean_refillq()
3427 while (likely(cleaned < refillq->desc_count)) { in idpf_rx_clean_refillq()
3428 u32 buf_id, refill_desc = refillq->ring[ntc]; in idpf_rx_clean_refillq()
3440 if (unlikely(++ntc == refillq->desc_count)) { in idpf_rx_clean_refillq()
3445 if (unlikely(++bufq_nta == bufq->desc_count)) { in idpf_rx_clean_refillq()
3446 buf_desc = &bufq->split_buf[0]; in idpf_rx_clean_refillq()
3462 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + in idpf_rx_clean_refillq()
3463 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) in idpf_rx_clean_refillq()
3468 refillq->next_to_clean = ntc; in idpf_rx_clean_refillq()
3469 bufq->next_to_alloc = bufq_nta; in idpf_rx_clean_refillq()
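/*
 * Illustrative sketch only, not driver code: the refill path above batches
 * its tail-bump writes - hardware is only notified once at least a "stride"
 * worth of buffer descriptors has been filled, using a wrap-aware distance
 * between next_to_alloc and next_to_use. The stride value below is an
 * assumption standing in for IDPF_RX_BUF_POST_STRIDE's role.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_POST_STRIDE	16U

static bool sketch_should_post(uint16_t next_to_use, uint16_t next_to_alloc,
			       uint16_t desc_count)
{
	/* add desc_count when next_to_alloc has wrapped behind next_to_use */
	uint16_t pending = (next_to_use <= next_to_alloc ? 0 : desc_count) +
			   next_to_alloc - next_to_use;

	return pending >= SKETCH_POST_STRIDE;
}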
3473 * idpf_rx_clean_refillq_all - Clean all refill queues
3486 page_pool_nid_changed(bufq->pp, nid); in idpf_rx_clean_refillq_all()
3487 if (bufq->hdr_pp) in idpf_rx_clean_refillq_all()
3488 page_pool_nid_changed(bufq->hdr_pp, nid); in idpf_rx_clean_refillq_all()
3491 for (i = 0; i < bufq_set->num_refillqs; i++) in idpf_rx_clean_refillq_all()
3492 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); in idpf_rx_clean_refillq_all()
3496 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3506 q_vector->total_events++; in idpf_vport_intr_clean_queues()
3507 napi_schedule(&q_vector->napi); in idpf_vport_intr_clean_queues()
3513 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3521 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_del_all()
3522 netif_napi_del(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_del_all()
3526 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3533 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_dis_all()
3534 napi_disable(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_dis_all()
3538 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3545 for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_rel()
3546 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_rel()
3548 kfree(q_vector->complq); in idpf_vport_intr_rel()
3549 q_vector->complq = NULL; in idpf_vport_intr_rel()
3550 kfree(q_vector->bufq); in idpf_vport_intr_rel()
3551 q_vector->bufq = NULL; in idpf_vport_intr_rel()
3552 kfree(q_vector->tx); in idpf_vport_intr_rel()
3553 q_vector->tx = NULL; in idpf_vport_intr_rel()
3554 kfree(q_vector->rx); in idpf_vport_intr_rel()
3555 q_vector->rx = NULL; in idpf_vport_intr_rel()
3557 free_cpumask_var(q_vector->affinity_mask); in idpf_vport_intr_rel()
3560 kfree(vport->q_vectors); in idpf_vport_intr_rel()
3561 vport->q_vectors = NULL; in idpf_vport_intr_rel()
3565 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3570 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_rel_irq()
3573 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_rel_irq()
3574 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_rel_irq()
3581 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_rel_irq()
3582 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_rel_irq()
3591 * idpf_vport_intr_dis_irq_all - Disable all interrupt
3596 struct idpf_q_vector *q_vector = vport->q_vectors; in idpf_vport_intr_dis_irq_all()
3599 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) in idpf_vport_intr_dis_irq_all()
3604 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3618 itr_val = q_vector->intr_reg.dyn_ctl_intena_m | in idpf_vport_intr_buildreg_itr()
3619 (type << q_vector->intr_reg.dyn_ctl_itridx_s) | in idpf_vport_intr_buildreg_itr()
3620 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); in idpf_vport_intr_buildreg_itr()
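/*
 * Illustrative sketch only, not driver code: the re-enable value above is
 * built by OR-ing the enable bit with the ITR index and the interval. Shifting
 * the interval by one position less than the register field places itr / 2 in
 * the field - the assumption here being that the register counts the interval
 * in 2us units while itr is kept in 1us units. Field names are made up.
 */
#include <stdint.h>

struct sketch_dyn_ctl {
	uint32_t intena_m;	/* interrupt-enable bit mask */
	uint32_t itridx_s;	/* ITR index field shift */
	uint32_t intrvl_s;	/* ITR interval field shift */
};

static uint32_t sketch_buildreg_itr(const struct sketch_dyn_ctl *reg,
				    uint32_t type, uint32_t itr)
{
	return reg->intena_m |
	       (type << reg->itridx_s) |
	       (itr << (reg->intrvl_s - 1));	/* effectively itr / 2 in register units */
}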
3626 * idpf_update_dim_sample - Update dim sample with packets and bytes
3640 dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); in idpf_update_dim_sample()
3641 dim_sample->comp_ctr = 0; in idpf_update_dim_sample()
3647 if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) in idpf_update_dim_sample()
3648 dim->state = DIM_START_MEASURE; in idpf_update_dim_sample()
3652 * idpf_net_dim - Update net DIM algorithm
3658 * This function is a no-op if the queue is not configured to dynamic ITR.
3666 if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) in idpf_net_dim()
3669 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { in idpf_net_dim()
3670 struct idpf_tx_queue *txq = q_vector->tx[i]; in idpf_net_dim()
3674 start = u64_stats_fetch_begin(&txq->stats_sync); in idpf_net_dim()
3675 packets += u64_stats_read(&txq->q_stats.packets); in idpf_net_dim()
3676 bytes += u64_stats_read(&txq->q_stats.bytes); in idpf_net_dim()
3677 } while (u64_stats_fetch_retry(&txq->stats_sync, start)); in idpf_net_dim()
3680 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, in idpf_net_dim()
3682 net_dim(&q_vector->tx_dim, dim_sample); in idpf_net_dim()
3685 if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) in idpf_net_dim()
3688 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { in idpf_net_dim()
3689 struct idpf_rx_queue *rxq = q_vector->rx[i]; in idpf_net_dim()
3693 start = u64_stats_fetch_begin(&rxq->stats_sync); in idpf_net_dim()
3694 packets += u64_stats_read(&rxq->q_stats.packets); in idpf_net_dim()
3695 bytes += u64_stats_read(&rxq->q_stats.bytes); in idpf_net_dim()
3696 } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); in idpf_net_dim()
3699 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, in idpf_net_dim()
3701 net_dim(&q_vector->rx_dim, dim_sample); in idpf_net_dim()
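/*
 * Illustrative sketch only, not driver code: the per-queue packet/byte totals
 * above are read with the u64_stats begin/retry pattern so a reader never
 * observes a torn pair while a writer on another CPU is mid-update. A
 * simplified seqcount-style model of that pattern, with made-up names:
 */
#include <stdint.h>

struct sketch_stats {
	volatile unsigned int seq;	/* odd while a writer is mid-update */
	uint64_t packets;
	uint64_t bytes;
};

static void sketch_read_stats(const struct sketch_stats *s,
			      uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		while ((start = s->seq) & 1)
			;			/* writer in progress, wait */
		*packets = s->packets;
		*bytes = s->bytes;
	} while (s->seq != start);		/* retry if a writer raced the read */
}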
3705 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3708 * Update the net_dim() algorithm and re-enable the interrupt associated with
3715 /* net_dim() updates ITR out-of-band using a work item */ in idpf_vport_intr_update_itr_ena_irq()
3718 q_vector->wb_on_itr = false; in idpf_vport_intr_update_itr_ena_irq()
3722 writel(intval, q_vector->intr_reg.dyn_ctl); in idpf_vport_intr_update_itr_ena_irq()
3726 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
3731 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_req_irq()
3735 drv_name = dev_driver_string(&adapter->pdev->dev); in idpf_vport_intr_req_irq()
3736 if_name = netdev_name(vport->netdev); in idpf_vport_intr_req_irq()
3738 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_req_irq()
3739 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_req_irq()
3742 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
3743 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_req_irq()
3745 if (q_vector->num_rxq && q_vector->num_txq) in idpf_vport_intr_req_irq()
3747 else if (q_vector->num_rxq) in idpf_vport_intr_req_irq()
3749 else if (q_vector->num_txq) in idpf_vport_intr_req_irq()
3754 name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name, in idpf_vport_intr_req_irq()
3760 netdev_err(vport->netdev, in idpf_vport_intr_req_irq()
3765 irq_set_affinity_hint(irq_num, q_vector->affinity_mask); in idpf_vport_intr_req_irq()
3771 while (--vector >= 0) { in idpf_vport_intr_req_irq()
3772 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
3773 irq_num = adapter->msix_entries[vidx].vector; in idpf_vport_intr_req_irq()
3774 kfree(free_irq(irq_num, &vport->q_vectors[vector])); in idpf_vport_intr_req_irq()
3781 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3790 if (tx && !q_vector->tx) in idpf_vport_intr_write_itr()
3792 else if (!tx && !q_vector->rx) in idpf_vport_intr_write_itr()
3795 intr_reg = &q_vector->intr_reg; in idpf_vport_intr_write_itr()
3797 tx ? intr_reg->tx_itr : intr_reg->rx_itr); in idpf_vport_intr_write_itr()
3801 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3810 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_ena_irq_all()
3811 struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; in idpf_vport_intr_ena_irq_all()
3814 if (qv->num_txq) { in idpf_vport_intr_ena_irq_all()
3815 dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); in idpf_vport_intr_ena_irq_all()
3816 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
3818 itr : qv->tx_itr_value, in idpf_vport_intr_ena_irq_all()
3822 if (qv->num_rxq) { in idpf_vport_intr_ena_irq_all()
3823 dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); in idpf_vport_intr_ena_irq_all()
3824 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
3826 itr : qv->rx_itr_value, in idpf_vport_intr_ena_irq_all()
3830 if (qv->num_txq || qv->num_rxq) in idpf_vport_intr_ena_irq_all()
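/*
 * Illustrative sketch only, not driver code: for every vector the enable path
 * above picks which ITR to program - the DIM profile entry when the queue
 * runs in dynamic mode, the fixed value otherwise - and only vectors that
 * actually own TX or RX queues get their interrupt re-enabled. A minimal
 * decision helper with assumed names:
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t sketch_pick_itr(bool dynamic, const uint16_t *profile,
				uint8_t profile_ix, uint16_t static_itr)
{
	return dynamic ? profile[profile_ix] : static_itr;
}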
3836 * idpf_vport_intr_deinit - Release all vector associations for the vport
3848 * idpf_tx_dim_work - Call back from the stack
3860 vport = q_vector->vport; in idpf_tx_dim_work()
3862 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) in idpf_tx_dim_work()
3863 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; in idpf_tx_dim_work()
3866 itr = vport->tx_itr_profile[dim->profile_ix]; in idpf_tx_dim_work()
3870 dim->state = DIM_START_MEASURE; in idpf_tx_dim_work()
3874 * idpf_rx_dim_work - Call back from the stack
3886 vport = q_vector->vport; in idpf_rx_dim_work()
3888 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) in idpf_rx_dim_work()
3889 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; in idpf_rx_dim_work()
3892 itr = vport->rx_itr_profile[dim->profile_ix]; in idpf_rx_dim_work()
3896 dim->state = DIM_START_MEASURE; in idpf_rx_dim_work()
3900 * idpf_init_dim - Set up dynamic interrupt moderation
3905 INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work); in idpf_init_dim()
3906 qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in idpf_init_dim()
3907 qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; in idpf_init_dim()
3909 INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work); in idpf_init_dim()
3910 qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in idpf_init_dim()
3911 qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; in idpf_init_dim()
3915 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3922 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_napi_ena_all()
3923 struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; in idpf_vport_intr_napi_ena_all()
3926 napi_enable(&q_vector->napi); in idpf_vport_intr_napi_ena_all()
3931 * idpf_tx_splitq_clean_all - Clean completion queues in idpf_tx_splitq_clean_all()
3941 u16 num_complq = q_vec->num_complq; in idpf_tx_splitq_clean_all()
3951 clean_complete &= idpf_tx_clean_complq(q_vec->complq[i], in idpf_tx_splitq_clean_all()
3958 * idpf_rx_splitq_clean_all - Clean all RX queues on the vector in idpf_rx_splitq_clean_all()
3968 u16 num_rxq = q_vec->num_rxq; in idpf_rx_splitq_clean_all()
3979 struct idpf_rx_queue *rxq = q_vec->rx[i]; in idpf_rx_splitq_clean_all()
3982 pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); in idpf_rx_splitq_clean_all()
3992 for (i = 0; i < q_vec->num_bufq; i++) in idpf_rx_splitq_clean_all()
3993 idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); in idpf_rx_splitq_clean_all()
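/*
 * Illustrative sketch only, not driver code: the RX clean-all path above
 * splits the NAPI budget across the vector's RX queues, sums what each queue
 * actually cleaned, and reports "work complete" only if every queue stayed
 * under its share. The max(.., 1) guard is an assumption mirroring common
 * NAPI practice, not something shown in the listed lines.
 */
#include <stdbool.h>

static bool sketch_rx_clean_all(int (*clean_one)(int qid, int budget),
				int num_rxq, int budget, int *cleaned_total)
{
	int budget_per_q = num_rxq ? (budget / num_rxq ? budget / num_rxq : 1) : 0;
	bool complete = true;

	*cleaned_total = 0;
	for (int qid = 0; qid < num_rxq; qid++) {
		int cleaned = clean_one(qid, budget_per_q);

		*cleaned_total += cleaned;
		complete = complete && (cleaned < budget_per_q);
	}
	return complete;
}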
3999 * idpf_vport_splitq_napi_poll - NAPI handler
4026 work_done = min_t(int, work_done, budget - 1); in idpf_vport_splitq_napi_poll()
4028 /* Exit the polling mode, but don't re-enable interrupts if stack might in idpf_vport_splitq_napi_poll()
4029 * poll us due to busy-polling in idpf_vport_splitq_napi_poll()
4036 /* Switch to poll mode in the tear-down path after sending disable in idpf_vport_splitq_napi_poll()
4040 if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, in idpf_vport_splitq_napi_poll()
4041 q_vector->tx[0]))) in idpf_vport_splitq_napi_poll()
4048 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4055 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_vport_intr_map_vector_to_qs()
4056 u16 num_txq_grp = vport->num_txq_grp; in idpf_vport_intr_map_vector_to_qs()
4061 for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { in idpf_vport_intr_map_vector_to_qs()
4064 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4067 rx_qgrp = &vport->rxq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4069 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_vport_intr_map_vector_to_qs()
4071 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_vport_intr_map_vector_to_qs()
4077 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_vport_intr_map_vector_to_qs()
4079 q = rx_qgrp->singleq.rxqs[j]; in idpf_vport_intr_map_vector_to_qs()
4080 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4081 q_index = q->q_vector->num_rxq; in idpf_vport_intr_map_vector_to_qs()
4082 q->q_vector->rx[q_index] = q; in idpf_vport_intr_map_vector_to_qs()
4083 q->q_vector->num_rxq++; in idpf_vport_intr_map_vector_to_qs()
4086 q->napi = &q->q_vector->napi; in idpf_vport_intr_map_vector_to_qs()
4090 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_vport_intr_map_vector_to_qs()
4093 bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_vport_intr_map_vector_to_qs()
4094 bufq->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4095 q_index = bufq->q_vector->num_bufq; in idpf_vport_intr_map_vector_to_qs()
4096 bufq->q_vector->bufq[q_index] = bufq; in idpf_vport_intr_map_vector_to_qs()
4097 bufq->q_vector->num_bufq++; in idpf_vport_intr_map_vector_to_qs()
4104 split = idpf_is_queue_model_split(vport->txq_model); in idpf_vport_intr_map_vector_to_qs()
4109 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4112 tx_qgrp = &vport->txq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4113 num_txq = tx_qgrp->num_txq; in idpf_vport_intr_map_vector_to_qs()
4118 q = tx_qgrp->txqs[j]; in idpf_vport_intr_map_vector_to_qs()
4119 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4120 q->q_vector->tx[q->q_vector->num_txq++] = q; in idpf_vport_intr_map_vector_to_qs()
4124 struct idpf_compl_queue *q = tx_qgrp->complq; in idpf_vport_intr_map_vector_to_qs()
4126 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4127 q->q_vector->complq[q->q_vector->num_complq++] = q; in idpf_vport_intr_map_vector_to_qs()
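/*
 * Illustrative sketch only, not driver code: the mapping above walks the
 * queue groups and assigns each queue to the "current" vector, resetting the
 * vector index to zero once it reaches num_q_vectors, so queues are spread
 * round-robin when there are more groups than vectors. A simplified model:
 */
static void sketch_map_queues(int *q_to_vec, int num_queues, int num_vectors)
{
	int v = 0;

	for (int q = 0; q < num_queues; q++) {
		if (v >= num_vectors)	/* wrap, mirrors the qv_idx reset above */
			v = 0;
		q_to_vec[q] = v++;
	}
}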
4135 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4142 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_init_vec_idx()
4147 ac = adapter->req_vec_chunks; in idpf_vport_intr_init_vec_idx()
4149 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4150 vport->q_vectors[i].v_idx = vport->q_vector_idxs[i]; in idpf_vport_intr_init_vec_idx()
4158 return -ENOMEM; in idpf_vport_intr_init_vec_idx()
4160 idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks); in idpf_vport_intr_init_vec_idx()
4162 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4163 vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]]; in idpf_vport_intr_init_vec_idx()
4171 * idpf_vport_intr_napi_add_all - Register napi handler for all qvectors in idpf_vport_intr_napi_add_all()
4179 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_intr_napi_add_all()
4184 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_napi_add_all()
4185 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_napi_add_all()
4187 netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); in idpf_vport_intr_napi_add_all()
4191 cpumask_set_cpu(v_idx, q_vector->affinity_mask); in idpf_vport_intr_napi_add_all()
4196 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4200 * return -ENOMEM.
4208 vport->q_vectors = kcalloc(vport->num_q_vectors, in idpf_vport_intr_alloc()
4210 if (!vport->q_vectors) in idpf_vport_intr_alloc()
4211 return -ENOMEM; in idpf_vport_intr_alloc()
4213 txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4214 vport->num_q_vectors); in idpf_vport_intr_alloc()
4215 rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4216 vport->num_q_vectors); in idpf_vport_intr_alloc()
4217 bufqs_per_vector = vport->num_bufqs_per_qgrp * in idpf_vport_intr_alloc()
4218 DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4219 vport->num_q_vectors); in idpf_vport_intr_alloc()
4220 complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4221 vport->num_q_vectors); in idpf_vport_intr_alloc()
4223 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_alloc()
4224 q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_alloc()
4225 q_vector->vport = vport; in idpf_vport_intr_alloc()
4227 q_vector->tx_itr_value = IDPF_ITR_TX_DEF; in idpf_vport_intr_alloc()
4228 q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC; in idpf_vport_intr_alloc()
4229 q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; in idpf_vport_intr_alloc()
4231 q_vector->rx_itr_value = IDPF_ITR_RX_DEF; in idpf_vport_intr_alloc()
4232 q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; in idpf_vport_intr_alloc()
4233 q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; in idpf_vport_intr_alloc()
4235 if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) in idpf_vport_intr_alloc()
4238 q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), in idpf_vport_intr_alloc()
4240 if (!q_vector->tx) in idpf_vport_intr_alloc()
4243 q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx), in idpf_vport_intr_alloc()
4245 if (!q_vector->rx) in idpf_vport_intr_alloc()
4248 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_intr_alloc()
4251 q_vector->bufq = kcalloc(bufqs_per_vector, in idpf_vport_intr_alloc()
4252 sizeof(*q_vector->bufq), in idpf_vport_intr_alloc()
4254 if (!q_vector->bufq) in idpf_vport_intr_alloc()
4257 q_vector->complq = kcalloc(complqs_per_vector, in idpf_vport_intr_alloc()
4258 sizeof(*q_vector->complq), in idpf_vport_intr_alloc()
4260 if (!q_vector->complq) in idpf_vport_intr_alloc()
4269 return -ENOMEM; in idpf_vport_intr_alloc()
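/*
 * Illustrative sketch only, not driver code: the allocation above sizes each
 * vector's queue-pointer arrays for the worst case by rounding the group
 * counts up against the number of vectors, so the later vector-to-queue
 * mapping can never overflow them. Directly mirrors the DIV_ROUND_UP math in
 * the listed lines, with made-up names:
 */
#include <stdint.h>

#define SKETCH_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct sketch_per_vector_sizes {
	uint16_t txqs, rxqs, bufqs, complqs;
};

static struct sketch_per_vector_sizes
sketch_size_vector_arrays(uint16_t num_txq_grp, uint16_t num_rxq_grp,
			  uint16_t bufqs_per_qgrp, uint16_t num_q_vectors)
{
	struct sketch_per_vector_sizes s;

	s.txqs = SKETCH_DIV_ROUND_UP(num_txq_grp, num_q_vectors);
	s.rxqs = SKETCH_DIV_ROUND_UP(num_rxq_grp, num_q_vectors);
	s.bufqs = bufqs_per_qgrp * SKETCH_DIV_ROUND_UP(num_rxq_grp, num_q_vectors);
	s.complqs = SKETCH_DIV_ROUND_UP(num_txq_grp, num_q_vectors);
	return s;
}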
4273 * idpf_vport_intr_init - Setup all vectors for the given vport
4289 err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); in idpf_vport_intr_init()
4312 * idpf_config_rss - Send virtchnl messages to configure RSS
4329 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4334 struct idpf_adapter *adapter = vport->adapter; in idpf_fill_dflt_rss_lut()
4335 u16 num_active_rxq = vport->num_rxq; in idpf_fill_dflt_rss_lut()
4339 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_fill_dflt_rss_lut()
4341 for (i = 0; i < rss_data->rss_lut_size; i++) { in idpf_fill_dflt_rss_lut()
4342 rss_data->rss_lut[i] = i % num_active_rxq; in idpf_fill_dflt_rss_lut()
4343 rss_data->cached_lut[i] = rss_data->rss_lut[i]; in idpf_fill_dflt_rss_lut()
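/*
 * Illustrative sketch only, not driver code: the default RSS indirection
 * table above simply spreads LUT entries across the active RX queues with a
 * modulo, keeping a cached copy so the table can be restored later:
 */
#include <stdint.h>

static void sketch_fill_dflt_rss_lut(uint32_t *lut, uint32_t *cached_lut,
				     uint16_t lut_size, uint16_t num_active_rxq)
{
	for (uint16_t i = 0; i < lut_size; i++) {
		lut[i] = i % num_active_rxq;	/* round-robin over active RX queues */
		cached_lut[i] = lut[i];
	}
}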
4348 * idpf_init_rss - Allocate and initialize RSS resources
4355 struct idpf_adapter *adapter = vport->adapter; in idpf_init_rss()
4359 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_init_rss()
4361 lut_size = rss_data->rss_lut_size * sizeof(u32); in idpf_init_rss()
4362 rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL); in idpf_init_rss()
4363 if (!rss_data->rss_lut) in idpf_init_rss()
4364 return -ENOMEM; in idpf_init_rss()
4366 rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL); in idpf_init_rss()
4367 if (!rss_data->cached_lut) { in idpf_init_rss()
4368 kfree(rss_data->rss_lut); in idpf_init_rss()
4369 rss_data->rss_lut = NULL; in idpf_init_rss()
4371 return -ENOMEM; in idpf_init_rss()
4381 * idpf_deinit_rss - Release RSS resources
4386 struct idpf_adapter *adapter = vport->adapter; in idpf_deinit_rss()
4389 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_deinit_rss()
4390 kfree(rss_data->cached_lut); in idpf_deinit_rss()
4391 rss_data->cached_lut = NULL; in idpf_deinit_rss()
4392 kfree(rss_data->rss_lut); in idpf_deinit_rss()
4393 rss_data->rss_lut = NULL; in idpf_deinit_rss()