Lines matching "queue", "pkt", "tx"
1 // SPDX-License-Identifier: GPL-2.0
21 #define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
25 unsigned long csr_base = (unsigned long)ring->doorbell; in fbnic_ring_csr_base()
27 csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1); in fbnic_ring_csr_base()
48 return (ring->head - ring->tail - 1) & ring->size_mask; in fbnic_desc_unused()
53 return (ring->tail - ring->head) & ring->size_mask; in fbnic_desc_used()
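Both occupancy helpers above depend on the ring size being a power of two: head and tail are free-running counters that are only masked on use, so the unsigned subtraction wraps correctly modulo the ring size. A minimal standalone sketch of the same arithmetic (the ring size and index values below are made up for illustration):

	#include <assert.h>

	/* Illustrative power-of-two ring occupancy math, mirroring
	 * fbnic_desc_unused()/fbnic_desc_used(); size_mask = size - 1.
	 */
	static unsigned int ring_unused(unsigned int head, unsigned int tail,
					unsigned int size_mask)
	{
		return (head - tail - 1) & size_mask;
	}

	static unsigned int ring_used(unsigned int head, unsigned int tail,
				      unsigned int size_mask)
	{
		return (tail - head) & size_mask;
	}

	int main(void)
	{
		unsigned int mask = 255;	/* hypothetical 256-entry ring */

		/* tail has wrapped past head; masking keeps counts sane */
		assert(ring_used(250, 5, mask) == 11);
		assert(ring_unused(250, 5, mask) == 244);	/* used + unused = size - 1 */
		return 0;
	}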
59 return netdev_get_tx_queue(dev, ring->q_idx); in txring_txq()
77 struct netdev_queue *dev_queue = txring_txq(skb->dev, ring); in fbnic_tx_sent_queue()
78 unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount; in fbnic_tx_sent_queue()
118 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_tx_offloads()
122  		l3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_tx_offloads()
125 skb->csum_offset / 2)); in fbnic_tx_offloads()
139 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) in fbnic_rx_csum()
143 skb->ip_summed = CHECKSUM_UNNECESSARY; in fbnic_rx_csum()
147 skb->ip_summed = CHECKSUM_COMPLETE; in fbnic_rx_csum()
148 skb->csum = (__force __wsum)csum; in fbnic_rx_csum()
155 struct device *dev = skb->dev->dev.parent; in fbnic_tx_map()
156 unsigned int tail = ring->tail, first; in fbnic_tx_map()
162 ring->tx_buf[tail] = skb; in fbnic_tx_map()
165 tail &= ring->size_mask; in fbnic_tx_map()
169 data_len = skb->data_len; in fbnic_tx_map()
174 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in fbnic_tx_map()
176 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in fbnic_tx_map()
177 twd = &ring->desc[tail]; in fbnic_tx_map()
188 tail &= ring->size_mask; in fbnic_tx_map()
194 data_len -= size; in fbnic_tx_map()
204 FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask; in fbnic_tx_map()
206 ring->tail = tail; in fbnic_tx_map()
209 fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); in fbnic_tx_map()
217 writel(tail, ring->doorbell); in fbnic_tx_map()
223 netdev_err(skb->dev, "TX DMA map failed\n"); in fbnic_tx_map()
226 tail--; in fbnic_tx_map()
227 tail &= ring->size_mask; in fbnic_tx_map()
228 twd = &ring->desc[tail]; in fbnic_tx_map()
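When a DMA mapping fails mid-packet, the unwind path above walks tail backwards one slot at a time, re-masking after every decrement so the walk wraps cleanly through index zero back to the first mapped descriptor. A small userspace sketch of that reverse traversal (slot numbers are fabricated):

	#include <stdio.h>

	int main(void)
	{
		unsigned int size_mask = 7;	/* hypothetical 8-entry ring */
		unsigned int first = 6;		/* mapped slots 6, 7, 0 ... */
		unsigned int tail = 1;		/* ... then failed at slot 1 */

		/* Walk back from the failed slot, wrapping through 0 the
		 * same way the unwind loop in fbnic_tx_map() does with
		 * tail--; tail &= ring->size_mask;
		 */
		while (tail != first) {
			tail--;
			tail &= size_mask;
			printf("unmap slot %u\n", tail);	/* prints 0, 7, 6 */
		}
		return 0;
	}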
243 __le64 *meta = &ring->desc[ring->tail]; in fbnic_xmit_frame_ring()
255 desc_needed = skb_shinfo(skb)->nr_frags + 10; in fbnic_xmit_frame_ring()
256 if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed)) in fbnic_xmit_frame_ring()
262 FBNIC_XMIT_CB(skb)->bytecount = skb->len; in fbnic_xmit_frame_ring()
263 FBNIC_XMIT_CB(skb)->desc_count = 0; in fbnic_xmit_frame_ring()
276 u64_stats_update_begin(&ring->stats.syncp); in fbnic_xmit_frame_ring()
277 ring->stats.dropped++; in fbnic_xmit_frame_ring()
278 u64_stats_update_end(&ring->stats.syncp); in fbnic_xmit_frame_ring()
285 unsigned int q_map = skb->queue_mapping; in fbnic_xmit_frame()
287 return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); in fbnic_xmit_frame()
296 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) in fbnic_features_check()
300 l3len = skb_checksum_start(skb) - skb_network_header(skb); in fbnic_features_check()
306 if ((l2len | l3len | skb->csum_offset) % 2 || in fbnic_features_check()
309 !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) in fbnic_features_check()
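The check above encodes a hardware constraint: header lengths and the checksum offset are carried in the descriptor as half-word counts, so each must be even, and the halved checksum offset must fit its descriptor field. An illustrative userspace stand-in for just that validation (the 7-bit field width is an assumption for the example, not the real FBNIC_TWD_CSUM_OFFSET_MASK):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for a FIELD_FIT()-style range check against a
	 * low-bits mask; the width here is hypothetical.
	 */
	#define CSUM_OFFSET_MASK	0x7fu

	static bool offload_ok(unsigned int l2len, unsigned int l3len,
			       unsigned int csum_offset)
	{
		if ((l2len | l3len | csum_offset) % 2)	/* all must be even */
			return false;
		return (csum_offset / 2) <= CSUM_OFFSET_MASK;
	}

	int main(void)
	{
		printf("%d\n", offload_ok(14, 20, 16));	/* 1: even, in range */
		printf("%d\n", offload_ok(14, 21, 16));	/* 0: odd l3len */
		return 0;
	}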
320 unsigned int head = ring->head; in fbnic_clean_twq0()
324 clean_desc = (hw_head - head) & ring->size_mask; in fbnic_clean_twq0()
327 struct sk_buff *skb = ring->tx_buf[head]; in fbnic_clean_twq0()
330 desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; in fbnic_clean_twq0()
334 ring->tx_buf[head] = NULL; in fbnic_clean_twq0()
336 clean_desc -= desc_cnt; in fbnic_clean_twq0()
338 while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) { in fbnic_clean_twq0()
340 head &= ring->size_mask; in fbnic_clean_twq0()
341 desc_cnt--; in fbnic_clean_twq0()
344 fbnic_unmap_single_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
346 head &= ring->size_mask; in fbnic_clean_twq0()
347 desc_cnt--; in fbnic_clean_twq0()
349 while (desc_cnt--) { in fbnic_clean_twq0()
350 fbnic_unmap_page_twd(nv->dev, &ring->desc[head]); in fbnic_clean_twq0()
352 head &= ring->size_mask; in fbnic_clean_twq0()
355 total_bytes += FBNIC_XMIT_CB(skb)->bytecount; in fbnic_clean_twq0()
364 ring->head = head; in fbnic_clean_twq0()
366 txq = txring_txq(nv->napi.dev, ring); in fbnic_clean_twq0()
369 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
370 ring->stats.dropped += total_packets; in fbnic_clean_twq0()
371 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
377 u64_stats_update_begin(&ring->stats.syncp); in fbnic_clean_twq0()
378 ring->stats.bytes += total_bytes; in fbnic_clean_twq0()
379 ring->stats.packets += total_packets; in fbnic_clean_twq0()
380 u64_stats_update_end(&ring->stats.syncp); in fbnic_clean_twq0()
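The update_begin/update_end pairs above are the writer half of the u64_stats protocol; any concurrent reader (an ndo_get_stats64 implementation, for example) is expected to pair them with a fetch/retry loop so 64-bit counters read consistently on 32-bit machines. A minimal sketch of that reader side, assuming the same fbnic_queue_stats layout used above (not standalone code; it relies on the driver's headers):

	/* Illustrative reader pairing with the u64_stats_update_begin()/
	 * u64_stats_update_end() writers in fbnic_clean_twq0() above.
	 */
	static void ring_stats_read(const struct fbnic_ring *ring,
				    u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&ring->stats.syncp);
			*packets = ring->stats.packets;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
	}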
390 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_init()
393 rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; in fbnic_page_pool_init()
394 rx_buf->page = page; in fbnic_page_pool_init()
400 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_get()
402 rx_buf->pagecnt_bias--; in fbnic_page_pool_get()
404 return rx_buf->page; in fbnic_page_pool_get()
410 struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; in fbnic_page_pool_drain()
411 struct page *page = rx_buf->page; in fbnic_page_pool_drain()
413 if (!page_pool_unref_page(page, rx_buf->pagecnt_bias)) in fbnic_page_pool_drain()
414 page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget); in fbnic_page_pool_drain()
416 rx_buf->page = NULL; in fbnic_page_pool_drain()
423 fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); in fbnic_clean_twq()
430 struct fbnic_ring *cmpl = &qt->cmpl; in fbnic_clean_tcq()
432 u32 head = cmpl->head; in fbnic_clean_tcq()
433 s32 head0 = -1; in fbnic_clean_tcq()
435 done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); in fbnic_clean_tcq()
436 raw_tcd = &cmpl->desc[head & cmpl->size_mask]; in fbnic_clean_tcq()
438 /* Walk the completion queue collecting the heads reported by NIC */ in fbnic_clean_tcq()
462 if (!(head & cmpl->size_mask)) { in fbnic_clean_tcq()
464 raw_tcd = &cmpl->desc[0]; in fbnic_clean_tcq()
468 /* Record the current head/tail of the queue */ in fbnic_clean_tcq()
469 if (cmpl->head != head) { in fbnic_clean_tcq()
470 cmpl->head = head; in fbnic_clean_tcq()
471 writel(head & cmpl->size_mask, cmpl->doorbell); in fbnic_clean_tcq()
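The `done` computation above flips the expected DONE value on every wrap: head carries one extra "phase" bit above size_mask, so software can distinguish a stale descriptor from a freshly written one without ever clearing the bit. A userspace sketch of the phase test (ring size fabricated; note fbnic_clean_rcq() starts with the opposite phase):

	#include <stdio.h>

	int main(void)
	{
		unsigned int size_mask = 7;	/* hypothetical 8-entry ring */
		unsigned int head;

		/* head counts 0..15 before wrapping: bit 3 is the phase
		 * bit. A descriptor is consumable when its DONE bit
		 * matches the phase expected for the current lap, as in
		 * fbnic_clean_tcq().
		 */
		for (head = 0; head < 16; head++) {
			unsigned int idx = head & size_mask;
			unsigned int expect_done = !(head & (size_mask + 1));

			printf("head %2u -> slot %u expects DONE=%u\n",
			       head, idx, expect_done);
		}
		return 0;
	}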
481 unsigned int head = ring->head; in fbnic_clean_bdq()
490 head &= ring->size_mask; in fbnic_clean_bdq()
493 ring->head = head; in fbnic_clean_bdq()
498 __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT]; in fbnic_bd_prep()
514 } while (--i); in fbnic_bd_prep()
520 unsigned int i = bdq->tail; in fbnic_fill_bdq()
528 page = page_pool_dev_alloc_pages(nv->page_pool); in fbnic_fill_bdq()
536 i &= bdq->size_mask; in fbnic_fill_bdq()
538 count--; in fbnic_fill_bdq()
541 if (bdq->tail != i) { in fbnic_fill_bdq()
542 bdq->tail = i; in fbnic_fill_bdq()
547 writel(i, bdq->doorbell); in fbnic_fill_bdq()
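Note that the doorbell is only written when the tail actually advanced, so a fill pass that allocated nothing costs no MMIO. A tiny sketch of that guard plus the wrapping producer index (values invented; the doorbell write stands in for writel()):

	#include <stdio.h>

	static void doorbell_write(unsigned int v)
	{
		printf("MMIO doorbell <- %u\n", v);	/* stand-in for writel() */
	}

	int main(void)
	{
		unsigned int size_mask = 7, tail = 6, count = 3;
		unsigned int i = tail;

		/* Advance the producer index across three slots, wrapping
		 * through 0 like the fill loop in fbnic_fill_bdq()
		 */
		while (count--) {
			i++;
			i &= size_mask;
		}

		/* Only touch the (expensive) doorbell if tail moved */
		if (i != tail) {
			tail = i;
			doorbell_write(i);
		}
		return 0;
	}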
561 return pg_off - FBNIC_RX_HROOM; in fbnic_hdr_pg_start()
571 return ALIGN(pg_off, 128) - FBNIC_RX_HROOM; in fbnic_hdr_pg_end()
575 struct fbnic_pkt_buff *pkt, in fbnic_pkt_prepare() argument
580 struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx); in fbnic_pkt_prepare()
586 WARN_ON_ONCE(pkt->buff.data_hard_start); in fbnic_pkt_prepare()
588 /* Short-cut the end calculation if we know page is fully consumed */ in fbnic_pkt_prepare()
593 headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD; in fbnic_pkt_prepare()
594 frame_sz = hdr_pg_end - hdr_pg_start; in fbnic_pkt_prepare()
595 xdp_init_buff(&pkt->buff, frame_sz, NULL); in fbnic_pkt_prepare()
600 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_pkt_prepare()
607 xdp_prepare_buff(&pkt->buff, hdr_start, headroom, in fbnic_pkt_prepare()
608 len - FBNIC_RX_PAD, true); in fbnic_pkt_prepare()
610 pkt->data_truesize = 0; in fbnic_pkt_prepare()
611 pkt->data_len = 0; in fbnic_pkt_prepare()
612 pkt->nr_frags = 0; in fbnic_pkt_prepare()
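The header-buffer geometry above is pure alignment arithmetic: the start backs off the headroom, the end is found by aligning past the frame plus room for the next one, and the xdp headroom is the fixed headroom plus pad. A standalone sketch of that math (all constants below are stand-ins for FBNIC_RX_HROOM/FBNIC_RX_TROOM/FBNIC_RX_PAD, not the real values):

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of 2 */

	int main(void)
	{
		unsigned int hroom = 64, troom = 64, pad = 2;	/* hypothetical */
		unsigned int pg_off = 640, len = 100;

		/* start/end as in fbnic_hdr_pg_start()/fbnic_hdr_pg_end() */
		unsigned int start = pg_off - hroom;
		unsigned int end = ALIGN(pg_off + len + troom + hroom, 128) - hroom;

		/* headroom/frame_sz as in fbnic_pkt_prepare() */
		printf("start %u end %u headroom %u frame_sz %u\n",
		       start, end, hroom + pad, end - start);
		/* prints: start 576 end 832 headroom 66 frame_sz 256 */
		return 0;
	}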
616 struct fbnic_pkt_buff *pkt, in fbnic_add_rx_frag() argument
622 struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx); in fbnic_add_rx_frag()
627 FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128); in fbnic_add_rx_frag()
633 dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), in fbnic_add_rx_frag()
637 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_add_rx_frag()
640 pkt->data_truesize += truesize; in fbnic_add_rx_frag()
642 __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len); in fbnic_add_rx_frag()
645 pkt->data_len += len; in fbnic_add_rx_frag()
649 struct fbnic_pkt_buff *pkt, int budget) in fbnic_put_pkt_buff() argument
655 if (!pkt->buff.data_hard_start) in fbnic_put_pkt_buff()
658 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_put_pkt_buff()
659 nr_frags = pkt->nr_frags; in fbnic_put_pkt_buff()
661 while (nr_frags--) { in fbnic_put_pkt_buff()
662 page = skb_frag_page(&shinfo->frags[nr_frags]); in fbnic_put_pkt_buff()
663 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
666 page = virt_to_page(pkt->buff.data_hard_start); in fbnic_put_pkt_buff()
667 page_pool_put_full_page(nv->page_pool, page, !!budget); in fbnic_put_pkt_buff()
671 struct fbnic_pkt_buff *pkt) in fbnic_build_skb() argument
673 unsigned int nr_frags = pkt->nr_frags; in fbnic_build_skb()
678 truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM - in fbnic_build_skb()
679 pkt->buff.data_hard_start; in fbnic_build_skb()
682 skb = napi_build_skb(pkt->buff.data_hard_start, truesize); in fbnic_build_skb()
687 skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start); in fbnic_build_skb()
688 __skb_put(skb, pkt->buff.data_end - pkt->buff.data); in fbnic_build_skb()
691 skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta); in fbnic_build_skb()
696 shinfo = xdp_get_shared_info_from_buff(&pkt->buff); in fbnic_build_skb()
699 skb->truesize += pkt->data_truesize; in fbnic_build_skb()
700 skb->data_len += pkt->data_len; in fbnic_build_skb()
701 shinfo->nr_frags = nr_frags; in fbnic_build_skb()
702 skb->len += pkt->data_len; in fbnic_build_skb()
708 skb->protocol = eth_type_trans(skb, nv->napi.dev); in fbnic_build_skb()
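fbnic_build_skb() translates the xdp_buff pointers back into skb geometry: the gap from data_hard_start to data becomes reserved headroom, and data to data_end becomes the linear length. A hedged sketch of just that pointer arithmetic (the buffer layout below is invented):

	#include <stdio.h>

	int main(void)
	{
		char buf[2048];
		/* Invented xdp_buff-style layout: hard start, headroom,
		 * then payload
		 */
		char *data_hard_start = buf;
		char *data = buf + 66;		/* e.g. headroom + pad */
		char *data_end = data + 1400;	/* payload length */

		/* skb_reserve() and __skb_put() amounts, as derived in
		 * fbnic_build_skb()
		 */
		printf("reserve %td, put %td\n",
		       data - data_hard_start, data_end - data);
		return 0;
	}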
724 struct net_device *netdev = nv->napi.dev; in fbnic_populate_skb_fields()
725 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_populate_skb_fields()
729 if (netdev->features & NETIF_F_RXHASH) in fbnic_populate_skb_fields()
734 skb_record_rx_queue(skb, rcq->q_idx); in fbnic_populate_skb_fields()
746 struct fbnic_ring *rcq = &qt->cmpl; in fbnic_clean_rcq()
747 struct fbnic_pkt_buff *pkt; in fbnic_clean_rcq() local
748 s32 head0 = -1, head1 = -1; in fbnic_clean_rcq()
750 u32 head = rcq->head; in fbnic_clean_rcq()
752 done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0; in fbnic_clean_rcq()
753 raw_rcd = &rcq->desc[head & rcq->size_mask]; in fbnic_clean_rcq()
754 pkt = rcq->pkt; in fbnic_clean_rcq()
756 /* Walk the completion queue collecting the heads reported by NIC */ in fbnic_clean_rcq()
758 struct sk_buff *skb = ERR_PTR(-EINVAL); in fbnic_clean_rcq()
771 fbnic_pkt_prepare(nv, rcd, pkt, qt); in fbnic_clean_rcq()
776 fbnic_add_rx_frag(nv, rcd, pkt, qt); in fbnic_clean_rcq()
788 skb = fbnic_build_skb(nv, pkt); in fbnic_clean_rcq()
795 bytes += skb->len; in fbnic_clean_rcq()
797 napi_gro_receive(&nv->napi, skb); in fbnic_clean_rcq()
800 fbnic_put_pkt_buff(nv, pkt, 1); in fbnic_clean_rcq()
803 pkt->buff.data_hard_start = NULL; in fbnic_clean_rcq()
810 if (!(head & rcq->size_mask)) { in fbnic_clean_rcq()
812 raw_rcd = &rcq->desc[0]; in fbnic_clean_rcq()
816 u64_stats_update_begin(&rcq->stats.syncp); in fbnic_clean_rcq()
817 rcq->stats.packets += packets; in fbnic_clean_rcq()
818 rcq->stats.bytes += bytes; in fbnic_clean_rcq()
819 /* Re-add ethernet header length (removed in fbnic_build_skb) */ in fbnic_clean_rcq()
820 rcq->stats.bytes += ETH_HLEN * packets; in fbnic_clean_rcq()
821 rcq->stats.dropped += dropped; in fbnic_clean_rcq()
822 u64_stats_update_end(&rcq->stats.syncp); in fbnic_clean_rcq()
826 fbnic_clean_bdq(nv, budget, &qt->sub0, head0); in fbnic_clean_rcq()
827 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_clean_rcq()
830 fbnic_clean_bdq(nv, budget, &qt->sub1, head1); in fbnic_clean_rcq()
831 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_clean_rcq()
833 /* Record the current head/tail of the queue */ in fbnic_clean_rcq()
834 if (rcq->head != head) { in fbnic_clean_rcq()
835 rcq->head = head; in fbnic_clean_rcq()
836 writel(head & rcq->size_mask, rcq->doorbell); in fbnic_clean_rcq()
844 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_disable()
845 u32 v_idx = nv->v_idx; in fbnic_nv_irq_disable()
852 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_rearm()
853 u32 v_idx = nv->v_idx; in fbnic_nv_irq_rearm()
866 for (i = 0; i < nv->txt_count; i++) in fbnic_poll()
867 fbnic_clean_tcq(nv, &nv->qt[i], budget); in fbnic_poll()
869 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_poll()
870 work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget); in fbnic_poll()
885 napi_schedule_irqoff(&nv->napi); in fbnic_msix_clean_rings()
893 struct fbnic_queue_stats *stats = &rxr->stats; in fbnic_aggregate_ring_rx_counters()
896 fbn->rx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_rx_counters()
897 fbn->rx_stats.packets += stats->packets; in fbnic_aggregate_ring_rx_counters()
898 fbn->rx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_rx_counters()
904 struct fbnic_queue_stats *stats = &txr->stats; in fbnic_aggregate_ring_tx_counters()
907 fbn->tx_stats.bytes += stats->bytes; in fbnic_aggregate_ring_tx_counters()
908 fbn->tx_stats.packets += stats->packets; in fbnic_aggregate_ring_tx_counters()
909 fbn->tx_stats.dropped += stats->dropped; in fbnic_aggregate_ring_tx_counters()
915 if (!(txr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_tx_ring()
920 /* Remove pointer to the Tx ring */ in fbnic_remove_tx_ring()
921 WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); in fbnic_remove_tx_ring()
922 fbn->tx[txr->q_idx] = NULL; in fbnic_remove_tx_ring()
928 if (!(rxr->flags & FBNIC_RING_F_STATS)) in fbnic_remove_rx_ring()
934 WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); in fbnic_remove_rx_ring()
935 fbn->rx[rxr->q_idx] = NULL; in fbnic_remove_rx_ring()
941 struct fbnic_dev *fbd = nv->fbd; in fbnic_free_napi_vector()
942 u32 v_idx = nv->v_idx; in fbnic_free_napi_vector()
945 for (i = 0; i < nv->txt_count; i++) { in fbnic_free_napi_vector()
946 fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
947 fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
950 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_free_napi_vector()
951 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); in fbnic_free_napi_vector()
952 fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); in fbnic_free_napi_vector()
953 fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); in fbnic_free_napi_vector()
957 page_pool_destroy(nv->page_pool); in fbnic_free_napi_vector()
958 netif_napi_del(&nv->napi); in fbnic_free_napi_vector()
959 list_del(&nv->napis); in fbnic_free_napi_vector()
967 list_for_each_entry_safe(nv, temp, &fbn->napis, napis) in fbnic_free_napi_vectors()
973 unsigned char *dev_name = nv->napi.dev->name; in fbnic_name_napi_vector()
975 if (!nv->rxt_count) in fbnic_name_napi_vector()
976 snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name, in fbnic_name_napi_vector()
977 nv->v_idx - FBNIC_NON_NAPI_VECTORS); in fbnic_name_napi_vector()
979 snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name, in fbnic_name_napi_vector()
980 nv->v_idx - FBNIC_NON_NAPI_VECTORS); in fbnic_name_napi_vector()
992 .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count, in fbnic_alloc_nv_page_pool()
994 .dev = nv->dev, in fbnic_alloc_nv_page_pool()
1017 nv->page_pool = pp; in fbnic_alloc_nv_page_pool()
1025 u64_stats_init(&ring->stats.syncp); in fbnic_ring_init()
1026 ring->doorbell = doorbell; in fbnic_ring_init()
1027 ring->q_idx = q_idx; in fbnic_ring_init()
1028 ring->flags = flags; in fbnic_ring_init()
1037 u32 __iomem *uc_addr = fbd->uc_addr0; in fbnic_alloc_napi_vector()
1045 return -EINVAL; in fbnic_alloc_napi_vector()
1049 return -EIO; in fbnic_alloc_napi_vector()
1051 /* Allocate NAPI vector and queue triads */ in fbnic_alloc_napi_vector()
1054 return -ENOMEM; in fbnic_alloc_napi_vector()
1056 /* Record queue triad counts */ in fbnic_alloc_napi_vector()
1057 nv->txt_count = txt_count; in fbnic_alloc_napi_vector()
1058 nv->rxt_count = rxt_count; in fbnic_alloc_napi_vector()
1060 /* Provide pointer back to fbnic and MSI-X vectors */ in fbnic_alloc_napi_vector()
1061 nv->fbd = fbd; in fbnic_alloc_napi_vector()
1062 nv->v_idx = v_idx; in fbnic_alloc_napi_vector()
1065 list_add(&nv->napis, &fbn->napis); in fbnic_alloc_napi_vector()
1066 netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); in fbnic_alloc_napi_vector()
1069 netif_napi_set_irq(&nv->napi, in fbnic_alloc_napi_vector()
1070 pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); in fbnic_alloc_napi_vector()
1073 nv->dev = fbd->dev; in fbnic_alloc_napi_vector()
1087 IRQF_SHARED, nv->name, nv); in fbnic_alloc_napi_vector()
1091 /* Initialize queue triads */ in fbnic_alloc_napi_vector()
1092 qt = nv->qt; in fbnic_alloc_napi_vector()
1095 /* Configure Tx queue */ in fbnic_alloc_napi_vector()
1098 /* Assign Tx queue to netdev if applicable */ in fbnic_alloc_napi_vector()
1102 fbnic_ring_init(&qt->sub0, db, txq_idx, flags); in fbnic_alloc_napi_vector()
1103 fbn->tx[txq_idx] = &qt->sub0; in fbnic_alloc_napi_vector()
1104 txq_count--; in fbnic_alloc_napi_vector()
1106 fbnic_ring_init(&qt->sub0, db, 0, in fbnic_alloc_napi_vector()
1110 /* Configure Tx completion queue */ in fbnic_alloc_napi_vector()
1112 fbnic_ring_init(&qt->cmpl, db, 0, 0); in fbnic_alloc_napi_vector()
1114 /* Update Tx queue index */ in fbnic_alloc_napi_vector()
1115 txt_count--; in fbnic_alloc_napi_vector()
1118 /* Move to next queue triad */ in fbnic_alloc_napi_vector()
1123 /* Configure header queue */ in fbnic_alloc_napi_vector()
1125 fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1127 /* Configure payload queue */ in fbnic_alloc_napi_vector()
1129 fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); in fbnic_alloc_napi_vector()
1131 /* Configure Rx completion queue */ in fbnic_alloc_napi_vector()
1133 fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS); in fbnic_alloc_napi_vector()
1134 fbn->rx[rxq_idx] = &qt->cmpl; in fbnic_alloc_napi_vector()
1136 /* Update Rx queue index */ in fbnic_alloc_napi_vector()
1137 rxt_count--; in fbnic_alloc_napi_vector()
1140 /* Move to next queue triad */ in fbnic_alloc_napi_vector()
1147 page_pool_destroy(nv->page_pool); in fbnic_alloc_napi_vector()
1149 netif_napi_del(&nv->napi); in fbnic_alloc_napi_vector()
1150 list_del(&nv->napis); in fbnic_alloc_napi_vector()
1158 unsigned int num_tx = fbn->num_tx_queues; in fbnic_alloc_napi_vectors()
1159 unsigned int num_rx = fbn->num_rx_queues; in fbnic_alloc_napi_vectors()
1160 unsigned int num_napi = fbn->num_napi; in fbnic_alloc_napi_vectors()
1161 struct fbnic_dev *fbd = fbn->fbd; in fbnic_alloc_napi_vectors()
1164 /* Allocate 1 Tx queue per napi vector */ in fbnic_alloc_napi_vectors()
1174 num_tx--; in fbnic_alloc_napi_vectors()
1181 /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */ in fbnic_alloc_napi_vectors()
1183 int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx); in fbnic_alloc_napi_vectors()
1184 int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx); in fbnic_alloc_napi_vectors()
1192 num_tx -= tqpv; in fbnic_alloc_napi_vectors()
1195 num_rx -= rqpv; in fbnic_alloc_napi_vectors()
1206 return -ENOMEM; in fbnic_alloc_napi_vectors()
1212 kvfree(ring->buffer); in fbnic_free_ring_resources()
1213 ring->buffer = NULL; in fbnic_free_ring_resources()
1216 if (!ring->size) in fbnic_free_ring_resources()
1219 dma_free_coherent(dev, ring->size, ring->desc, ring->dma); in fbnic_free_ring_resources()
1220 ring->size_mask = 0; in fbnic_free_ring_resources()
1221 ring->size = 0; in fbnic_free_ring_resources()
1227 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_desc()
1231 size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096); in fbnic_alloc_tx_ring_desc()
1233 txr->desc = dma_alloc_coherent(dev, size, &txr->dma, in fbnic_alloc_tx_ring_desc()
1235 if (!txr->desc) in fbnic_alloc_tx_ring_desc()
1236 return -ENOMEM; in fbnic_alloc_tx_ring_desc()
1238 /* txq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_tx_ring_desc()
1239 txr->size_mask = fbn->txq_size - 1; in fbnic_alloc_tx_ring_desc()
1240 txr->size = size; in fbnic_alloc_tx_ring_desc()
1247 size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1); in fbnic_alloc_tx_ring_buffer()
1249 txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_tx_ring_buffer()
1251 return txr->tx_buf ? 0 : -ENOMEM; in fbnic_alloc_tx_ring_buffer()
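Both buffer allocations size their arrays through array_size(), which saturates to SIZE_MAX on multiplication overflow so the allocation fails cleanly instead of returning an undersized buffer. A userspace approximation of that guard (the kernel's real helper lives in <linux/overflow.h>):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's array_size(): saturate to SIZE_MAX
	 * on overflow so the subsequent allocation fails rather than
	 * silently shrinking.
	 */
	static size_t array_size_sat(size_t a, size_t b)
	{
		size_t r = a * b;

		if (b && r / b != a)
			return SIZE_MAX;
		return r;
	}

	int main(void)
	{
		printf("%zu\n", array_size_sat(16, 1024));	/* 16384 */
		printf("%d\n", array_size_sat(SIZE_MAX / 2, 4) == SIZE_MAX);	/* 1 */
		return 0;
	}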
1257 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_ring_resources()
1260 if (txr->flags & FBNIC_RING_F_DISABLED) in fbnic_alloc_tx_ring_resources()
1267 if (!(txr->flags & FBNIC_RING_F_CTX)) in fbnic_alloc_tx_ring_resources()
1284 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_desc()
1285 size_t desc_size = sizeof(*rxr->desc); in fbnic_alloc_rx_ring_desc()
1289 switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) { in fbnic_alloc_rx_ring_desc()
1291 rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1295 rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT; in fbnic_alloc_rx_ring_desc()
1299 rxq_size = fbn->rcq_size; in fbnic_alloc_rx_ring_desc()
1302 return -EINVAL; in fbnic_alloc_rx_ring_desc()
1308 rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma, in fbnic_alloc_rx_ring_desc()
1310 if (!rxr->desc) in fbnic_alloc_rx_ring_desc()
1311 return -ENOMEM; in fbnic_alloc_rx_ring_desc()
1313 /* rxq_size should be a power of 2, so mask is just that -1 */ in fbnic_alloc_rx_ring_desc()
1314 rxr->size_mask = rxq_size - 1; in fbnic_alloc_rx_ring_desc()
1315 rxr->size = size; in fbnic_alloc_rx_ring_desc()
1322 size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1324 if (rxr->flags & FBNIC_RING_F_CTX) in fbnic_alloc_rx_ring_buffer()
1325 size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1); in fbnic_alloc_rx_ring_buffer()
1327 size = sizeof(*rxr->pkt); in fbnic_alloc_rx_ring_buffer()
1329 rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); in fbnic_alloc_rx_ring_buffer()
1331 return rxr->rx_buf ? 0 : -ENOMEM; in fbnic_alloc_rx_ring_buffer()
1337 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_ring_resources()
1358 struct device *dev = fbn->netdev->dev.parent; in fbnic_free_qt_resources()
1360 fbnic_free_ring_resources(dev, &qt->cmpl); in fbnic_free_qt_resources()
1361 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_free_qt_resources()
1362 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_free_qt_resources()
1368 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_tx_qt_resources()
1371 err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1375 err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_tx_qt_resources()
1382 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_tx_qt_resources()
1389 struct device *dev = fbn->netdev->dev.parent; in fbnic_alloc_rx_qt_resources()
1392 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1396 err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1400 err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl); in fbnic_alloc_rx_qt_resources()
1407 fbnic_free_ring_resources(dev, &qt->sub1); in fbnic_alloc_rx_qt_resources()
1409 fbnic_free_ring_resources(dev, &qt->sub0); in fbnic_alloc_rx_qt_resources()
1418 /* Free Tx Resources */ in fbnic_free_nv_resources()
1419 for (i = 0; i < nv->txt_count; i++) in fbnic_free_nv_resources()
1420 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1422 for (j = 0; j < nv->rxt_count; j++, i++) in fbnic_free_nv_resources()
1423 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_free_nv_resources()
1431 /* Allocate Tx Resources */ in fbnic_alloc_nv_resources()
1432 for (i = 0; i < nv->txt_count; i++) { in fbnic_alloc_nv_resources()
1433 err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1439 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_alloc_nv_resources()
1440 err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1448 while (i--) in fbnic_alloc_nv_resources()
1449 fbnic_free_qt_resources(fbn, &nv->qt[i]); in fbnic_alloc_nv_resources()
1457 list_for_each_entry(nv, &fbn->napis, napis) in fbnic_free_resources()
1464 int err = -ENODEV; in fbnic_alloc_resources()
1466 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_alloc_resources()
1475 list_for_each_entry_continue_reverse(nv, &fbn->napis, napis) in fbnic_alloc_resources()
1515 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_napi_disable()
1516 napi_disable(&nv->napi); in fbnic_napi_disable()
1524 struct fbnic_dev *fbd = fbn->fbd; in fbnic_disable()
1528 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_disable()
1529 /* Disable Tx queue triads */ in fbnic_disable()
1530 for (i = 0; i < nv->txt_count; i++) { in fbnic_disable()
1531 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_disable()
1533 fbnic_disable_twq0(&qt->sub0); in fbnic_disable()
1534 fbnic_disable_tcq(&qt->cmpl); in fbnic_disable()
1537 /* Disable Rx queue triads */ in fbnic_disable()
1538 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_disable()
1539 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_disable()
1541 fbnic_disable_bdq(&qt->sub0, &qt->sub1); in fbnic_disable()
1542 fbnic_disable_rcq(&qt->cmpl); in fbnic_disable()
1551 netdev_warn(fbd->netdev, "triggering Tx flush\n"); in fbnic_tx_flush()
1588 netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err); in fbnic_idle_dump()
1591 netdev_err(fbd->netdev, "0x%04x: %08x\n", in fbnic_idle_dump()
1598 static const struct fbnic_idle_regs tx[] = { in fbnic_wait_all_queues_idle() local
1612 false, fbd, tx, ARRAY_SIZE(tx)); in fbnic_wait_all_queues_idle()
1613 if (err == -ETIMEDOUT) { in fbnic_wait_all_queues_idle()
1617 fbd, tx, ARRAY_SIZE(tx)); in fbnic_wait_all_queues_idle()
1621 fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err); in fbnic_wait_all_queues_idle()
1637 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_flush()
1640 /* Flush any processed Tx Queue Triads and drop the rest */ in fbnic_flush()
1641 for (i = 0; i < nv->txt_count; i++) { in fbnic_flush()
1642 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_flush()
1646 fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail); in fbnic_flush()
1648 /* Reset completion queue descriptor ring */ in fbnic_flush()
1649 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
1651 /* Nothing else to do if Tx queue is disabled */ in fbnic_flush()
1652 if (qt->sub0.flags & FBNIC_RING_F_DISABLED) in fbnic_flush()
1655 /* Reset BQL associated with Tx queue */ in fbnic_flush()
1656 tx_queue = netdev_get_tx_queue(nv->napi.dev, in fbnic_flush()
1657 qt->sub0.q_idx); in fbnic_flush()
1660 /* Disassociate Tx queue from NAPI */ in fbnic_flush()
1661 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_flush()
1665 /* Flush any processed Rx Queue Triads and drop the rest */ in fbnic_flush()
1666 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_flush()
1667 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_flush()
1670 fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); in fbnic_flush()
1671 fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail); in fbnic_flush()
1673 /* Reset completion queue descriptor ring */ in fbnic_flush()
1674 memset(qt->cmpl.desc, 0, qt->cmpl.size); in fbnic_flush()
1676 fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); in fbnic_flush()
1677 qt->cmpl.pkt->buff.data_hard_start = NULL; in fbnic_flush()
1679 /* Disassociate Rx queue from NAPI */ in fbnic_flush()
1680 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_flush()
1690 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_fill()
1693 /* Configure NAPI mapping for Tx */ in fbnic_fill()
1694 for (i = 0; i < nv->txt_count; i++) { in fbnic_fill()
1695 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_fill()
1697 /* Nothing to do if Tx queue is disabled */ in fbnic_fill()
1698 if (qt->sub0.flags & FBNIC_RING_F_DISABLED) in fbnic_fill()
1701 /* Associate Tx queue with NAPI */ in fbnic_fill()
1702 netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, in fbnic_fill()
1703 NETDEV_QUEUE_TYPE_TX, &nv->napi); in fbnic_fill()
1709 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_fill()
1710 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_fill()
1712 /* Associate Rx queue with NAPI */ in fbnic_fill()
1713 netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, in fbnic_fill()
1714 NETDEV_QUEUE_TYPE_RX, &nv->napi); in fbnic_fill()
1717 fbnic_fill_bdq(nv, &qt->sub0); in fbnic_fill()
1718 fbnic_fill_bdq(nv, &qt->sub1); in fbnic_fill()
1725 u32 log_size = fls(twq->size_mask); in fbnic_enable_twq0()
1727 if (!twq->size_mask) in fbnic_enable_twq0()
1732 twq->tail = 0; in fbnic_enable_twq0()
1733 twq->head = 0; in fbnic_enable_twq0()
1736 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma)); in fbnic_enable_twq0()
1737 fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma)); in fbnic_enable_twq0()
1748 u32 log_size = fls(tcq->size_mask); in fbnic_enable_tcq()
1750 if (!tcq->size_mask) in fbnic_enable_tcq()
1755 tcq->tail = 0; in fbnic_enable_tcq()
1756 tcq->head = 0; in fbnic_enable_tcq()
1759 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma)); in fbnic_enable_tcq()
1760 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma)); in fbnic_enable_tcq()
1765 /* Store interrupt information for the completion queue */ in fbnic_enable_tcq()
1766 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx); in fbnic_enable_tcq()
1767 fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2); in fbnic_enable_tcq()
1770 /* Enable queue */ in fbnic_enable_tcq()
1781 ppq->tail = 0; in fbnic_enable_bdq()
1782 ppq->head = 0; in fbnic_enable_bdq()
1783 hpq->tail = 0; in fbnic_enable_bdq()
1784 hpq->head = 0; in fbnic_enable_bdq()
1786 log_size = fls(hpq->size_mask); in fbnic_enable_bdq()
1789 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma)); in fbnic_enable_bdq()
1790 fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma)); in fbnic_enable_bdq()
1795 if (!ppq->size_mask) in fbnic_enable_bdq()
1798 log_size = fls(ppq->size_mask); in fbnic_enable_bdq()
1804 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma)); in fbnic_enable_bdq()
1805 fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma)); in fbnic_enable_bdq()
1830 u32 log_size = fls(rcq->size_mask); in fbnic_enable_rcq()
1846 rcq->head = 0; in fbnic_enable_rcq()
1847 rcq->tail = 0; in fbnic_enable_rcq()
1850 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma)); in fbnic_enable_rcq()
1851 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma)); in fbnic_enable_rcq()
1856 /* Store interrupt information for the completion queue */ in fbnic_enable_rcq()
1857 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx); in fbnic_enable_rcq()
1858 fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2); in fbnic_enable_rcq()
1861 /* Enable queue */ in fbnic_enable_rcq()
1867 struct fbnic_dev *fbd = fbn->fbd; in fbnic_enable()
1871 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_enable()
1872 /* Setup Tx Queue Triads */ in fbnic_enable()
1873 for (i = 0; i < nv->txt_count; i++) { in fbnic_enable()
1874 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_enable()
1876 fbnic_enable_twq0(&qt->sub0); in fbnic_enable()
1877 fbnic_enable_tcq(nv, &qt->cmpl); in fbnic_enable()
1880 /* Setup Rx Queue Triads */ in fbnic_enable()
1881 for (j = 0; j < nv->rxt_count; j++, i++) { in fbnic_enable()
1882 struct fbnic_q_triad *qt = &nv->qt[i]; in fbnic_enable()
1884 fbnic_enable_bdq(&qt->sub0, &qt->sub1); in fbnic_enable()
1885 fbnic_config_drop_mode_rcq(nv, &qt->cmpl); in fbnic_enable()
1886 fbnic_enable_rcq(nv, &qt->cmpl); in fbnic_enable()
1895 struct fbnic_dev *fbd = nv->fbd; in fbnic_nv_irq_enable()
1900 fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); in fbnic_nv_irq_enable()
1906 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_enable()
1910 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_napi_enable()
1911 napi_enable(&nv->napi); in fbnic_napi_enable()
1918 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_enable()
1938 struct fbnic_dev *fbd = fbn->fbd; in fbnic_napi_depletion_check()
1942 list_for_each_entry(nv, &fbn->napis, napis) { in fbnic_napi_depletion_check()
1944 for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) { in fbnic_napi_depletion_check()
1948 if (fbnic_desc_used(&nv->qt[i].sub0) < 4 || in fbnic_napi_depletion_check()
1949 fbnic_desc_used(&nv->qt[i].sub1) < 4) in fbnic_napi_depletion_check()
1950 irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); in fbnic_napi_depletion_check()
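The depletion check accumulates vectors to rearm in an array of u32 bitmaps, using v_idx / 32 to pick the word and BIT(v_idx % 32) for the bit within it, the same split used in fbnic_napi_enable(). A small sketch of that indexing (the vector numbers are arbitrary):

	#include <stdio.h>

	#define BIT(n)	(1u << (n))

	int main(void)
	{
		unsigned int irqs[2] = {0};	/* covers 64 hypothetical vectors */
		unsigned int v_idx[] = { 3, 34, 40 };
		unsigned int i;

		for (i = 0; i < 3; i++)
			irqs[v_idx[i] / 32] |= BIT(v_idx[i] % 32);

		printf("irqs[0]=0x%08x irqs[1]=0x%08x\n", irqs[0], irqs[1]);
		/* prints: irqs[0]=0x00000008 irqs[1]=0x00000104 */
		return 0;
	}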