Lines Matching +full:pa +full:- +full:stats

1 // SPDX-License-Identifier: GPL-2.0
22 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
38 u64 incr = (u64)(cq->cq_idx) << 32; in otx2_nix_cq_op_status()
41 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); in otx2_nix_cq_op_status()
45 dev_err(pfvf->dev, "CQ stopped due to error"); in otx2_nix_cq_op_status()
46 return -EINVAL; in otx2_nix_cq_op_status()
49 cq->cq_tail = status & 0xFFFFF; in otx2_nix_cq_op_status()
50 cq->cq_head = (status >> 20) & 0xFFFFF; in otx2_nix_cq_op_status()
51 if (cq->cq_tail < cq->cq_head) in otx2_nix_cq_op_status()
52 cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + in otx2_nix_cq_op_status()
53 cq->cq_tail; in otx2_nix_cq_op_status()
55 cq->pend_cqe = cq->cq_tail - cq->cq_head; in otx2_nix_cq_op_status()
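
The decode above is a standard ring-occupancy calculation: the CQ status word carries a 20-bit tail and head, and when the tail has wrapped behind the head the pending count is the slots from head to the end of the ring plus the tail. A minimal standalone sketch of the same arithmetic (illustrative only, not driver code; assumes head and tail are already reduced modulo the ring size):

    #include <assert.h>

    /* Pending entries in a ring of 'cnt' slots, given a consumer index
     * 'head' and a producer index 'tail' that are always < cnt.
     */
    static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                     unsigned int cnt)
    {
            return (tail >= head) ? tail - head : (cnt - head) + tail;
    }

    int main(void)
    {
            assert(ring_pending(0, 5, 8) == 5);     /* no wrap */
            assert(ring_pending(6, 2, 8) == 4);     /* tail has wrapped */
            return 0;
    }
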
64 cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head); in otx2_get_next_cqe()
65 if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID) in otx2_get_next_cqe()
68 cq->cq_head++; in otx2_get_next_cqe()
69 cq->cq_head &= (cq->cqe_cnt - 1); in otx2_get_next_cqe()
77 return (i & ~3) + 3 - (i & 3); in frag_num()
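
frag_num() reverses an index within its aligned group of four (0,1,2,3 map to 3,2,1,0; 4..7 map to 7,6,5,4); it is used further down, in otx2_sqe_tso_add_sg(), to pick the sg_lens[] slot for each segment. A tiny self-checking sketch of the mapping (illustrative only, not driver code):

    #include <assert.h>

    /* Same expression as frag_num() above: reverse the order of indices
     * within each aligned group of four.
     */
    static int frag_idx(int i)
    {
            return (i & ~3) + 3 - (i & 3);
    }

    int main(void)
    {
            assert(frag_idx(0) == 3 && frag_idx(1) == 2 &&
                   frag_idx(2) == 1 && frag_idx(3) == 0);
            assert(frag_idx(4) == 7 && frag_idx(7) == 4);
            return 0;
    }
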
90 /* First segment is always skb->data */ in otx2_dma_map_skb_frag()
92 page = virt_to_page(skb->data); in otx2_dma_map_skb_frag()
93 offset = offset_in_page(skb->data); in otx2_dma_map_skb_frag()
96 frag = &skb_shinfo(skb)->frags[seg - 1]; in otx2_dma_map_skb_frag()
108 for (seg = 0; seg < sg->num_segs; seg++) { in otx2_dma_unmap_skb_frags()
109 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], in otx2_dma_unmap_skb_frags()
110 sg->size[seg], DMA_TO_DEVICE); in otx2_dma_unmap_skb_frags()
112 sg->num_segs = 0; in otx2_dma_unmap_skb_frags()
119 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_xdp_snd_pkt_handler()
122 u64 pa; in otx2_xdp_snd_pkt_handler() local
124 sg = &sq->sg[snd_comp->sqe_id]; in otx2_xdp_snd_pkt_handler()
126 pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); in otx2_xdp_snd_pkt_handler()
127 otx2_dma_unmap_page(pfvf, sg->dma_addr[0], in otx2_xdp_snd_pkt_handler()
128 sg->size[0], DMA_TO_DEVICE); in otx2_xdp_snd_pkt_handler()
129 page = virt_to_page(phys_to_virt(pa)); in otx2_xdp_snd_pkt_handler()
139 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_snd_pkt_handler()
146 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) in otx2_snd_pkt_handler()
148 pfvf->netdev->name, cq->cint_idx, in otx2_snd_pkt_handler()
149 snd_comp->status); in otx2_snd_pkt_handler()
151 sg = &sq->sg[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
152 skb = (struct sk_buff *)sg->skb; in otx2_snd_pkt_handler()
156 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { in otx2_snd_pkt_handler()
157 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; in otx2_snd_pkt_handler()
159 timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); in otx2_snd_pkt_handler()
169 *tx_bytes += skb->len; in otx2_snd_pkt_handler()
173 sg->skb = (u64)NULL; in otx2_snd_pkt_handler()
182 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) in otx2_set_rxtstamp()
185 timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); in otx2_set_rxtstamp()
191 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); in otx2_set_rxtstamp()
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); in otx2_skb_add_frag()
204 if (likely(!skb_shinfo(skb)->nr_frags)) { in otx2_skb_add_frag()
210 if (parse->laptr) { in otx2_skb_add_frag()
217 if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) { in otx2_skb_add_frag()
218 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in otx2_skb_add_frag()
219 va - page_address(page) + off, in otx2_skb_add_frag()
220 len - off, pfvf->rbsize); in otx2_skb_add_frag()
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); in otx2_skb_add_frag()
239 if (!(pfvf->netdev->features & NETIF_F_RXHASH)) in otx2_set_rxhash()
242 rss = &pfvf->hw.rss_info; in otx2_set_rxhash()
243 if (rss->flowkey_cfg) { in otx2_set_rxhash()
244 if (rss->flowkey_cfg & in otx2_set_rxhash()
249 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
257 struct nix_rx_sg_s *sg = &cqe->sg; in otx2_free_rcv_seg()
263 end = start + ((cqe->parse.desc_sizem1 + 1) * 16); in otx2_free_rcv_seg()
266 seg_addr = &sg->seg_addr; in otx2_free_rcv_seg()
267 for (seg = 0; seg < sg->segs; seg++, seg_addr++) in otx2_free_rcv_seg()
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx, in otx2_free_rcv_seg()
277 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; in otx2_check_rcv_errors() local
278 struct nix_rx_parse_s *parse = &cqe->parse; in otx2_check_rcv_errors()
281 netdev_err(pfvf->netdev, in otx2_check_rcv_errors()
283 qidx, parse->errlev, parse->errcode); in otx2_check_rcv_errors()
285 if (parse->errlev == NPC_ERRLVL_RE) { in otx2_check_rcv_errors()
286 switch (parse->errcode) { in otx2_check_rcv_errors()
289 atomic_inc(&stats->rx_fcs_errs); in otx2_check_rcv_errors()
292 atomic_inc(&stats->rx_undersize_errs); in otx2_check_rcv_errors()
295 atomic_inc(&stats->rx_oversize_errs); in otx2_check_rcv_errors()
298 atomic_inc(&stats->rx_len_errs); in otx2_check_rcv_errors()
301 atomic_inc(&stats->rx_other_errs); in otx2_check_rcv_errors()
304 } else if (parse->errlev == NPC_ERRLVL_NIX) { in otx2_check_rcv_errors()
305 switch (parse->errcode) { in otx2_check_rcv_errors()
310 atomic_inc(&stats->rx_len_errs); in otx2_check_rcv_errors()
314 atomic_inc(&stats->rx_csum_errs); in otx2_check_rcv_errors()
317 atomic_inc(&stats->rx_other_errs); in otx2_check_rcv_errors()
321 atomic_inc(&stats->rx_other_errs); in otx2_check_rcv_errors()
329 if (pfvf->netdev->features & NETIF_F_RXALL) in otx2_check_rcv_errors()
333 if (cqe->sg.segs) in otx2_check_rcv_errors()
343 struct nix_rx_parse_s *parse = &cqe->parse; in otx2_rcv_pkt_handler()
344 struct nix_rx_sg_s *sg = &cqe->sg; in otx2_rcv_pkt_handler()
351 if (unlikely(parse->errlev || parse->errcode)) { in otx2_rcv_pkt_handler()
352 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) in otx2_rcv_pkt_handler()
356 if (pfvf->xdp_prog) in otx2_rcv_pkt_handler()
357 if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) in otx2_rcv_pkt_handler()
365 end = start + ((cqe->parse.desc_sizem1 + 1) * 16); in otx2_rcv_pkt_handler()
368 seg_addr = &sg->seg_addr; in otx2_rcv_pkt_handler()
370 for (seg = 0; seg < sg->segs; seg++, seg_addr++) { in otx2_rcv_pkt_handler()
372 seg_size[seg], parse, cq->cq_idx)) in otx2_rcv_pkt_handler()
373 cq->pool_ptrs++; in otx2_rcv_pkt_handler()
379 skb_record_rx_queue(skb, cq->cq_idx); in otx2_rcv_pkt_handler()
380 if (pfvf->netdev->features & NETIF_F_RXCSUM) in otx2_rcv_pkt_handler()
381 skb->ip_summed = CHECKSUM_UNNECESSARY; in otx2_rcv_pkt_handler()
383 if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) in otx2_rcv_pkt_handler()
384 skb->mark = parse->match_id; in otx2_rcv_pkt_handler()
399 if (cq->pend_cqe >= budget) in otx2_rx_napi_handler()
402 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_rx_napi_handler()
406 while (likely(processed_cqe < budget) && cq->pend_cqe) { in otx2_rx_napi_handler()
407 cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); in otx2_rx_napi_handler()
408 if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || in otx2_rx_napi_handler()
409 !cqe->sg.seg_addr) { in otx2_rx_napi_handler()
414 cq->cq_head++; in otx2_rx_napi_handler()
415 cq->cq_head &= (cq->cqe_cnt - 1); in otx2_rx_napi_handler()
419 cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; in otx2_rx_napi_handler()
420 cqe->sg.seg_addr = 0x00; in otx2_rx_napi_handler()
422 cq->pend_cqe--; in otx2_rx_napi_handler()
429 ((u64)cq->cq_idx << 32) | processed_cqe); in otx2_rx_napi_handler()
437 int cnt = cq->pool_ptrs; in otx2_refill_pool_ptrs()
440 while (cq->pool_ptrs) { in otx2_refill_pool_ptrs()
443 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); in otx2_refill_pool_ptrs()
444 cq->pool_ptrs--; in otx2_refill_pool_ptrs()
447 return cnt - cq->pool_ptrs; in otx2_refill_pool_ptrs()
458 if (cq->pend_cqe >= budget) in otx2_tx_napi_handler()
461 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_tx_napi_handler()
465 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
466 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
468 while (likely(processed_cqe < budget) && cq->pend_cqe) { in otx2_tx_napi_handler()
476 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
478 if (cq->cq_type == CQ_XDP) in otx2_tx_napi_handler()
481 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], in otx2_tx_napi_handler()
484 cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; in otx2_tx_napi_handler()
486 cq->pend_cqe--; in otx2_tx_napi_handler()
488 sq->cons_head++; in otx2_tx_napi_handler()
489 sq->cons_head &= (sq->sqe_cnt - 1); in otx2_tx_napi_handler()
494 ((u64)cq->cq_idx << 32) | processed_cqe); in otx2_tx_napi_handler()
499 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
501 if (qidx >= pfvf->hw.tx_queues) in otx2_tx_napi_handler()
502 qidx -= pfvf->hw.xdp_queues; in otx2_tx_napi_handler()
503 txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_tx_napi_handler()
508 netif_carrier_ok(pfvf->netdev)) in otx2_tx_napi_handler()
526 dim_update_sample(pfvf->napi_events, in otx2_adjust_adaptive_coalese()
530 net_dim(&cq_poll->dim, dim_sample); in otx2_adjust_adaptive_coalese()
541 int filled_cnt = -1; in otx2_napi_handler()
544 pfvf = (struct otx2_nic *)cq_poll->dev; in otx2_napi_handler()
545 qset = &pfvf->qset; in otx2_napi_handler()
548 cq_idx = cq_poll->cq_ids[i]; in otx2_napi_handler()
551 cq = &qset->cq[cq_idx]; in otx2_napi_handler()
552 if (cq->cq_type == CQ_RX) { in otx2_napi_handler()
561 if (rx_cq && rx_cq->pool_ptrs) in otx2_napi_handler()
562 filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); in otx2_napi_handler()
564 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); in otx2_napi_handler()
567 /* If interface is going down, don't re-enable IRQ */ in otx2_napi_handler()
568 if (pfvf->flags & OTX2_FLAG_INTF_DOWN) in otx2_napi_handler()
572 if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) in otx2_napi_handler()
579 work = &pfvf->refill_wrk[cq->cq_idx]; in otx2_napi_handler()
580 dwork = &work->pool_refill_work; in otx2_napi_handler()
582 if (!cq->refill_task_sched) { in otx2_napi_handler()
583 work->napi = napi; in otx2_napi_handler()
584 cq->refill_task_sched = true; in otx2_napi_handler()
589 /* Re-enable interrupts */ in otx2_napi_handler()
591 NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), in otx2_napi_handler()
607 memcpy(sq->lmt_addr, sq->sqe_base, size); in otx2_sqe_flush()
608 status = otx2_lmt_flush(sq->io_addr); in otx2_sqe_flush()
611 sq->head++; in otx2_sqe_flush()
612 sq->head &= (sq->sqe_cnt - 1); in otx2_sqe_flush()
625 sq->sg[sq->head].num_segs = 0; in otx2_sqe_add_sg()
629 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); in otx2_sqe_add_sg()
630 sg->ld_type = NIX_SEND_LDTYPE_LDD; in otx2_sqe_add_sg()
631 sg->subdc = NIX_SUBDC_SG; in otx2_sqe_add_sg()
632 sg->segs = 0; in otx2_sqe_add_sg()
636  * So if sg->segs is either 2 or 3, offset += 16 bytes. in otx2_sqe_add_sg()
638 if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) in otx2_sqe_add_sg()
644 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_sqe_add_sg()
648 sg->segs++; in otx2_sqe_add_sg()
652 sq->sg[sq->head].dma_addr[seg] = dma_addr; in otx2_sqe_add_sg()
653 sq->sg[sq->head].size[seg] = len; in otx2_sqe_add_sg()
654 sq->sg[sq->head].num_segs++; in otx2_sqe_add_sg()
657 sq->sg[sq->head].skb = (u64)skb; in otx2_sqe_add_sg()
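
The grouping logic in otx2_sqe_add_sg() packs up to MAX_SEGS_PER_SG segments behind each NIX_SUBDC_SG header and keeps every subdescriptor 16-byte aligned, which is what the "either 2 or 3" comment above is about. A rough sketch of the resulting SG-area size, under the assumptions that MAX_SEGS_PER_SG is 3 and that the SG header (struct nix_sqe_sg_s) is a single 64-bit word (illustrative only, not driver code):

    #include <assert.h>

    /* Bytes consumed in the SQE by the SG subdescriptors for 'num_segs'
     * segments: a group holding two or three segments takes an 8-byte
     * header plus three 8-byte addresses, a trailing single segment
     * takes a header plus one address (assumes MAX_SEGS_PER_SG == 3).
     */
    static int sg_area_bytes(int num_segs)
    {
            int seg, bytes = 0;

            for (seg = 0; seg < num_segs; seg += 3)
                    bytes += (num_segs - seg >= 2) ? 32 : 16;
            return bytes;
    }

    int main(void)
    {
            assert(sg_area_bytes(1) == 16);
            assert(sg_area_bytes(3) == 32);
            assert(sg_area_bytes(4) == 48);
            return 0;
    }
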
667 ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset); in otx2_sqe_add_ext()
668 ext->subdc = NIX_SUBDC_EXT; in otx2_sqe_add_ext()
669 if (skb_shinfo(skb)->gso_size) { in otx2_sqe_add_ext()
670 ext->lso = 1; in otx2_sqe_add_ext()
671 ext->lso_sb = skb_tcp_all_headers(skb); in otx2_sqe_add_ext()
672 ext->lso_mps = skb_shinfo(skb)->gso_size; in otx2_sqe_add_ext()
675 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { in otx2_sqe_add_ext()
676 ext->lso_format = pfvf->hw.lso_tsov4_idx; in otx2_sqe_add_ext()
678 /* HW adds payload size to 'ip_hdr->tot_len' while in otx2_sqe_add_ext()
682 ip_hdr(skb)->tot_len = in otx2_sqe_add_ext()
683 htons(ext->lso_sb - skb_network_offset(skb)); in otx2_sqe_add_ext()
684 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { in otx2_sqe_add_ext()
685 ext->lso_format = pfvf->hw.lso_tsov6_idx; in otx2_sqe_add_ext()
686 ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); in otx2_sqe_add_ext()
687 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in otx2_sqe_add_ext()
692 ext->lso_sb = skb_transport_offset(skb) + in otx2_sqe_add_ext()
699 iplen = htons(ext->lso_sb - skb_network_offset(skb)); in otx2_sqe_add_ext()
701 ip_hdr(skb)->tot_len = iplen; in otx2_sqe_add_ext()
702 ext->lso_format = pfvf->hw.lso_udpv4_idx; in otx2_sqe_add_ext()
704 ipv6_hdr(skb)->payload_len = iplen; in otx2_sqe_add_ext()
705 ext->lso_format = pfvf->hw.lso_udpv6_idx; in otx2_sqe_add_ext()
708 udph->len = htons(sizeof(struct udphdr)); in otx2_sqe_add_ext()
710 } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in otx2_sqe_add_ext()
711 ext->tstmp = 1; in otx2_sqe_add_ext()
714 #define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN) in otx2_sqe_add_ext()
716 if (skb->vlan_proto == htons(ETH_P_8021Q)) { in otx2_sqe_add_ext()
717 ext->vlan1_ins_ena = 1; in otx2_sqe_add_ext()
718 ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET; in otx2_sqe_add_ext()
719 ext->vlan1_ins_tci = skb_vlan_tag_get(skb); in otx2_sqe_add_ext()
720 } else if (skb->vlan_proto == htons(ETH_P_8021AD)) { in otx2_sqe_add_ext()
721 ext->vlan0_ins_ena = 1; in otx2_sqe_add_ext()
722 ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET; in otx2_sqe_add_ext()
723 ext->vlan0_ins_tci = skb_vlan_tag_get(skb); in otx2_sqe_add_ext()
736 mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset); in otx2_sqe_add_mem()
737 mem->subdc = NIX_SUBDC_MEM; in otx2_sqe_add_mem()
738 mem->alg = alg; in otx2_sqe_add_mem()
739 mem->wmem = 1; /* wait for the memory operation */ in otx2_sqe_add_mem()
740 mem->addr = iova; in otx2_sqe_add_mem()
743 mem->start_offset = ptp_offset; in otx2_sqe_add_mem()
744 mem->udp_csum_crt = !!udp_csum_crt; in otx2_sqe_add_mem()
745 mem->base_ns = base_ns; in otx2_sqe_add_mem()
746 mem->step_type = 1; in otx2_sqe_add_mem()
762 if (!sqe_hdr->total) { in otx2_sqe_add_hdr()
764 sqe_hdr->df = 1; in otx2_sqe_add_hdr()
765 sqe_hdr->aura = sq->aura_id; in otx2_sqe_add_hdr()
767 sqe_hdr->pnc = 1; in otx2_sqe_add_hdr()
768 sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ? in otx2_sqe_add_hdr()
769 qidx + pfvf->hw.xdp_queues : qidx; in otx2_sqe_add_hdr()
771 sqe_hdr->total = skb->len; in otx2_sqe_add_hdr()
773 sqe_hdr->sqe_id = sq->head; in otx2_sqe_add_hdr()
776 if (skb->ip_summed == CHECKSUM_PARTIAL) { in otx2_sqe_add_hdr()
777 sqe_hdr->ol3ptr = skb_network_offset(skb); in otx2_sqe_add_hdr()
778 sqe_hdr->ol4ptr = skb_transport_offset(skb); in otx2_sqe_add_hdr()
780 if (eth_type_vlan(skb->protocol)) in otx2_sqe_add_hdr()
781 skb->protocol = vlan_get_protocol(skb); in otx2_sqe_add_hdr()
783 if (skb->protocol == htons(ETH_P_IP)) { in otx2_sqe_add_hdr()
784 proto = ip_hdr(skb)->protocol; in otx2_sqe_add_hdr()
788 sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM; in otx2_sqe_add_hdr()
789 } else if (skb->protocol == htons(ETH_P_IPV6)) { in otx2_sqe_add_hdr()
790 proto = ipv6_hdr(skb)->nexthdr; in otx2_sqe_add_hdr()
791 sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6; in otx2_sqe_add_hdr()
795 sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM; in otx2_sqe_add_hdr()
797 sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM; in otx2_sqe_add_hdr()
805 int num_segs = skb_shinfo(skb)->nr_frags + 1; in otx2_dma_map_tso_skb()
806 struct sg_list *sg = &sq->sg[sqe]; in otx2_dma_map_tso_skb()
810 sg->num_segs = 0; in otx2_dma_map_tso_skb()
812 /* Get payload length at skb->data */ in otx2_dma_map_tso_skb()
813 len = skb_headlen(skb) - hdr_len; in otx2_dma_map_tso_skb()
816 /* Skip skb->data, if there is no payload */ in otx2_dma_map_tso_skb()
820 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_dma_map_tso_skb()
824 sg->dma_addr[sg->num_segs] = dma_addr; in otx2_dma_map_tso_skb()
825 sg->size[sg->num_segs] = len; in otx2_dma_map_tso_skb()
826 sg->num_segs++; in otx2_dma_map_tso_skb()
831 return -EINVAL; in otx2_dma_map_tso_skb()
838 struct sg_list *sg = &sq->sg[sqe]; in otx2_tso_frag_dma_addr()
843 return sg->dma_addr[0] + (seg_addr - (u64)skb->data); in otx2_tso_frag_dma_addr()
845 frag = &skb_shinfo(skb)->frags[seg]; in otx2_tso_frag_dma_addr()
846 offset = seg_addr - (u64)skb_frag_address(frag); in otx2_tso_frag_dma_addr()
847 if (skb_headlen(skb) - hdr_len) in otx2_tso_frag_dma_addr()
849 return sg->dma_addr[seg] + offset; in otx2_tso_frag_dma_addr()
861 for (seg = 0; seg < list->num_segs; seg++) { in otx2_sqe_tso_add_sg()
863 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); in otx2_sqe_tso_add_sg()
864 sg->ld_type = NIX_SEND_LDTYPE_LDD; in otx2_sqe_tso_add_sg()
865 sg->subdc = NIX_SUBDC_SG; in otx2_sqe_tso_add_sg()
866 sg->segs = 0; in otx2_sqe_tso_add_sg()
870  * So if sg->segs is either 2 or 3, offset += 16 bytes. in otx2_sqe_tso_add_sg()
872 if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) in otx2_sqe_tso_add_sg()
877 sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg]; in otx2_sqe_tso_add_sg()
878 *iova++ = list->dma_addr[seg]; in otx2_sqe_tso_add_sg()
879 sg->segs++; in otx2_sqe_tso_add_sg()
886 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_sq_append_tso()
889 int first_sqe = sq->head; in otx2_sq_append_tso()
903 netdev_tx_sent_queue(txq, skb->len); in otx2_sq_append_tso()
905 tcp_data = skb->len - hdr_len; in otx2_sq_append_tso()
909 seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data); in otx2_sq_append_tso()
910 tcp_data -= seg_len; in otx2_sq_append_tso()
913 memset(sq->sqe_base, 0, sq->sqe_size); in otx2_sq_append_tso()
914 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); in otx2_sq_append_tso()
919 hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE); in otx2_sq_append_tso()
922 sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE); in otx2_sq_append_tso()
936 tso.next_frag_idx - 1, in otx2_sq_append_tso()
941 seg_len -= size; in otx2_sq_append_tso()
944 sqe_hdr->total = pkt_len; in otx2_sq_append_tso()
954 sqe_hdr->pnc = 1; in otx2_sq_append_tso()
955 sqe_hdr->sqe_id = first_sqe; in otx2_sq_append_tso()
956 sq->sg[first_sqe].skb = (u64)skb; in otx2_sq_append_tso()
958 sqe_hdr->pnc = 0; in otx2_sq_append_tso()
961 sqe_hdr->sizem1 = (offset / 16) - 1; in otx2_sq_append_tso()
964 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_tso()
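
Before flushing, the submit path writes sqe_hdr->sizem1, which encodes the total descriptor length in 16-byte units minus one: an SQE whose subdescriptors end at offset 48 is encoded as 2. The same encoding appears again in otx2_sq_append_skb() and otx2_xdp_sq_append_pkt() below. A one-line helper plus checks (illustrative only, not driver code):

    #include <assert.h>

    /* Same encoding as the 'sizem1 = (offset / 16) - 1' lines in this file. */
    static int sqe_sizem1(int offset_bytes)
    {
            return (offset_bytes / 16) - 1;
    }

    int main(void)
    {
            assert(sqe_sizem1(16) == 0);    /* minimal SQE: one 16-byte unit */
            assert(sqe_sizem1(48) == 2);    /* three 16-byte units */
            return 0;
    }
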
973 if (test_bit(HW_TSO, &pfvf->hw.cap_flag)) in is_hw_tso_supported()
977 if (!is_96xx_B0(pfvf->pdev)) in is_hw_tso_supported()
985 payload_len = skb->len - skb_tcp_all_headers(skb); in is_hw_tso_supported()
986 last_seg_size = payload_len % skb_shinfo(skb)->gso_size; in is_hw_tso_supported()
995 if (!skb_shinfo(skb)->gso_size) in otx2_get_sqe_count()
1003 return skb_shinfo(skb)->gso_segs; in otx2_get_sqe_count()
1008 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) || in otx2_validate_network_transport()
1009 (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) { in otx2_validate_network_transport()
1012 if (udph->source == htons(PTP_PORT) && in otx2_validate_network_transport()
1013 udph->dest == htons(PTP_PORT)) in otx2_validate_network_transport()
1022 struct ethhdr *eth = (struct ethhdr *)(skb->data); in otx2_ptp_is_sync()
1025 u8 *data = skb->data, *msgtype; in otx2_ptp_is_sync()
1026 __be16 proto = eth->h_proto; in otx2_ptp_is_sync()
1033 if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX && in otx2_ptp_is_sync()
1034 skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) { in otx2_ptp_is_sync()
1035 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in otx2_ptp_is_sync()
1037 proto = __vlan_get_protocol(skb, eth->h_proto, NULL); in otx2_ptp_is_sync()
1044 } else if (skb->vlan_proto == htons(ETH_P_8021Q)) { in otx2_ptp_is_sync()
1047 } else if (eth_type_vlan(eth->h_proto)) { in otx2_ptp_is_sync()
1048 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); in otx2_ptp_is_sync()
1084 struct ethhdr *eth = (struct ethhdr *)(skb->data); in otx2_set_txtstamp()
1093 if (unlikely(!skb_shinfo(skb)->gso_size && in otx2_set_txtstamp()
1094 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { in otx2_set_txtstamp()
1095 if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC && in otx2_set_txtstamp()
1098 ((u8 *)skb->data + ptp_offset + in otx2_set_txtstamp()
1100 ts = ns_to_timespec64(pfvf->ptp->tstamp); in otx2_set_txtstamp()
1101 origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff); in otx2_set_txtstamp()
1102 origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff); in otx2_set_txtstamp()
1103 origin_tstamp->nanoseconds = htonl(ts.tv_nsec); in otx2_set_txtstamp()
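
The three stores above lay out the one-step PTP origin timestamp as it appears on the wire: a 48-bit seconds count split into a 16-bit high half and a 32-bit low half, followed by 32 bits of nanoseconds, all in network byte order. A minimal userspace sketch of the same packing (illustrative only; struct ptp_origin_ts is a hypothetical stand-in for the real header layout):

    #include <stdint.h>
    #include <arpa/inet.h>

    struct ptp_origin_ts {          /* hypothetical mirror of the fields above */
            uint16_t seconds_msb;   /* bits 47:32 of the seconds count */
            uint32_t seconds_lsb;   /* bits 31:0 of the seconds count */
            uint32_t nanoseconds;
    } __attribute__((packed));

    static void ptp_origin_ts_fill(struct ptp_origin_ts *ts,
                                   uint64_t sec, uint32_t nsec)
    {
            ts->seconds_msb = htons((sec >> 32) & 0xffff);
            ts->seconds_lsb = htonl(sec & 0xffffffff);
            ts->nanoseconds = htonl(nsec);
    }

    int main(void)
    {
            struct ptp_origin_ts ts;

            ptp_origin_ts_fill(&ts, 0x123456789ABCULL, 500000000);
            return 0;
    }
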
1114 if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) { in otx2_set_txtstamp()
1116 uh->check = 0; in otx2_set_txtstamp()
1117 skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff, in otx2_set_txtstamp()
1119 if (ntohs(eth->h_proto) == ETH_P_IPV6) in otx2_set_txtstamp()
1120 uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in otx2_set_txtstamp()
1121 &ipv6_hdr(skb)->daddr, in otx2_set_txtstamp()
1122 skb->len - udphoff, in otx2_set_txtstamp()
1123 ipv6_hdr(skb)->nexthdr, in otx2_set_txtstamp()
1126 uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, in otx2_set_txtstamp()
1127 ip_hdr(skb)->daddr, in otx2_set_txtstamp()
1128 skb->len - udphoff, in otx2_set_txtstamp()
1134 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in otx2_set_txtstamp()
1136 iova = sq->timestamps->iova + (sq->head * sizeof(u64)); in otx2_set_txtstamp()
1138 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt); in otx2_set_txtstamp()
1155 free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); in otx2_sq_append_skb()
1156 if (free_desc < sq->sqe_thresh) in otx2_sq_append_skb()
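
The free_desc check above uses the usual one-slot-reserved formula for a power-of-two ring: the producer (sq->head) may run at most sqe_cnt - 1 entries ahead of the consumer (sq->cons_head), and the masked subtraction gives the remaining headroom. A standalone sketch (illustrative only, not driver code):

    #include <assert.h>

    /* Free slots in a power-of-two ring of 'cnt' entries, keeping one
     * slot unused so that a full ring is distinguishable from an empty
     * one.  'head' is the producer index, 'cons' the consumer index.
     */
    static unsigned int ring_free(unsigned int cons, unsigned int head,
                                  unsigned int cnt)
    {
            return (cons - head - 1 + cnt) & (cnt - 1);
    }

    int main(void)
    {
            assert(ring_free(0, 0, 8) == 7);   /* empty: one slot reserved */
            assert(ring_free(3, 2, 8) == 0);   /* producer just behind consumer: full */
            return 0;
    }
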
1162 num_segs = skb_shinfo(skb)->nr_frags + 1; in otx2_sq_append_skb()
1172 num_segs = skb_shinfo(skb)->nr_frags + 1; in otx2_sq_append_skb()
1175 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { in otx2_sq_append_skb()
1189 memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); in otx2_sq_append_skb()
1190 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); in otx2_sq_append_skb()
1199 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); in otx2_sq_append_skb()
1205 sqe_hdr->sizem1 = (offset / 16) - 1; in otx2_sq_append_skb()
1207 netdev_tx_sent_queue(txq, skb->len); in otx2_sq_append_skb()
1210 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_skb()
1224 if (pfvf->xdp_prog) in otx2_cleanup_rx_cqes()
1225 xdp_rxq_info_unreg(&cq->xdp_rxq); in otx2_cleanup_rx_cqes()
1227 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_rx_cqes()
1231 pool = &pfvf->qset.pool[pool_id]; in otx2_cleanup_rx_cqes()
1233 while (cq->pend_cqe) { in otx2_cleanup_rx_cqes()
1236 cq->pend_cqe--; in otx2_cleanup_rx_cqes()
1240 if (cqe->sg.segs > 1) { in otx2_cleanup_rx_cqes()
1241 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); in otx2_cleanup_rx_cqes()
1244 iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; in otx2_cleanup_rx_cqes()
1246 otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); in otx2_cleanup_rx_cqes()
1251 ((u64)cq->cq_idx << 32) | processed_cqe); in otx2_cleanup_rx_cqes()
1265 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_cleanup_tx_cqes()
1266 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
1268 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_tx_cqes()
1271 while (cq->pend_cqe) { in otx2_cleanup_tx_cqes()
1274 cq->pend_cqe--; in otx2_cleanup_tx_cqes()
1278 sg = &sq->sg[cqe->comp.sqe_id]; in otx2_cleanup_tx_cqes()
1279 skb = (struct sk_buff *)sg->skb; in otx2_cleanup_tx_cqes()
1281 tx_bytes += skb->len; in otx2_cleanup_tx_cqes()
1285 sg->skb = (u64)NULL; in otx2_cleanup_tx_cqes()
1290 if (qidx >= pfvf->hw.tx_queues) in otx2_cleanup_tx_cqes()
1291 qidx -= pfvf->hw.xdp_queues; in otx2_cleanup_tx_cqes()
1292 txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_cleanup_tx_cqes()
1297 ((u64)cq->cq_idx << 32) | processed_cqe); in otx2_cleanup_tx_cqes()
1305 mutex_lock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1307 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); in otx2_rxtx_enable()
1309 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); in otx2_rxtx_enable()
1312 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1313 return -ENOMEM; in otx2_rxtx_enable()
1316 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rxtx_enable()
1317 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1330 for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { in otx2_free_pending_sqe()
1331 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
1332 for (sqe = 0; sqe < sq->sqe_cnt; sqe++) { in otx2_free_pending_sqe()
1333 sg = &sq->sg[sqe]; in otx2_free_pending_sqe()
1334 skb = (struct sk_buff *)sg->skb; in otx2_free_pending_sqe()
1336 tx_bytes += skb->len; in otx2_free_pending_sqe()
1340 sg->skb = (u64)NULL; in otx2_free_pending_sqe()
1346 txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); in otx2_free_pending_sqe()
1359 sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); in otx2_xdp_sqe_add_sg()
1360 sg->ld_type = NIX_SEND_LDTYPE_LDD; in otx2_xdp_sqe_add_sg()
1361 sg->subdc = NIX_SUBDC_SG; in otx2_xdp_sqe_add_sg()
1362 sg->segs = 1; in otx2_xdp_sqe_add_sg()
1363 sg->seg1_size = len; in otx2_xdp_sqe_add_sg()
1368 sq->sg[sq->head].dma_addr[0] = dma_addr; in otx2_xdp_sqe_add_sg()
1369 sq->sg[sq->head].size[0] = len; in otx2_xdp_sqe_add_sg()
1370 sq->sg[sq->head].num_segs = 1; in otx2_xdp_sqe_add_sg()
1379 sq = &pfvf->qset.sq[qidx]; in otx2_xdp_sq_append_pkt()
1380 free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; in otx2_xdp_sq_append_pkt()
1381 if (free_sqe < sq->sqe_thresh) in otx2_xdp_sq_append_pkt()
1384 memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); in otx2_xdp_sq_append_pkt()
1386 sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); in otx2_xdp_sq_append_pkt()
1388 if (!sqe_hdr->total) { in otx2_xdp_sq_append_pkt()
1389 sqe_hdr->aura = sq->aura_id; in otx2_xdp_sq_append_pkt()
1390 sqe_hdr->df = 1; in otx2_xdp_sq_append_pkt()
1391 sqe_hdr->sq = qidx; in otx2_xdp_sq_append_pkt()
1392 sqe_hdr->pnc = 1; in otx2_xdp_sq_append_pkt()
1394 sqe_hdr->total = len; in otx2_xdp_sq_append_pkt()
1395 sqe_hdr->sqe_id = sq->head; in otx2_xdp_sq_append_pkt()
1400 sqe_hdr->sizem1 = (offset / 16) - 1; in otx2_xdp_sq_append_pkt()
1401 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_xdp_sq_append_pkt()
1413 int qidx = cq->cq_idx; in otx2_xdp_rcv_pkt_handler()
1416 u64 iova, pa; in otx2_xdp_rcv_pkt_handler() local
1420 iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; in otx2_xdp_rcv_pkt_handler()
1421 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_xdp_rcv_pkt_handler()
1422 page = virt_to_page(phys_to_virt(pa)); in otx2_xdp_rcv_pkt_handler()
1424 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); in otx2_xdp_rcv_pkt_handler()
1426 hard_start = (unsigned char *)phys_to_virt(pa); in otx2_xdp_rcv_pkt_handler()
1428 cqe->sg.seg_size, false); in otx2_xdp_rcv_pkt_handler()
1436 qidx += pfvf->hw.tx_queues; in otx2_xdp_rcv_pkt_handler()
1437 cq->pool_ptrs++; in otx2_xdp_rcv_pkt_handler()
1439 cqe->sg.seg_size, qidx); in otx2_xdp_rcv_pkt_handler()
1441 cq->pool_ptrs++; in otx2_xdp_rcv_pkt_handler()
1442 err = xdp_do_redirect(pfvf->netdev, &xdp, prog); in otx2_xdp_rcv_pkt_handler()
1444 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
1453 bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1456 trace_xdp_exception(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1459 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
1462 cq->pool_ptrs++; in otx2_xdp_rcv_pkt_handler()