// SPDX-License-Identifier: GPL-2.0
/* Excerpts from the Linux "ice" Ethernet driver's queue setup code
 * (ice_base.c). Elided code is marked with "..." comments.
 */
/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}
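/* A minimal sketch (hypothetical helper, not part of the driver) of the same
 * reservation pattern: find a run of @count zero bits in a shared bitmap,
 * mark it used, and report the absolute IDs. It uses only the
 * <linux/bitmap.h> helpers the function above already relies on.
 */
static int demo_reserve_contig(unsigned long *map, unsigned int map_size,
			       unsigned int count, u16 *out)
{
	unsigned int offset, i;

	offset = bitmap_find_next_zero_area(map, map_size, 0, count, 0);
	if (offset >= map_size)
		return -ENOMEM;		/* no free run of @count bits */

	bitmap_set(map, offset, count);	/* claim the whole run */
	for (i = 0; i < count; i++)
		out[i] = (u16)(offset + i);

	return 0;
}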
/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
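/* Companion sketch (hypothetical helper) of the scattered variant above:
 * claim bits one at a time and, on exhaustion, unwind every bit claimed so
 * far before failing, so a partial allocation never leaks.
 */
static int demo_reserve_scatter(unsigned long *map, unsigned int map_size,
				unsigned int count, u16 *out)
{
	unsigned int i, index = 0;

	for (i = 0; i < count; i++) {
		index = find_next_zero_bit(map, map_size, index);
		if (index >= map_size)
			goto unwind;
		set_bit(index, map);
		out[i] = (u16)index;
	}
	return 0;

unwind:
	while (i--)
		clear_bit(out[i], map);	/* roll back partial claims */
	return -ENOMEM;
}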
/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * ...
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else returns 0 on success.
 */
	/* ... checked in a bounded retry loop: */
	if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
		      QRX_CTRL_QENA_STAT_M))
		return 0;
	/* ... */
	return -ETIMEDOUT;
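/* Sketch (hypothetical helper) of the bounded-polling pattern used by
 * ice_pf_rxq_wait(): re-check a hardware predicate a fixed number of times
 * with a short sleep between reads, and time out if it never matches.
 */
static int demo_poll_until(bool (*reached)(void *ctx), void *ctx,
			   unsigned int max_retries)
{
	unsigned int i;

	for (i = 0; i < max_retries; i++) {
		if (reached(ctx))
			return 0;
		usleep_range(20, 40);	/* placeholder backoff window */
	}

	return -ETIMEDOUT;
}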
/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * ...
 * We allocate one q_vector and set default values for its ITR settings. If
 * allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	int err;

	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;
	q_vector->irq.index = -ENOENT;

	if (vsi->type == ICE_VSI_VF) {
		ice_calc_vf_reg_idx(vsi->vf, q_vector);
		goto out;
	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

		/* ... a VF control VSI shares the existing vector's IRQ: */
		if (unlikely(!ctrl_vsi->q_vectors)) {
			err = -ENOENT;
			goto err_free_q_vector;
		}
		q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
		goto skip_alloc;
	}

	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
	if (q_vector->irq.index < 0) {
		err = -ENOMEM;
		goto err_free_q_vector;
	}

skip_alloc:
	q_vector->reg_idx = q_vector->irq.index;
	q_vector->vf_reg_idx = q_vector->irq.index;

	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* ... register the NAPI handler when a netdev already exists: */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;

err_free_q_vector:
	kfree(q_vector);
	return err;
}
/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * ...
 */
	struct ice_pf *pf = vsi->back;
	/* ... */
	if (!vsi->q_vectors[v_idx]) {
		/* ... */
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
		tx_ring->q_vector = NULL;
	ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
		rx_ring->q_vector = NULL;

	/* only a VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	/* nothing to release if the IRQ was never allocated */
	if (q_vector->irq.index < 0)
		goto free_q_vector;

	/* only free the last VF ctrl VSI interrupt */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
	    ice_get_vf_ctrl_vsi(pf, vsi))
		goto free_q_vector;

	ice_free_irq(pf, q_vector->irq);

free_q_vector:
	kfree(q_vector);
	vsi->q_vectors[v_idx] = NULL;
/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * ...
 */
/**
 * ice_calc_txq_handle - calculate the queue handle
 * ...
 */
	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* the handle is the ring's index relative to its TC's first queue */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
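/* Worked example (hypothetical numbers): if TC 2 starts at qoffset 8 and the
 * ring's absolute q_index is 11, the handle is 11 - 8 = 3, i.e. the fourth
 * queue within that traffic class.
 */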
/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * ...
 */
	if (!ring->q_vector || !ring->netdev)
		return;

	/* only initialize XPS once, so user settings are not overwritten */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * ...
 */
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Tx timestamps are only supported on the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
/**
 * ice_rx_offset - Return expected offset into page to access data
 * ...
 */
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * ...
 */
	struct ice_vsi *vsi = ring->vsi;
	/* ... */
	hw = &vsi->back->hw;

	/* what is the Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];
	/* ... */

	/* Receive Queue Base Address, in units of 128 bytes */
	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size, in units of 128 bytes */
	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));
	/* ... */

	/* Strip the Ethernet CRC bytes before the packet is posted to host */
	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);

	/* L2TSEL flag defines the handling of the outer VLAN tag in the Rx
	 * descriptor and it needs to remain 1 for non-DVM capable
	 * configurations to not break backward compatibility for VF drivers.
	 */
	if (vsi->type == ICE_VSI_VF &&
	    ice_vf_is_port_vlan_ena(vsi->vf)) {
		/* ... */
	}

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
	/* ... */

	if (ice_is_eswitch_mode_switchdev(vsi->back)) {
		ring->flags |= ICE_RX_FLAGS_MULTIDEV;
		/* ... */
	}

	if (vsi->type != ICE_VSI_VF) {
		/* ... */
	}

	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		/* ... */
	}

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);
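/* Worked example for the dbuf field above (hypothetical buffer size): with
 * rx_buf_len = 3072 and 128-byte units (BIT_ULL(ICE_RLAN_CTX_DBUF_S) = 128),
 * dbuf = DIV_ROUND_UP(3072, 128) = 24.
 */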
	void *ctx_ptr = &ring->pkt_ctx;
	/* ... */
	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
		   sizeof(struct xdp_buff_xsk);
	/* ... */
	xsk_pool_fill_cb(ring->xsk_pool, &desc);
/**
 * ice_get_frame_sz - calculate xdp_buff::frame_sz
 * ...
 */
	frame_sz = rx_ring->rx_buf_len;
	/* ... */
/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * ...
 */
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	/* ... */
	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
		}
		/* ... */
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
			/* ... */
			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
							 ring->q_index,
							 ring->q_vector->napi.napi_id,
							 ring->rx_buf_len);
				if (err)
					return err;
			}

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
	ring->xdp.data = NULL;
	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			/* ... */
			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}
		return 0;
	}
	/* ... */
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -EINVAL;
	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * ...
 */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
	}
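/* Summary of the three tiers above (illustrative): legacy-rx uses a
 * 1664-byte buffer; on small-page systems a standard 1500-byte MTU fits in a
 * 1536-byte buffer minus NET_IP_ALIGN; everything else (e.g. jumbo MTUs)
 * uses 3072-byte buffers with the firmware's maximum frame size.
 */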
/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * ...
 */
	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;
	/* ... */
	ice_for_each_rxq(vsi, i) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}
/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contiguous assignment failed, fall back to scatter */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
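/* Hypothetical caller composing the two demo helpers from earlier in the
 * same contig-then-scatter order; @map, @map_size, @count and @ids are the
 * caller's own bookkeeping, not driver state.
 */
static int demo_reserve_queues(unsigned long *map, unsigned int map_size,
			       unsigned int count, u16 *ids)
{
	if (!demo_reserve_contig(map, map_size, count, ids))
		return 0;	/* got a contiguous block */

	/* otherwise settle for whatever scattered IDs are free */
	return demo_reserve_scatter(map, map_size, count, ids);
}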
/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * ...
 */
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	/* ... */
	/* turn on/off the queue */
	/* ... */
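	/* Sketch of the register update behind the comment above (mask names
	 * follow the QRX_CTRL register definitions; treat as illustrative):
	 *
	 *	rx_reg = rd32(hw, QRX_CTRL(pf_q));
	 *	if (ena)
	 *		rx_reg |= QRX_CTRL_QENA_REQ_M;
	 *	else
	 *		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	 *	wr32(hw, QRX_CTRL(pf_q), rx_reg);
	 */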
/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else returns 0.
 */
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * ...
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
	struct device *dev = ice_pf_to_dev(vsi->back);
	/* ... */
	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * ...
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vectors in a round-robin fashion.
 */
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign remaining rings count to VSI's num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
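/* Worked example of the distribution above (hypothetical counts): with
 * num_txq = 10 and 4 vectors, the per-vector ring counts come out as
 * DIV_ROUND_UP(10,4) = 3, DIV_ROUND_UP(7,3) = 3, DIV_ROUND_UP(4,2) = 2 and
 * DIV_ROUND_UP(2,1) = 2, i.e. 3+3+2+2 = 10 with the earlier vectors taking
 * the extra rings.
 */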
/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * ...
 */
	/* ... */
	vsi->num_q_vectors = 0;
/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * ...
 */
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	/* ... */
	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg (the transmit comm scheduler queue
	 * doorbell)
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	tc = ring->dcb_tc;

	/* add a unique software queue handle of the Tx queue per TC */
	ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	/* ... */

	/* add the Tx queue TEID from the response into the VSI Tx ring */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);
	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * ...
 */
	qg_buf->num_txqs = 1;
	/* ... */
/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * ...
 */
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * ...
 */
	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	/* ... */
/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * ...
 */
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * ...
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 */
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	/* ... build the QINT_TQCTL value from msix_idx and itr_idx: */
	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
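	/* Illustrative assembly of the value written above (mask names as in
	 * the QINT_TQCTL register definitions; treat as a sketch):
	 *
	 *	val = QINT_TQCTL_CAUSE_ENA_M |
	 *	      FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx) |
	 *	      FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
	 */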
/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * ...
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 */
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	/* ... build the QINT_RQCTL value from msix_idx and itr_idx: */
	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * ...
 */
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * ...
 */
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	/* ... */

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
	/* ... */

	/* trigger a software interrupt for the vector associated with the
	 * queue so the NAPI handler gets scheduled
	 */
	q_vector = ring->q_vector;
	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned; that is not an error, as
	 * the reset disables the queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * ...
 */
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}