/linux-6.12.1/drivers/net/wwan/t7xx/ |
D | t7xx_hif_dpmaif_tx.c |
   54  struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];  in t7xx_dpmaif_update_drb_rd_idx() local
   58  if (!txq->que_started)  in t7xx_dpmaif_update_drb_rd_idx()
   61  old_sw_rd_idx = txq->drb_rd_idx;  in t7xx_dpmaif_update_drb_rd_idx()
   71  drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;  in t7xx_dpmaif_update_drb_rd_idx()
   73  spin_lock_irqsave(&txq->tx_lock, flags);  in t7xx_dpmaif_update_drb_rd_idx()
   74  txq->drb_rd_idx = new_hw_rd_idx;  in t7xx_dpmaif_update_drb_rd_idx()
   75  spin_unlock_irqrestore(&txq->tx_lock, flags);  in t7xx_dpmaif_update_drb_rd_idx()
   83  struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];  in t7xx_dpmaif_release_tx_buffer() local
   90  drb_skb_base = txq->drb_skb_base;  in t7xx_dpmaif_release_tx_buffer()
   91  drb_base = txq->drb_base;  in t7xx_dpmaif_release_tx_buffer()
   [all …]
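The t7xx hits center on circular-ring index math: t7xx_dpmaif_update_drb_rd_idx() computes how many DRB entries the hardware consumed, adding drb_size_cnt back when the new hardware read index has wrapped below the old software one. A minimal sketch of that distance calculation; ring_delta and its parameter names are illustrative, not the driver's own:

    /* Distance from old_idx forward to new_idx on a ring of ring_size
     * entries; mirrors the drb_cnt computation in the snippet above. */
    static unsigned int ring_delta(unsigned int old_idx, unsigned int new_idx,
                                   unsigned int ring_size)
    {
        if (new_idx >= old_idx)
            return new_idx - old_idx;          /* no wrap */
        return ring_size - old_idx + new_idx;  /* index wrapped past the end */
    }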
|
/linux-6.12.1/drivers/infiniband/hw/hfi1/ |
D | ipoib_tx.c |
   29  struct hfi1_ipoib_txq *txq;  member
   47  static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_used() argument
   49  return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,  in hfi1_ipoib_used()
   50  txq->tx_ring.complete_txreqs);  in hfi1_ipoib_used()
   53  static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_stop_txq() argument
   55  trace_hfi1_txq_stop(txq);  in hfi1_ipoib_stop_txq()
   56  if (atomic_inc_return(&txq->tx_ring.stops) == 1)  in hfi1_ipoib_stop_txq()
   57  netif_stop_subqueue(txq->priv->netdev, txq->q_idx);  in hfi1_ipoib_stop_txq()
   60  static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)  in hfi1_ipoib_wake_txq() argument
   62  trace_hfi1_txq_wake(txq);  in hfi1_ipoib_wake_txq()
   [all …]
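The hfi1 snippet shows a counted stop: netif_stop_subqueue() fires only when atomic_inc_return() on the stops counter moves 0 to 1, so several independent stop reasons can overlap without spurious wakes. A hedged sketch of the pair, assuming a matching wake side; the struct and names are illustrative (hfi1 keeps the counter in tx_ring.stops):

    struct my_txq {
        atomic_t stops;              /* how many reasons keep us stopped */
        struct net_device *netdev;
        u16 q_idx;
    };

    static void my_txq_stop(struct my_txq *txq)
    {
        /* only the first stopper touches the netdev subqueue */
        if (atomic_inc_return(&txq->stops) == 1)
            netif_stop_subqueue(txq->netdev, txq->q_idx);
    }

    static void my_txq_wake(struct my_txq *txq)
    {
        /* only the last stopper going away restarts it */
        if (atomic_dec_and_test(&txq->stops))
            netif_wake_subqueue(txq->netdev, txq->q_idx);
    }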
|
D | trace_tx.h |
   897  TP_PROTO(struct hfi1_ipoib_txq *txq),
   898  TP_ARGS(txq),
   900  DD_DEV_ENTRY(txq->priv->dd)
   901  __field(struct hfi1_ipoib_txq *, txq)
   913  DD_DEV_ASSIGN(txq->priv->dd);
   914  __entry->txq = txq;
   915  __entry->sde = txq->sde;
   916  __entry->head = txq->tx_ring.head;
   917  __entry->tail = txq->tx_ring.tail;
   918  __entry->idx = txq->q_idx;
   [all …]
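The trace_tx.h fragment is the standard kernel tracepoint template: TP_PROTO/TP_ARGS declare the probe signature, __field reserves space in the per-event record, and TP_fast_assign fills it at trace time. A minimal TRACE_EVENT in the same shape, with an illustrative event name and hypothetical struct fields (the usual CREATE_TRACE_POINTS header boilerplate is elided):

    TRACE_EVENT(my_txq_state,
        TP_PROTO(struct my_txq *txq),
        TP_ARGS(txq),
        TP_STRUCT__entry(
            __field(u32, head)
            __field(u32, tail)
            __field(u8,  idx)
        ),
        TP_fast_assign(
            __entry->head = txq->head;   /* copied now, formatted later */
            __entry->tail = txq->tail;
            __entry->idx  = txq->q_idx;
        ),
        TP_printk("txq %u head %u tail %u",
                  __entry->idx, __entry->head, __entry->tail)
    );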
|
/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | tx.c |
   79  struct iwl_txq *txq)  in iwl_pcie_txq_inc_wr_ptr() argument
   83  int txq_id = txq->id;  in iwl_pcie_txq_inc_wr_ptr()
   85  lockdep_assert_held(&txq->lock);  in iwl_pcie_txq_inc_wr_ptr()
   108  txq->need_update = true;  in iwl_pcie_txq_inc_wr_ptr()
   117  IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);  in iwl_pcie_txq_inc_wr_ptr()
   118  if (!txq->block)  in iwl_pcie_txq_inc_wr_ptr()
   120  txq->write_ptr | (txq_id << 8));  in iwl_pcie_txq_inc_wr_ptr()
   129  struct iwl_txq *txq = trans_pcie->txqs.txq[i];  in iwl_pcie_txq_check_wrptrs() local
   134  spin_lock_bh(&txq->lock);  in iwl_pcie_txq_check_wrptrs()
   135  if (txq->need_update) {  in iwl_pcie_txq_check_wrptrs()
   [all …]
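The iwlwifi hits sketch a deferred doorbell: when the write pointer cannot be pushed to the device immediately, iwl_pcie_txq_inc_wr_ptr() only records need_update, and iwl_pcie_txq_check_wrptrs() later performs the register write under the queue lock. A loose sketch of that shape under stated assumptions; device_may_sleep() and hw_write_txq_wrptr() are hypothetical stand-ins, and iwlwifi's actual wake conditions are more involved:

    static void txq_kick(struct my_txq *txq)
    {
        if (device_may_sleep(txq->dev)) {
            txq->need_update = true;     /* defer the register write */
            return;
        }
        hw_write_txq_wrptr(txq->id, txq->write_ptr);
    }

    static void txq_flush_deferred(struct my_txq *txq)
    {
        spin_lock_bh(&txq->lock);
        if (txq->need_update) {
            hw_write_txq_wrptr(txq->id, txq->write_ptr);
            txq->need_update = false;
        }
        spin_unlock_bh(&txq->lock);
    }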
|
D | tx-gen2.c |
   297  struct iwl_txq *txq,  in iwl_txq_gen2_build_tx_amsdu() argument
   304  int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);  in iwl_txq_gen2_build_tx_amsdu()
   305  struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);  in iwl_txq_gen2_build_tx_amsdu()
   310  tb_phys = iwl_txq_get_first_tb_dma(txq, idx);  in iwl_txq_gen2_build_tx_amsdu()
   346  memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);  in iwl_txq_gen2_build_tx_amsdu()
   384  struct iwl_txq *txq,  in iwl_txq_gen2_build_tx() argument
   392  int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);  in iwl_txq_gen2_build_tx()
   393  struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);  in iwl_txq_gen2_build_tx()
   399  tb_phys = iwl_txq_get_first_tb_dma(txq, idx);  in iwl_txq_gen2_build_tx()
   402  memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);  in iwl_txq_gen2_build_tx()
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/huawei/hinic/ |
D | hinic_tx.c |
   77  static void hinic_txq_clean_stats(struct hinic_txq *txq)  in hinic_txq_clean_stats() argument
   79  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_clean_stats()
   96  void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)  in hinic_txq_get_stats() argument
   98  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_get_stats()
   116  static void txq_stats_init(struct hinic_txq *txq)  in txq_stats_init() argument
   118  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in txq_stats_init()
   121  hinic_txq_clean_stats(txq);  in txq_stats_init()
   499  struct hinic_txq *txq;  in hinic_lb_xmit_frame() local
   502  txq = &nic_dev->txqs[q_id];  in hinic_lb_xmit_frame()
   503  qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_lb_xmit_frame()
   [all …]
|
/linux-6.12.1/include/net/ |
D | netdev_queues.h |
   153  #define netif_txq_try_stop(txq, get_desc, start_thrs) \  argument
   157  netif_tx_stop_queue(txq); \
   169  netif_tx_start_queue(txq); \
   194  #define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs) \  argument
   200  _res = netif_txq_try_stop(txq, get_desc, start_thrs); \
   237  #define __netif_txq_completed_wake(txq, pkts, bytes, \  argument
   247  netdev_txq_completed_mb(txq, pkts, bytes); \
   252  if (unlikely(netif_tx_queue_stopped(txq)) && \
   254  netif_tx_wake_queue(txq); \
   261  #define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \  argument
   [all …]
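These macros package the lockless stop/wake protocol between a driver's xmit and completion paths, with netdev_txq_completed_mb() supplying the required memory barrier. A hedged usage sketch in the shape of a hypothetical driver; my_ring_unused(), MY_MAX_DESC_PER_SKB, nq, and ring are illustrative, and only the netif_txq_* calls are the real API:

    /* xmit path: after posting a frame, stop if a worst-case next frame
     * might not fit; the macro re-checks against the start threshold. */
    netif_txq_maybe_stop(nq, my_ring_unused(ring),
                         MY_MAX_DESC_PER_SKB, 2 * MY_MAX_DESC_PER_SKB);

    /* completion path: report reclaimed work, then wake the queue if it
     * was stopped and enough descriptors are free again. */
    netif_txq_completed_wake(nq, pkts, bytes,
                             my_ring_unused(ring), 2 * MY_MAX_DESC_PER_SKB);

Here nq would be the struct netdev_queue * for the ring, e.g. from netdev_get_tx_queue(dev, qidx).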
|
/linux-6.12.1/drivers/net/wireless/ath/ath9k/ |
D | xmit.c |
   56  static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
   59  int tx_flags, struct ath_txq *txq,
   62  struct ath_txq *txq, struct list_head *bf_q,
   65  static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
   73  struct ath_txq *txq,
   107  void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)  in ath_txq_unlock_complete() argument
   108  __releases(&txq->axq_lock)  in ath_txq_unlock_complete()
   115  skb_queue_splice_init(&txq->complete_q, &q);  in ath_txq_unlock_complete()
   116  spin_unlock_bh(&txq->axq_lock);  in ath_txq_unlock_complete()
   135  struct ath_txq *txq = tid->txq;  in ath9k_wake_tx_queue() local
   [all …]
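ath_txq_unlock_complete() above shows the splice-then-process idiom: completed frames are moved off the shared list while axq_lock is held, then completed with the lock dropped, so the slow completion work never runs under the spinlock. A minimal sketch with illustrative names:

    static void my_txq_complete(struct my_txq *txq)
    {
        struct sk_buff_head q;
        struct sk_buff *skb;

        __skb_queue_head_init(&q);

        spin_lock_bh(&txq->lock);
        skb_queue_splice_init(&txq->complete_q, &q);  /* take all, empty src */
        spin_unlock_bh(&txq->lock);

        while ((skb = __skb_dequeue(&q)))
            dev_kfree_skb(skb);                       /* slow work, no lock */
    }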
|
/linux-6.12.1/drivers/net/ethernet/marvell/ |
D | mv643xx_eth.c |
   179  #define IS_TSO_HEADER(txq, addr) \  argument
   180  ((addr >= txq->tso_hdrs_dma) && \
   181  (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
   408  struct tx_queue txq[8];  member
   446  static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)  in txq_to_mp() argument
   448  return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);  in txq_to_mp()
   467  static void txq_reset_hw_ptr(struct tx_queue *txq)  in txq_reset_hw_ptr() argument
   469  struct mv643xx_eth_private *mp = txq_to_mp(txq);  in txq_reset_hw_ptr()
   472  addr = (u32)txq->tx_desc_dma;  in txq_reset_hw_ptr()
   473  addr += txq->tx_curr_desc * sizeof(struct tx_desc);  in txq_reset_hw_ptr()
   [all …]
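txq_to_mp() above recovers the private struct from a queue pointer without storing a back-pointer: container_of() accepts an array element as the member argument, provided the element's own index names the right slot. A sketch with illustrative types:

    struct my_txq {
        int index;                   /* must equal this element's slot */
    };

    struct my_priv {
        struct my_txq txq[8];
    };

    static struct my_priv *my_txq_to_priv(struct my_txq *txq)
    {
        /* txq[txq->index] names the very element txq points at */
        return container_of(txq, struct my_priv, txq[txq->index]);
    }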
|
D | mvneta.c |
   135  #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)  argument
   774  static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_get() argument
   776  txq->txq_get_index++;  in mvneta_txq_inc_get()
   777  if (txq->txq_get_index == txq->size)  in mvneta_txq_inc_get()
   778  txq->txq_get_index = 0;  in mvneta_txq_inc_get()
   782  static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_put() argument
   784  txq->txq_put_index++;  in mvneta_txq_inc_put()
   785  if (txq->txq_put_index == txq->size)  in mvneta_txq_inc_put()
   786  txq->txq_put_index = 0;  in mvneta_txq_inc_put()
   964  struct mvneta_tx_queue *txq,  in mvneta_txq_pend_desc_add() argument
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | selq.h |
   30  static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)  in mlx5e_txq_to_ch_ix() argument
   32  while (unlikely(txq >= num_channels))  in mlx5e_txq_to_ch_ix()
   33  txq -= num_channels;  in mlx5e_txq_to_ch_ix()
   34  return txq;  in mlx5e_txq_to_ch_ix()
   37  static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)  in mlx5e_txq_to_ch_ix_htb() argument
   39  if (unlikely(txq >= num_channels)) {  in mlx5e_txq_to_ch_ix_htb()
   40  if (unlikely(txq >= num_channels << 3))  in mlx5e_txq_to_ch_ix_htb()
   41  txq %= num_channels;  in mlx5e_txq_to_ch_ix_htb()
   44  txq -= num_channels;  in mlx5e_txq_to_ch_ix_htb()
   45  while (txq >= num_channels);  in mlx5e_txq_to_ch_ix_htb()
   [all …]
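mlx5e_txq_to_ch_ix_htb() above dodges the integer divide in the common case: a queue number only a few multiples of num_channels is reduced by repeated subtraction, and '%' is reserved for outliers of eight multiples or more. The visible lines reassembled into one function, under an illustrative wrapper name:

    static inline u16 reduce_to_channels(u16 txq, u16 num_channels)
    {
        if (unlikely(txq >= num_channels)) {
            if (unlikely(txq >= num_channels << 3))
                txq %= num_channels;          /* rare: pay for one divide */
            else
                do
                    txq -= num_channels;      /* common: a few subtractions */
                while (txq >= num_channels);
        }
        return txq;
    }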
|
/linux-6.12.1/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
   76  int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)  in qede_free_tx_pkt() argument
   78  u16 idx = txq->sw_tx_cons;  in qede_free_tx_pkt()
   79  struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;  in qede_free_tx_pkt()
   84  bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;  in qede_free_tx_pkt()
   90  idx, txq->sw_tx_cons, txq->sw_tx_prod);  in qede_free_tx_pkt()
   96  first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
   104  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
   114  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
   120  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
   124  txq->sw_tx_ring.skbs[idx].skb = NULL;  in qede_free_tx_pkt()
   [all …]
|
D | qede_main.c |
   544  struct qede_fastpath *fp, struct qede_tx_queue *txq)  in qede_txq_fp_log_metadata() argument
   546  struct qed_chain *p_chain = &txq->tx_pbl;  in qede_txq_fp_log_metadata()
   551  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,  in qede_txq_fp_log_metadata()
   557  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,  in qede_txq_fp_log_metadata()
   562  qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)  in qede_tx_log_print() argument
   574  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);  in qede_tx_log_print()
   579  txq->index, le16_to_cpu(*txq->hw_cons_ptr),  in qede_tx_log_print()
   580  qed_chain_get_cons_idx(&txq->tx_pbl),  in qede_tx_log_print()
   581  qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);  in qede_tx_log_print()
   585  txq->index, fp->sb_info->igu_sb_id,  in qede_tx_log_print()
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/atheros/alx/ |
D | main.c |
   53  static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)  in alx_free_txbuf() argument
   55  struct alx_buffer *txb = &txq->bufs[entry];  in alx_free_txbuf()
   58  dma_unmap_single(txq->dev,  in alx_free_txbuf()
   149  return alx->qnapi[r_idx]->txq;  in alx_tx_queue_mapping()
   152  static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)  in alx_get_tx_queue() argument
   154  return netdev_get_tx_queue(txq->netdev, txq->queue_idx);  in alx_get_tx_queue()
   157  static inline int alx_tpd_avail(struct alx_tx_queue *txq)  in alx_tpd_avail() argument
   159  if (txq->write_idx >= txq->read_idx)  in alx_tpd_avail()
   160  return txq->count + txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
   161  return txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
   [all …]
|
/linux-6.12.1/include/trace/events/ |
D | qdisc.h |
   16  TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
   19  TP_ARGS(qdisc, txq, packets, skb),
   23  __field(const struct netdev_queue *, txq )
   35  __entry->txq = txq;
   38  __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
   41  __entry->txq_state = txq->state;
   51  TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
   53  TP_ARGS(qdisc, txq, skb),
   57  __field(const struct netdev_queue *, txq)
   66  __entry->txq = txq;
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/freescale/ |
D | fec_main.c |
   307  #define IS_TSO_HEADER(txq, addr) \  argument
   308  ((addr >= txq->tso_hdrs_dma) && \
   309  (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
   333  static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)  in fec_enet_get_free_txdesc_num() argument
   337  entries = (((const char *)txq->dirty_tx -  in fec_enet_get_free_txdesc_num()
   338  (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;  in fec_enet_get_free_txdesc_num()
   340  return entries >= 0 ? entries : entries + txq->bd.ring_size;  in fec_enet_get_free_txdesc_num()
   356  struct fec_enet_priv_tx_q *txq;  in fec_dump() local
   362  txq = fep->tx_queue[0];  in fec_dump()
   363  bdp = txq->bd.base;  in fec_dump()
   [all …]
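fec_enet_get_free_txdesc_num() above counts free descriptors by pointer arithmetic: the byte distance from the producer (bd.cur) to the cleanup pointer (dirty_tx), shifted right by log2 of the descriptor size, minus one reserved slot; a negative result means the region wrapped, so the ring size is added back. The same math as a standalone sketch with illustrative parameters:

    static int free_txdesc(const char *dirty_tx, const char *cur,
                           unsigned int dsize_log2, int ring_size)
    {
        /* bytes between cleanup and producer, converted to descriptors */
        int entries = (int)((dirty_tx - cur) >> dsize_log2) - 1;

        return entries >= 0 ? entries : entries + ring_size;
    }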
|
/linux-6.12.1/drivers/bluetooth/ |
D | btintel_pcie.c |
   109  static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,  in btintel_pcie_prepare_tx() argument
   115  tfd = &txq->tfds[tfd_index];  in btintel_pcie_prepare_tx()
   118  buf = &txq->bufs[tfd_index];  in btintel_pcie_prepare_tx()
   132  struct txq *txq = &data->txq;  in btintel_pcie_send_sync() local
   136  if (tfd_index > txq->count)  in btintel_pcie_send_sync()
   142  btintel_pcie_prepare_tx(txq, tfd_index, skb);  in btintel_pcie_send_sync()
   144  tfd_index = (tfd_index + 1) % txq->count;  in btintel_pcie_send_sync()
   341  struct txq *txq;  in btintel_pcie_msix_tx_handle() local
   350  txq = &data->txq;  in btintel_pcie_msix_tx_handle()
   356  urbd0 = &txq->urbd0s[cr_tia];  in btintel_pcie_msix_tx_handle()
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c |
   1133  static void txq_stop(struct sge_eth_txq *txq)  in txq_stop() argument
   1135  netif_tx_stop_queue(txq->txq);  in txq_stop()
   1136  txq->q.stops++;  in txq_stop()
   1164  struct sge_eth_txq *txq;  in t4vf_eth_xmit() local
   1195  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in t4vf_eth_xmit()
   1205  reclaim_completed_tx(adapter, &txq->q, true);  in t4vf_eth_xmit()
   1214  credits = txq_avail(&txq->q) - ndesc;  in t4vf_eth_xmit()
   1223  txq_stop(txq);  in t4vf_eth_xmit()
   1237  txq->mapping_err++;  in t4vf_eth_xmit()
   1252  txq_stop(txq);  in t4vf_eth_xmit()
   [all …]
|
/linux-6.12.1/drivers/net/wireless/mediatek/mt76/ |
D | tx.c |
   9  mt76_txq_get_qid(struct ieee80211_txq *txq)  in mt76_txq_get_qid() argument
   11  if (!txq->sta)  in mt76_txq_get_qid()
   14  return txq->ac;  in mt76_txq_get_qid()
   21  struct ieee80211_txq *txq;  in mt76_tx_check_agg_ssn() local
   30  txq = sta->txq[tid];  in mt76_tx_check_agg_ssn()
   31  mtxq = (struct mt76_txq *)txq->drv_priv;  in mt76_tx_check_agg_ssn()
   374  struct ieee80211_txq *txq = mtxq_to_txq(mtxq);  in mt76_txq_dequeue() local
   378  skb = ieee80211_tx_dequeue(phy->hw, txq);  in mt76_txq_dequeue()
   418  struct ieee80211_txq *txq = sta->txq[i];  in mt76_release_buffered_frames() local
   419  struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;  in mt76_release_buffered_frames()
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/fungible/funeth/ |
D | funeth_trace.h |
   15  TP_PROTO(const struct funeth_txq *txq,
   20  TP_ARGS(txq, len, sqe_idx, ngle),
   27  __string(devname, txq->netdev->name)
   31  __entry->qidx = txq->qidx;
   45  TP_PROTO(const struct funeth_txq *txq,
   50  TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
   57  __string(devname, txq->netdev->name)
   61  __entry->qidx = txq->qidx;
|
/linux-6.12.1/drivers/net/ethernet/hisilicon/ |
D | hisi_femac.c |
   120  struct hisi_femac_queue txq;  member
   147  dma_addr = priv->txq.dma_phys[pos];  in hisi_femac_tx_dma_unmap()
   155  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_xmit_reclaim() local
   163  skb = txq->skb[txq->tail];  in hisi_femac_xmit_reclaim()
   169  hisi_femac_tx_dma_unmap(priv, skb, txq->tail);  in hisi_femac_xmit_reclaim()
   177  txq->skb[txq->tail] = NULL;  in hisi_femac_xmit_reclaim()
   178  txq->tail = (txq->tail + 1) % txq->num;  in hisi_femac_xmit_reclaim()
   372  ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);  in hisi_femac_init_tx_and_rx_queues()
   387  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_free_skb_rings() local
   412  pos = txq->tail;  in hisi_femac_free_skb_rings()
   [all …]
|
/linux-6.12.1/net/sched/ |
D | sch_generic.c |
   40  const struct netdev_queue *txq)  in qdisc_maybe_clear_missed() argument
   54  if (!netif_xmit_frozen_or_stopped(txq))  in qdisc_maybe_clear_missed()
   75  const struct netdev_queue *txq = q->dev_queue;  in __skb_dequeue_bad_txq() local
   87  txq = skb_get_tx_queue(txq->dev, skb);  in __skb_dequeue_bad_txq()
   88  if (!netif_xmit_frozen_or_stopped(txq)) {  in __skb_dequeue_bad_txq()
   99  qdisc_maybe_clear_missed(q, txq);  in __skb_dequeue_bad_txq()
   181  const struct netdev_queue *txq,  in try_bulk_dequeue_skb() argument
   184  int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;  in try_bulk_dequeue_skb()
   232  const struct netdev_queue *txq = q->dev_queue;  in dequeue_skb() local
   260  txq = skb_get_tx_queue(txq->dev, skb);  in dequeue_skb()
   [all …]
|
/linux-6.12.1/drivers/net/ethernet/intel/idpf/ |
D | idpf_txrx.h |
   116  #define IDPF_DESC_UNUSED(txq) \  argument
   117  ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
   118  (txq)->next_to_clean - (txq)->next_to_use - 1)
   120  #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)  argument
   121  #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \  argument
   122  (txq)->desc_count >> 2)
   128  #define IDPF_TX_COMPLQ_PENDING(txq) \  argument
   129  (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
   131  (txq)->num_completions_pending - (txq)->complq->num_completions)
   135  #define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \  argument
   [all …]
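IDPF_DESC_UNUSED above is the classic free-slot formula for a single-producer/single-consumer ring, keeping one slot permanently empty so a full ring is distinguishable from an empty one. The macro rewritten as a function, plus a worked case: desc_count = 256, next_to_clean = 10, next_to_use = 250 gives 256 + 10 - 250 - 1 = 15 free slots.

    static u32 desc_unused(u32 next_to_clean, u32 next_to_use, u32 desc_count)
    {
        /* add the ring size back only when the clean index trails use */
        return (next_to_clean > next_to_use ? 0 : desc_count) +
               next_to_clean - next_to_use - 1;
    }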
|
/linux-6.12.1/drivers/net/wireless/ath/ath5k/ |
D | base.c |
   769  struct ath5k_txq *txq, int padsize,  in ath5k_txbuf_setup() argument
   875  spin_lock_bh(&txq->lock);  in ath5k_txbuf_setup()
   876  list_add_tail(&bf->list, &txq->q);  in ath5k_txbuf_setup()
   877  txq->txq_len++;  in ath5k_txbuf_setup()
   878  if (txq->link == NULL) /* is this first packet? */  in ath5k_txbuf_setup()
   879  ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);  in ath5k_txbuf_setup()
   881  *txq->link = bf->daddr;  in ath5k_txbuf_setup()
   883  txq->link = &ds->ds_link;  in ath5k_txbuf_setup()
   884  ath5k_hw_start_tx_dma(ah, txq->qnum);  in ath5k_txbuf_setup()
   885  spin_unlock_bh(&txq->lock);  in ath5k_txbuf_setup()
   [all …]
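ath5k_txbuf_setup() above appends to a hardware-walked descriptor chain: txq->link caches the previous descriptor's ds_link word, so an empty queue programs the TX descriptor pointer register directly while a non-empty one patches the new buffer's DMA address into that link field. A loose sketch of the append step; hw_set_txdp() and hw_start_tx_dma() are hypothetical stand-ins for the ath5k_hw_* calls:

    static void txq_append(struct my_txq *txq, struct my_buf *bf)
    {
        if (txq->link == NULL)               /* queue was empty */
            hw_set_txdp(txq->qnum, bf->daddr);
        else
            *txq->link = bf->daddr;          /* chain previous -> new */
        txq->link = &bf->desc->ds_link;      /* remember our link word */
        hw_start_tx_dma(txq->qnum);
    }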
|
/linux-6.12.1/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c |
   176  return container_of(q, struct sge_qset, txq[qidx]);  in txq_to_qset()
   654  memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);  in t3_reset_qset()
   691  if (q->txq[i].desc) {  in t3_free_qset()
   693  t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);  in t3_free_qset()
   695  if (q->txq[i].sdesc) {  in t3_free_qset()
   696  free_tx_desc(adapter, &q->txq[i],  in t3_free_qset()
   697  q->txq[i].in_use);  in t3_free_qset()
   698  kfree(q->txq[i].sdesc);  in t3_free_qset()
   701  q->txq[i].size *  in t3_free_qset()
   703  q->txq[i].desc, q->txq[i].phys_addr);  in t3_free_qset()
   [all …]
|