Lines Matching +full:num +full:- +full:txq
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
19 #include "iwl-fh.h"
20 #include "iwl-debug.h"
21 #include "iwl-csr.h"
22 #include "iwl-prph.h"
23 #include "iwl-io.h"
24 #include "iwl-scd.h"
25 #include "iwl-op-mode.h"
29 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
55 if (WARN_ON(ptr->addr)) in iwl_pcie_alloc_dma_ptr()
56 return -EINVAL; in iwl_pcie_alloc_dma_ptr()
58 ptr->addr = dma_alloc_coherent(trans->dev, size, in iwl_pcie_alloc_dma_ptr()
59 &ptr->dma, GFP_KERNEL); in iwl_pcie_alloc_dma_ptr()
60 if (!ptr->addr) in iwl_pcie_alloc_dma_ptr()
61 return -ENOMEM; in iwl_pcie_alloc_dma_ptr()
62 ptr->size = size; in iwl_pcie_alloc_dma_ptr()
68 if (unlikely(!ptr->addr)) in iwl_pcie_free_dma_ptr()
71 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); in iwl_pcie_free_dma_ptr()
76 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
79 struct iwl_txq *txq) in iwl_pcie_txq_inc_wr_ptr() argument
83 int txq_id = txq->id; in iwl_pcie_txq_inc_wr_ptr()
85 lockdep_assert_held(&txq->lock); in iwl_pcie_txq_inc_wr_ptr()
93 if (!trans->trans_cfg->base_params->shadow_reg_enable && in iwl_pcie_txq_inc_wr_ptr()
94 txq_id != trans_pcie->txqs.cmd.q_id && in iwl_pcie_txq_inc_wr_ptr()
95 test_bit(STATUS_TPOWER_PMI, &trans->status)) { in iwl_pcie_txq_inc_wr_ptr()
108 txq->need_update = true; in iwl_pcie_txq_inc_wr_ptr()
114 * if not in power-save mode, uCode will never sleep when we're in iwl_pcie_txq_inc_wr_ptr()
117 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); in iwl_pcie_txq_inc_wr_ptr()
118 if (!txq->block) in iwl_pcie_txq_inc_wr_ptr()
120 txq->write_ptr | (txq_id << 8)); in iwl_pcie_txq_inc_wr_ptr()
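The fragments above show two paths for publishing a new TFD write index: when shadow registers are not enabled and the device may be in power-save (STATUS_TPOWER_PMI), the update is deferred by setting need_update and flushed later from iwl_pcie_txq_check_wrptrs(); otherwise the index is written straight to the hardware. A minimal sketch of the register value, with illustrative names rather than driver code:

#include <stdint.h>

/*
 * Sketch only: the value written to HBUS_TARG_WRPTR packs the TFD write
 * index into the low byte and the queue id into the bits above it,
 * matching txq->write_ptr | (txq_id << 8) above.
 */
static inline uint32_t example_targ_wrptr(uint32_t txq_id, uint32_t write_ptr)
{
	return write_ptr | (txq_id << 8);
}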
128 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_pcie_txq_check_wrptrs()
129 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; in iwl_pcie_txq_check_wrptrs() local
131 if (!test_bit(i, trans_pcie->txqs.queue_used)) in iwl_pcie_txq_check_wrptrs()
134 spin_lock_bh(&txq->lock); in iwl_pcie_txq_check_wrptrs()
135 if (txq->need_update) { in iwl_pcie_txq_check_wrptrs()
136 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_pcie_txq_check_wrptrs()
137 txq->need_update = false; in iwl_pcie_txq_check_wrptrs()
139 spin_unlock_bh(&txq->lock); in iwl_pcie_txq_check_wrptrs()
146 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; in iwl_pcie_gen1_tfd_set_tb()
149 put_unaligned_le32(addr, &tb->lo); in iwl_pcie_gen1_tfd_set_tb()
152 tb->hi_n_len = cpu_to_le16(hi_n_len); in iwl_pcie_gen1_tfd_set_tb()
154 tfd->num_tbs = idx + 1; in iwl_pcie_gen1_tfd_set_tb()
159 return tfd->num_tbs & 0x1f; in iwl_txq_gen1_tfd_get_num_tbs()
162 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_pcie_txq_build_tfd() argument
169 tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr; in iwl_pcie_txq_build_tfd()
172 memset(tfd, 0, trans_pcie->txqs.tfd.size); in iwl_pcie_txq_build_tfd()
177 if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) { in iwl_pcie_txq_build_tfd()
179 trans_pcie->txqs.tfd.max_tbs); in iwl_pcie_txq_build_tfd()
180 return -EINVAL; in iwl_pcie_txq_build_tfd()
185 return -EINVAL; in iwl_pcie_txq_build_tfd()
196 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) in iwl_pcie_clear_cmd_in_flight()
199 spin_lock(&trans_pcie->reg_lock); in iwl_pcie_clear_cmd_in_flight()
201 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { in iwl_pcie_clear_cmd_in_flight()
202 spin_unlock(&trans_pcie->reg_lock); in iwl_pcie_clear_cmd_in_flight()
206 trans_pcie->cmd_hold_nic_awake = false; in iwl_pcie_clear_cmd_in_flight()
209 spin_unlock(&trans_pcie->reg_lock); in iwl_pcie_clear_cmd_in_flight()
218 if (refcount_dec_and_test(&info->use_count)) { in iwl_pcie_free_and_unmap_tso_page()
219 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE, in iwl_pcie_free_and_unmap_tso_page()
233 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); in iwl_pcie_free_tso_pages()
242 next = info->next; in iwl_pcie_free_tso_pages()
245 if (!next && cmd_meta->sg_offset) { in iwl_pcie_free_tso_pages()
249 cmd_meta->sg_offset); in iwl_pcie_free_tso_pages()
251 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0); in iwl_pcie_free_tso_pages()
261 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; in iwl_txq_gen1_tfd_tb_get_addr()
265 addr = get_unaligned_le32(&tb->lo); in iwl_txq_gen1_tfd_tb_get_addr()
270 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; in iwl_txq_gen1_tfd_tb_get_addr()
273 * shift by 16 twice to avoid warnings on 32-bit in iwl_txq_gen1_tfd_tb_get_addr()
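A standalone sketch of the gen1 TB layout that iwl_pcie_gen1_tfd_set_tb() writes and iwl_txq_gen1_tfd_tb_get_addr() reads back: the low 32 address bits live in tb->lo, while hi_n_len carries DMA address bits 35:32 in its low nibble and the buffer length in the upper 12 bits, which is why the code masks with 0xF and shifts by 16 twice. Types and names below are illustrative, not the driver's:

#include <stdint.h>

struct example_tb {
	uint32_t lo;        /* DMA address bits 31:0 */
	uint16_t hi_n_len;  /* bits 3:0 = DMA address bits 35:32, bits 15:4 = length */
};

static void example_tb_set(struct example_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xf));
}

static uint64_t example_tb_addr(const struct example_tb *tb)
{
	/* two 16-bit shifts, mirroring the 32-bit-friendly form above */
	return tb->lo | ((uint64_t)(tb->hi_n_len & 0xf) << 16 << 16);
}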
283 tfd->num_tbs = 0; in iwl_txq_set_tfd_invalid_gen1()
285 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, in iwl_txq_set_tfd_invalid_gen1()
286 trans->invalid_tx_cmd.size); in iwl_txq_set_tfd_invalid_gen1()
291 struct iwl_txq *txq, int index) in iwl_txq_gen1_tfd_unmap() argument
295 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
300 if (num_tbs > trans_pcie->txqs.tfd.max_tbs) { in iwl_txq_gen1_tfd_unmap()
307 if (meta->sg_offset) in iwl_txq_gen1_tfd_unmap()
310 /* first TB is never freed - it's the bidirectional DMA data */ in iwl_txq_gen1_tfd_unmap()
313 if (meta->tbs & BIT(i)) in iwl_txq_gen1_tfd_unmap()
314 dma_unmap_page(trans->dev, in iwl_txq_gen1_tfd_unmap()
320 dma_unmap_single(trans->dev, in iwl_txq_gen1_tfd_unmap()
327 meta->tbs = 0; in iwl_txq_gen1_tfd_unmap()
333 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
335 * @txq: tx queue
336 * @read_ptr: the TXQ read_ptr to free
341 static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_free_tfd() argument
347 int idx = iwl_txq_get_cmd_index(txq, read_ptr); in iwl_txq_free_tfd()
350 lockdep_assert_held(&txq->reclaim_lock); in iwl_txq_free_tfd()
352 if (!txq->entries) in iwl_txq_free_tfd()
355 /* We have only q->n_window txq->entries, but we use in iwl_txq_free_tfd()
358 if (trans->trans_cfg->gen2) in iwl_txq_free_tfd()
359 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
360 iwl_txq_get_tfd(trans, txq, read_ptr)); in iwl_txq_free_tfd()
362 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
363 txq, read_ptr); in iwl_txq_free_tfd()
366 skb = txq->entries[idx].skb; in iwl_txq_free_tfd()
368 /* Can be called from irqs-disabled context in iwl_txq_free_tfd()
370 * freed and that the queue is not empty - free the skb in iwl_txq_free_tfd()
373 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_free_tfd()
374 txq->entries[idx].skb = NULL; in iwl_txq_free_tfd()
379 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
384 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_pcie_txq_unmap() local
386 if (!txq) { in iwl_pcie_txq_unmap()
391 spin_lock_bh(&txq->reclaim_lock); in iwl_pcie_txq_unmap()
392 spin_lock(&txq->lock); in iwl_pcie_txq_unmap()
393 while (txq->write_ptr != txq->read_ptr) { in iwl_pcie_txq_unmap()
395 txq_id, txq->read_ptr); in iwl_pcie_txq_unmap()
397 if (txq_id != trans_pcie->txqs.cmd.q_id) { in iwl_pcie_txq_unmap()
398 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; in iwl_pcie_txq_unmap()
400 &txq->entries[txq->read_ptr].meta; in iwl_pcie_txq_unmap()
407 iwl_txq_free_tfd(trans, txq, txq->read_ptr); in iwl_pcie_txq_unmap()
408 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_pcie_txq_unmap()
410 if (txq->read_ptr == txq->write_ptr && in iwl_pcie_txq_unmap()
411 txq_id == trans_pcie->txqs.cmd.q_id) in iwl_pcie_txq_unmap()
415 while (!skb_queue_empty(&txq->overflow_q)) { in iwl_pcie_txq_unmap()
416 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); in iwl_pcie_txq_unmap()
418 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_pcie_txq_unmap()
421 spin_unlock(&txq->lock); in iwl_pcie_txq_unmap()
422 spin_unlock_bh(&txq->reclaim_lock); in iwl_pcie_txq_unmap()
424 /* just in case - this queue may have been stopped */ in iwl_pcie_txq_unmap()
425 iwl_trans_pcie_wake_queue(trans, txq); in iwl_pcie_txq_unmap()
429 * iwl_pcie_txq_free - Deallocate DMA queue.
430 * @txq: Transmit queue to deallocate.
434 * 0-fill, but do not free "txq" descriptor structure.
439 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_pcie_txq_free() local
440 struct device *dev = trans->dev; in iwl_pcie_txq_free()
443 if (WARN_ON(!txq)) in iwl_pcie_txq_free()
448 /* De-alloc array of command/tx buffers */ in iwl_pcie_txq_free()
449 if (txq_id == trans_pcie->txqs.cmd.q_id) in iwl_pcie_txq_free()
450 for (i = 0; i < txq->n_window; i++) { in iwl_pcie_txq_free()
451 kfree_sensitive(txq->entries[i].cmd); in iwl_pcie_txq_free()
452 kfree_sensitive(txq->entries[i].free_buf); in iwl_pcie_txq_free()
455 /* De-alloc circular buffer of TFDs */ in iwl_pcie_txq_free()
456 if (txq->tfds) { in iwl_pcie_txq_free()
458 trans_pcie->txqs.tfd.size * in iwl_pcie_txq_free()
459 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_txq_free()
460 txq->tfds, txq->dma_addr); in iwl_pcie_txq_free()
461 txq->dma_addr = 0; in iwl_pcie_txq_free()
462 txq->tfds = NULL; in iwl_pcie_txq_free()
465 sizeof(*txq->first_tb_bufs) * txq->n_window, in iwl_pcie_txq_free()
466 txq->first_tb_bufs, txq->first_tb_dma); in iwl_pcie_txq_free()
469 kfree(txq->entries); in iwl_pcie_txq_free()
470 txq->entries = NULL; in iwl_pcie_txq_free()
472 del_timer_sync(&txq->stuck_timer); in iwl_pcie_txq_free()
474 /* 0-fill queue descriptor structure */ in iwl_pcie_txq_free()
475 memset(txq, 0, sizeof(*txq)); in iwl_pcie_txq_free()
481 int nq = trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_start()
484 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - in iwl_pcie_tx_start()
488 memset(trans_pcie->txqs.queue_stopped, 0, in iwl_pcie_tx_start()
489 sizeof(trans_pcie->txqs.queue_stopped)); in iwl_pcie_tx_start()
490 memset(trans_pcie->txqs.queue_used, 0, in iwl_pcie_tx_start()
491 sizeof(trans_pcie->txqs.queue_used)); in iwl_pcie_tx_start()
493 trans_pcie->scd_base_addr = in iwl_pcie_tx_start()
497 scd_base_addr != trans_pcie->scd_base_addr); in iwl_pcie_tx_start()
500 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + in iwl_pcie_tx_start()
505 trans_pcie->txqs.scd_bc_tbls.dma >> 10); in iwl_pcie_tx_start()
510 if (trans->trans_cfg->base_params->scd_chain_ext_wa) in iwl_pcie_tx_start()
513 iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, in iwl_pcie_tx_start()
514 trans_pcie->txqs.cmd.fifo, in iwl_pcie_tx_start()
515 trans_pcie->txqs.cmd.wdg_timeout); in iwl_pcie_tx_start()
531 /* Enable L1-Active */ in iwl_pcie_tx_start()
532 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) in iwl_pcie_tx_start()
546 if (WARN_ON_ONCE(trans->trans_cfg->gen2)) in iwl_trans_pcie_tx_reset()
549 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_trans_pcie_tx_reset()
551 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_trans_pcie_tx_reset() local
552 if (trans->trans_cfg->gen2) in iwl_trans_pcie_tx_reset()
555 txq->dma_addr); in iwl_trans_pcie_tx_reset()
559 txq->dma_addr >> 8); in iwl_trans_pcie_tx_reset()
561 txq->read_ptr = 0; in iwl_trans_pcie_tx_reset()
562 txq->write_ptr = 0; in iwl_trans_pcie_tx_reset()
567 trans_pcie->kw.dma >> 4); in iwl_trans_pcie_tx_reset()
583 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_tx_stop_fh()
604 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_tx_stop_fh()
608 * iwl_pcie_tx_stop - Stop all Tx DMA channels
624 * Since we stop Tx altogether - mark the queues as stopped. in iwl_pcie_tx_stop()
626 memset(trans_pcie->txqs.queue_stopped, 0, in iwl_pcie_tx_stop()
627 sizeof(trans_pcie->txqs.queue_stopped)); in iwl_pcie_tx_stop()
628 memset(trans_pcie->txqs.queue_used, 0, in iwl_pcie_tx_stop()
629 sizeof(trans_pcie->txqs.queue_used)); in iwl_pcie_tx_stop()
632 if (!trans_pcie->txq_memory) in iwl_pcie_tx_stop()
636 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_stop()
644 * iwl_trans_tx_free - Free TXQ Context
653 memset(trans_pcie->txqs.queue_used, 0, in iwl_pcie_tx_free()
654 sizeof(trans_pcie->txqs.queue_used)); in iwl_pcie_tx_free()
657 if (trans_pcie->txq_memory) { in iwl_pcie_tx_free()
659 txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_free()
662 trans_pcie->txqs.txq[txq_id] = NULL; in iwl_pcie_tx_free()
666 kfree(trans_pcie->txq_memory); in iwl_pcie_tx_free()
667 trans_pcie->txq_memory = NULL; in iwl_pcie_tx_free()
669 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); in iwl_pcie_tx_free()
671 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls); in iwl_pcie_tx_free()
674 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
676 u32 txq_id = txq->id; in iwl_txq_log_scd_error()
681 if (trans->trans_cfg->gen2) { in iwl_txq_log_scd_error()
683 txq->read_ptr, txq->write_ptr); in iwl_txq_log_scd_error()
695 jiffies_to_msecs(txq->wd_timeout), in iwl_txq_log_scd_error()
696 txq->read_ptr, txq->write_ptr, in iwl_txq_log_scd_error()
698 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
700 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
706 struct iwl_txq *txq = from_timer(txq, t, stuck_timer); in iwl_txq_stuck_timer() local
707 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer()
709 spin_lock(&txq->lock); in iwl_txq_stuck_timer()
711 if (txq->read_ptr == txq->write_ptr) { in iwl_txq_stuck_timer()
712 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
715 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
717 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
722 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_pcie_txq_alloc() argument
726 size_t num_entries = trans->trans_cfg->gen2 ? in iwl_pcie_txq_alloc()
727 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_pcie_txq_alloc()
732 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) in iwl_pcie_txq_alloc()
733 return -EINVAL; in iwl_pcie_txq_alloc()
735 if (WARN_ON(txq->entries || txq->tfds)) in iwl_pcie_txq_alloc()
736 return -EINVAL; in iwl_pcie_txq_alloc()
738 tfd_sz = trans_pcie->txqs.tfd.size * num_entries; in iwl_pcie_txq_alloc()
740 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); in iwl_pcie_txq_alloc()
741 txq->trans = trans; in iwl_pcie_txq_alloc()
743 txq->n_window = slots_num; in iwl_pcie_txq_alloc()
745 txq->entries = kcalloc(slots_num, in iwl_pcie_txq_alloc()
749 if (!txq->entries) in iwl_pcie_txq_alloc()
754 txq->entries[i].cmd = in iwl_pcie_txq_alloc()
757 if (!txq->entries[i].cmd) in iwl_pcie_txq_alloc()
764 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_pcie_txq_alloc()
765 &txq->dma_addr, GFP_KERNEL); in iwl_pcie_txq_alloc()
766 if (!txq->tfds) in iwl_pcie_txq_alloc()
769 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); in iwl_pcie_txq_alloc()
771 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; in iwl_pcie_txq_alloc()
773 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_pcie_txq_alloc()
774 &txq->first_tb_dma, in iwl_pcie_txq_alloc()
776 if (!txq->first_tb_bufs) in iwl_pcie_txq_alloc()
780 void *tfd = iwl_txq_get_tfd(trans, txq, i); in iwl_pcie_txq_alloc()
782 if (trans->trans_cfg->gen2) in iwl_pcie_txq_alloc()
790 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_pcie_txq_alloc()
791 txq->tfds = NULL; in iwl_pcie_txq_alloc()
793 if (txq->entries && cmd_queue) in iwl_pcie_txq_alloc()
795 kfree(txq->entries[i].cmd); in iwl_pcie_txq_alloc()
796 kfree(txq->entries); in iwl_pcie_txq_alloc()
797 txq->entries = NULL; in iwl_pcie_txq_alloc()
799 return -ENOMEM; in iwl_pcie_txq_alloc()
803 * iwl_pcie_tx_alloc - allocate TX context
811 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_alloc()
813 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) in iwl_pcie_tx_alloc()
814 return -EINVAL; in iwl_pcie_tx_alloc()
820 if (WARN_ON(trans_pcie->txq_memory)) { in iwl_pcie_tx_alloc()
821 ret = -EINVAL; in iwl_pcie_tx_alloc()
825 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls, in iwl_pcie_tx_alloc()
832 /* Alloc keep-warm buffer */ in iwl_pcie_tx_alloc()
833 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); in iwl_pcie_tx_alloc()
839 trans_pcie->txq_memory = in iwl_pcie_tx_alloc()
840 kcalloc(trans->trans_cfg->base_params->num_of_queues, in iwl_pcie_tx_alloc()
842 if (!trans_pcie->txq_memory) { in iwl_pcie_tx_alloc()
843 IWL_ERR(trans, "Not enough memory for txq\n"); in iwl_pcie_tx_alloc()
844 ret = -ENOMEM; in iwl_pcie_tx_alloc()
849 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_alloc()
851 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); in iwl_pcie_tx_alloc()
855 trans->cfg->min_txq_size); in iwl_pcie_tx_alloc()
858 trans->cfg->min_ba_txq_size); in iwl_pcie_tx_alloc()
859 trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; in iwl_pcie_tx_alloc()
860 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], in iwl_pcie_tx_alloc()
866 trans_pcie->txqs.txq[txq_id]->id = txq_id; in iwl_pcie_tx_alloc()
878 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
882 q->n_window = slots_num; in iwl_queue_init()
884 /* slots_num must be power-of-two size, otherwise in iwl_queue_init()
888 return -EINVAL; in iwl_queue_init()
890 q->low_mark = q->n_window / 4; in iwl_queue_init()
891 if (q->low_mark < 4) in iwl_queue_init()
892 q->low_mark = 4; in iwl_queue_init()
894 q->high_mark = q->n_window / 8; in iwl_queue_init()
895 if (q->high_mark < 2) in iwl_queue_init()
896 q->high_mark = 2; in iwl_queue_init()
898 q->write_ptr = 0; in iwl_queue_init()
899 q->read_ptr = 0; in iwl_queue_init()
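As a worked example of the clamps above: with n_window = 256 the marks come out as low_mark = 64 and high_mark = 32, and with n_window = 32 they come out as 8 and 4. The TX path stops a queue once free space falls below high_mark and wakes it again when reclaim brings free space back above low_mark (see iwl_trans_pcie_tx() and iwl_pcie_reclaim() further down).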
904 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_init() argument
908 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_init()
911 txq->need_update = false; in iwl_txq_init()
913 /* max_tfd_queue_size must be power-of-two size, otherwise in iwl_txq_init()
916 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), in iwl_txq_init()
919 return -EINVAL; in iwl_txq_init()
921 /* Initialize queue's high/low-water marks, and head/tail indexes */ in iwl_txq_init()
922 ret = iwl_queue_init(txq, slots_num); in iwl_txq_init()
926 spin_lock_init(&txq->lock); in iwl_txq_init()
927 spin_lock_init(&txq->reclaim_lock); in iwl_txq_init()
932 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); in iwl_txq_init()
935 __skb_queue_head_init(&txq->overflow_q); in iwl_txq_init()
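The power-of-two checks above (for both slots_num and max_tfd_queue_size) exist so that ring indexes can wrap with a mask instead of a modulo, which is what iwl_txq_inc_wrap() relies on. A minimal sketch with a hypothetical helper name:

#include <stdint.h>

/*
 * Sketch only: advance a ring index in a queue whose size is a power of
 * two, wrapping with a mask. This silently breaks for other sizes, which
 * is exactly what the WARN_ONCE checks above guard against.
 */
static inline uint32_t example_inc_wrap(uint32_t index, uint32_t queue_size)
{
	return (index + 1) & (queue_size - 1);
}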
947 if (!trans_pcie->txq_memory) { in iwl_pcie_tx_init()
954 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_tx_init()
961 trans_pcie->kw.dma >> 4); in iwl_pcie_tx_init()
963 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_tx_init()
966 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_init()
968 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); in iwl_pcie_tx_init()
972 trans->cfg->min_txq_size); in iwl_pcie_tx_init()
975 trans->cfg->min_ba_txq_size); in iwl_pcie_tx_init()
976 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, in iwl_pcie_tx_init()
990 trans_pcie->txqs.txq[txq_id]->dma_addr >> 8); in iwl_pcie_tx_init()
994 if (trans->trans_cfg->base_params->num_of_queues > 20) in iwl_pcie_tx_init()
1012 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_pcie_set_cmd_in_flight()
1013 return -ENODEV; in iwl_pcie_set_cmd_in_flight()
1015 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) in iwl_pcie_set_cmd_in_flight()
1020 * command - we will let the NIC sleep once all the host commands in iwl_pcie_set_cmd_in_flight()
1025 return -EIO; in iwl_pcie_set_cmd_in_flight()
1032 trans_pcie->cmd_hold_nic_awake = true; in iwl_pcie_set_cmd_in_flight()
1033 spin_unlock(&trans_pcie->reg_lock); in iwl_pcie_set_cmd_in_flight()
1038 static void iwl_txq_progress(struct iwl_txq *txq) in iwl_txq_progress() argument
1040 lockdep_assert_held(&txq->lock); in iwl_txq_progress()
1042 if (!txq->wd_timeout) in iwl_txq_progress()
1046 * station is asleep and we send data - that must in iwl_txq_progress()
1047 * be uAPSD or PS-Poll. Don't rearm the timer. in iwl_txq_progress()
1049 if (txq->frozen) in iwl_txq_progress()
1056 if (txq->read_ptr == txq->write_ptr) in iwl_txq_progress()
1057 del_timer(&txq->stuck_timer); in iwl_txq_progress()
1059 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_txq_progress()
1075 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1084 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_pcie_cmdq_reclaim() local
1088 lockdep_assert_held(&txq->lock); in iwl_pcie_cmdq_reclaim()
1090 idx = iwl_txq_get_cmd_index(txq, idx); in iwl_pcie_cmdq_reclaim()
1091 r = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_pcie_cmdq_reclaim()
1093 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || in iwl_pcie_cmdq_reclaim()
1094 (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { in iwl_pcie_cmdq_reclaim()
1095 WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), in iwl_pcie_cmdq_reclaim()
1096 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", in iwl_pcie_cmdq_reclaim()
1098 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_cmdq_reclaim()
1099 txq->write_ptr, txq->read_ptr); in iwl_pcie_cmdq_reclaim()
1105 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_pcie_cmdq_reclaim()
1109 idx, txq->write_ptr, r); in iwl_pcie_cmdq_reclaim()
1114 if (txq->read_ptr == txq->write_ptr) in iwl_pcie_cmdq_reclaim()
1117 iwl_txq_progress(txq); in iwl_pcie_cmdq_reclaim()
1130 tbl_dw_addr = trans_pcie->scd_base_addr + in iwl_pcie_txq_set_ratid_map()
1154 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_trans_pcie_txq_enable() local
1155 int fifo = -1; in iwl_trans_pcie_txq_enable()
1158 if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used)) in iwl_trans_pcie_txq_enable()
1159 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); in iwl_trans_pcie_txq_enable()
1161 txq->wd_timeout = msecs_to_jiffies(wdg_timeout); in iwl_trans_pcie_txq_enable()
1164 fifo = cfg->fifo; in iwl_trans_pcie_txq_enable()
1167 if (txq_id == trans_pcie->txqs.cmd.q_id && in iwl_trans_pcie_txq_enable()
1168 trans_pcie->scd_set_active) in iwl_trans_pcie_txq_enable()
1174 /* Set this queue as a chain-building queue unless it is CMD */ in iwl_trans_pcie_txq_enable()
1175 if (txq_id != trans_pcie->txqs.cmd.q_id) in iwl_trans_pcie_txq_enable()
1178 if (cfg->aggregate) { in iwl_trans_pcie_txq_enable()
1179 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); in iwl_trans_pcie_txq_enable()
1181 /* Map receiver-address / traffic-ID to this queue */ in iwl_trans_pcie_txq_enable()
1186 txq->ampdu = true; in iwl_trans_pcie_txq_enable()
1191 * since it is now a non-AGG queue. in iwl_trans_pcie_txq_enable()
1195 ssn = txq->read_ptr; in iwl_trans_pcie_txq_enable()
1209 scd_bug = !trans->trans_cfg->mq_rx_supported && in iwl_trans_pcie_txq_enable()
1210 !((ssn - txq->write_ptr) & 0x3f) && in iwl_trans_pcie_txq_enable()
1211 (ssn != txq->write_ptr); in iwl_trans_pcie_txq_enable()
1218 txq->read_ptr = (ssn & 0xff); in iwl_trans_pcie_txq_enable()
1219 txq->write_ptr = (ssn & 0xff); in iwl_trans_pcie_txq_enable()
1224 u8 frame_limit = cfg->frame_limit; in iwl_trans_pcie_txq_enable()
1229 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + in iwl_trans_pcie_txq_enable()
1232 trans_pcie->scd_base_addr + in iwl_trans_pcie_txq_enable()
1240 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | in iwl_trans_pcie_txq_enable()
1245 if (txq_id == trans_pcie->txqs.cmd.q_id && in iwl_trans_pcie_txq_enable()
1246 trans_pcie->scd_set_active) in iwl_trans_pcie_txq_enable()
1265 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_trans_pcie_txq_set_shared_mode() local
1267 txq->ampdu = !shared_mode; in iwl_trans_pcie_txq_set_shared_mode()
1274 u32 stts_addr = trans_pcie->scd_base_addr + in iwl_trans_pcie_txq_disable()
1278 trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0; in iwl_trans_pcie_txq_disable()
1279 trans_pcie->txqs.txq[txq_id]->frozen = false; in iwl_trans_pcie_txq_disable()
1282 * Upon HW Rfkill - we stop the device, and then stop the queues in iwl_trans_pcie_txq_disable()
1287 if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) { in iwl_trans_pcie_txq_disable()
1288 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), in iwl_trans_pcie_txq_disable()
1301 trans_pcie->txqs.txq[txq_id]->ampdu = false; in iwl_trans_pcie_txq_disable()
1313 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_trans_pcie_block_txq_ptrs()
1314 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; in iwl_trans_pcie_block_txq_ptrs() local
1316 if (i == trans_pcie->txqs.cmd.q_id) in iwl_trans_pcie_block_txq_ptrs()
1320 spin_lock_nested(&txq->lock, 1); in iwl_trans_pcie_block_txq_ptrs()
1322 if (!block && !(WARN_ON_ONCE(!txq->block))) { in iwl_trans_pcie_block_txq_ptrs()
1323 txq->block--; in iwl_trans_pcie_block_txq_ptrs()
1324 if (!txq->block) { in iwl_trans_pcie_block_txq_ptrs()
1326 txq->write_ptr | (i << 8)); in iwl_trans_pcie_block_txq_ptrs()
1329 txq->block++; in iwl_trans_pcie_block_txq_ptrs()
1332 spin_unlock(&txq->lock); in iwl_trans_pcie_block_txq_ptrs()
1337 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1349 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; in iwl_pcie_enqueue_hcmd() local
1357 u8 group_id = iwl_cmd_groupid(cmd->id); in iwl_pcie_enqueue_hcmd()
1364 if (WARN(!trans->wide_cmd_header && in iwl_pcie_enqueue_hcmd()
1366 "unsupported wide command %#x\n", cmd->id)) in iwl_pcie_enqueue_hcmd()
1367 return -EINVAL; in iwl_pcie_enqueue_hcmd()
1378 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); in iwl_pcie_enqueue_hcmd()
1381 cmddata[i] = cmd->data[i]; in iwl_pcie_enqueue_hcmd()
1382 cmdlen[i] = cmd->len[i]; in iwl_pcie_enqueue_hcmd()
1384 if (!cmd->len[i]) in iwl_pcie_enqueue_hcmd()
1389 int copy = IWL_FIRST_TB_SIZE - copy_size; in iwl_pcie_enqueue_hcmd()
1393 cmdlen[i] -= copy; in iwl_pcie_enqueue_hcmd()
1398 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { in iwl_pcie_enqueue_hcmd()
1400 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { in iwl_pcie_enqueue_hcmd()
1401 idx = -EINVAL; in iwl_pcie_enqueue_hcmd()
1404 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { in iwl_pcie_enqueue_hcmd()
1413 idx = -EINVAL; in iwl_pcie_enqueue_hcmd()
1420 return -ENOMEM; in iwl_pcie_enqueue_hcmd()
1424 idx = -EINVAL; in iwl_pcie_enqueue_hcmd()
1429 cmd_size += cmd->len[i]; in iwl_pcie_enqueue_hcmd()
1440 iwl_get_cmd_string(trans, cmd->id), in iwl_pcie_enqueue_hcmd()
1441 cmd->id, copy_size)) { in iwl_pcie_enqueue_hcmd()
1442 idx = -EINVAL; in iwl_pcie_enqueue_hcmd()
1446 spin_lock_irqsave(&txq->lock, flags); in iwl_pcie_enqueue_hcmd()
1448 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { in iwl_pcie_enqueue_hcmd()
1449 spin_unlock_irqrestore(&txq->lock, flags); in iwl_pcie_enqueue_hcmd()
1452 iwl_op_mode_cmd_queue_full(trans->op_mode); in iwl_pcie_enqueue_hcmd()
1453 idx = -ENOSPC; in iwl_pcie_enqueue_hcmd()
1457 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_pcie_enqueue_hcmd()
1458 out_cmd = txq->entries[idx].cmd; in iwl_pcie_enqueue_hcmd()
1459 out_meta = &txq->entries[idx].meta; in iwl_pcie_enqueue_hcmd()
1461 /* re-initialize, this also marks the SG list as unused */ in iwl_pcie_enqueue_hcmd()
1463 if (cmd->flags & CMD_WANT_SKB) in iwl_pcie_enqueue_hcmd()
1464 out_meta->source = cmd; in iwl_pcie_enqueue_hcmd()
1468 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); in iwl_pcie_enqueue_hcmd()
1469 out_cmd->hdr_wide.group_id = group_id; in iwl_pcie_enqueue_hcmd()
1470 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); in iwl_pcie_enqueue_hcmd()
1471 out_cmd->hdr_wide.length = in iwl_pcie_enqueue_hcmd()
1472 cpu_to_le16(cmd_size - in iwl_pcie_enqueue_hcmd()
1474 out_cmd->hdr_wide.reserved = 0; in iwl_pcie_enqueue_hcmd()
1475 out_cmd->hdr_wide.sequence = in iwl_pcie_enqueue_hcmd()
1476 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | in iwl_pcie_enqueue_hcmd()
1477 INDEX_TO_SEQ(txq->write_ptr)); in iwl_pcie_enqueue_hcmd()
1482 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); in iwl_pcie_enqueue_hcmd()
1483 out_cmd->hdr.sequence = in iwl_pcie_enqueue_hcmd()
1484 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | in iwl_pcie_enqueue_hcmd()
1485 INDEX_TO_SEQ(txq->write_ptr)); in iwl_pcie_enqueue_hcmd()
1486 out_cmd->hdr.group_id = 0; in iwl_pcie_enqueue_hcmd()
1496 if (!cmd->len[i]) in iwl_pcie_enqueue_hcmd()
1500 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | in iwl_pcie_enqueue_hcmd()
1502 copy = cmd->len[i]; in iwl_pcie_enqueue_hcmd()
1504 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); in iwl_pcie_enqueue_hcmd()
1512 * in total (for bi-directional DMA), but copy up to what in iwl_pcie_enqueue_hcmd()
1515 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); in iwl_pcie_enqueue_hcmd()
1517 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); in iwl_pcie_enqueue_hcmd()
1522 copy = IWL_FIRST_TB_SIZE - copy_size; in iwl_pcie_enqueue_hcmd()
1524 if (copy > cmd->len[i]) in iwl_pcie_enqueue_hcmd()
1525 copy = cmd->len[i]; in iwl_pcie_enqueue_hcmd()
1532 iwl_get_cmd_string(trans, cmd->id), in iwl_pcie_enqueue_hcmd()
1533 group_id, out_cmd->hdr.cmd, in iwl_pcie_enqueue_hcmd()
1534 le16_to_cpu(out_cmd->hdr.sequence), in iwl_pcie_enqueue_hcmd()
1535 cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); in iwl_pcie_enqueue_hcmd()
1539 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); in iwl_pcie_enqueue_hcmd()
1540 iwl_pcie_txq_build_tfd(trans, txq, in iwl_pcie_enqueue_hcmd()
1541 iwl_txq_get_first_tb_dma(txq, idx), in iwl_pcie_enqueue_hcmd()
1546 phys_addr = dma_map_single(trans->dev, in iwl_pcie_enqueue_hcmd()
1547 ((u8 *)&out_cmd->hdr) + tb0_size, in iwl_pcie_enqueue_hcmd()
1548 copy_size - tb0_size, in iwl_pcie_enqueue_hcmd()
1550 if (dma_mapping_error(trans->dev, phys_addr)) { in iwl_pcie_enqueue_hcmd()
1551 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, in iwl_pcie_enqueue_hcmd()
1552 txq->write_ptr); in iwl_pcie_enqueue_hcmd()
1553 idx = -ENOMEM; in iwl_pcie_enqueue_hcmd()
1557 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, in iwl_pcie_enqueue_hcmd()
1558 copy_size - tb0_size, false); in iwl_pcie_enqueue_hcmd()
1567 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | in iwl_pcie_enqueue_hcmd()
1570 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) in iwl_pcie_enqueue_hcmd()
1572 phys_addr = dma_map_single(trans->dev, data, in iwl_pcie_enqueue_hcmd()
1574 if (dma_mapping_error(trans->dev, phys_addr)) { in iwl_pcie_enqueue_hcmd()
1575 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, in iwl_pcie_enqueue_hcmd()
1576 txq->write_ptr); in iwl_pcie_enqueue_hcmd()
1577 idx = -ENOMEM; in iwl_pcie_enqueue_hcmd()
1581 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); in iwl_pcie_enqueue_hcmd()
1584 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); in iwl_pcie_enqueue_hcmd()
1585 out_meta->flags = cmd->flags; in iwl_pcie_enqueue_hcmd()
1586 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) in iwl_pcie_enqueue_hcmd()
1587 kfree_sensitive(txq->entries[idx].free_buf); in iwl_pcie_enqueue_hcmd()
1588 txq->entries[idx].free_buf = dup_buf; in iwl_pcie_enqueue_hcmd()
1590 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); in iwl_pcie_enqueue_hcmd()
1593 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) in iwl_pcie_enqueue_hcmd()
1594 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_pcie_enqueue_hcmd()
1602 if (cmd->flags & CMD_BLOCK_TXQS) in iwl_pcie_enqueue_hcmd()
1606 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_pcie_enqueue_hcmd()
1607 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_pcie_enqueue_hcmd()
1610 spin_unlock_irqrestore(&txq->lock, flags); in iwl_pcie_enqueue_hcmd()
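A sketch of the sequence field built above with QUEUE_TO_SEQ() and INDEX_TO_SEQ(), assuming the usual layout of queue id above the low byte and write index in it; the completion path in iwl_pcie_hcmd_complete() below recovers both from pkt->hdr.sequence to find the matching queue entry:

#include <stdint.h>

/* Sketch only (assumed bit layout, illustrative names) */
static inline uint16_t example_cmd_sequence(uint16_t txq_id, uint16_t write_ptr)
{
	return (uint16_t)((txq_id << 8) | (write_ptr & 0xff));
}

static inline uint16_t example_seq_to_index(uint16_t sequence)
{
	return sequence & 0xff;
}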
1618 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1625 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_pcie_hcmd_complete()
1634 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; in iwl_pcie_hcmd_complete() local
1639 if (WARN(txq_id != trans_pcie->txqs.cmd.q_id, in iwl_pcie_hcmd_complete()
1641 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr, in iwl_pcie_hcmd_complete()
1642 txq->write_ptr)) { in iwl_pcie_hcmd_complete()
1647 spin_lock_bh(&txq->lock); in iwl_pcie_hcmd_complete()
1649 cmd_index = iwl_txq_get_cmd_index(txq, index); in iwl_pcie_hcmd_complete()
1650 cmd = txq->entries[cmd_index].cmd; in iwl_pcie_hcmd_complete()
1651 meta = &txq->entries[cmd_index].meta; in iwl_pcie_hcmd_complete()
1652 group_id = cmd->hdr.group_id; in iwl_pcie_hcmd_complete()
1653 cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); in iwl_pcie_hcmd_complete()
1655 if (trans->trans_cfg->gen2) in iwl_pcie_hcmd_complete()
1657 iwl_txq_get_tfd(trans, txq, index)); in iwl_pcie_hcmd_complete()
1659 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); in iwl_pcie_hcmd_complete()
1662 if (meta->flags & CMD_WANT_SKB) { in iwl_pcie_hcmd_complete()
1665 meta->source->resp_pkt = pkt; in iwl_pcie_hcmd_complete()
1666 meta->source->_rx_page_addr = (unsigned long)page_address(p); in iwl_pcie_hcmd_complete()
1667 meta->source->_rx_page_order = trans_pcie->rx_page_order; in iwl_pcie_hcmd_complete()
1670 if (meta->flags & CMD_BLOCK_TXQS) in iwl_pcie_hcmd_complete()
1675 if (!(meta->flags & CMD_ASYNC)) { in iwl_pcie_hcmd_complete()
1676 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { in iwl_pcie_hcmd_complete()
1681 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_pcie_hcmd_complete()
1684 wake_up(&trans->wait_command_queue); in iwl_pcie_hcmd_complete()
1687 meta->flags = 0; in iwl_pcie_hcmd_complete()
1689 spin_unlock_bh(&txq->lock); in iwl_pcie_hcmd_complete()
1693 struct iwl_txq *txq, u8 hdr_len, in iwl_fill_data_tbs() argument
1703 head_tb_len = skb_headlen(skb) - hdr_len; in iwl_fill_data_tbs()
1706 dma_addr_t tb_phys = dma_map_single(trans->dev, in iwl_fill_data_tbs()
1707 skb->data + hdr_len, in iwl_fill_data_tbs()
1709 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_fill_data_tbs()
1710 return -EINVAL; in iwl_fill_data_tbs()
1711 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, in iwl_fill_data_tbs()
1713 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); in iwl_fill_data_tbs()
1717 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in iwl_fill_data_tbs()
1718 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in iwl_fill_data_tbs()
1725 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, in iwl_fill_data_tbs()
1728 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_fill_data_tbs()
1729 return -EINVAL; in iwl_fill_data_tbs()
1730 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), in iwl_fill_data_tbs()
1732 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, in iwl_fill_data_tbs()
1737 out_meta->tbs |= BIT(tb_idx); in iwl_fill_data_tbs()
1748 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page); in iwl_pcie_get_page_hdr()
1754 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); in iwl_pcie_get_page_hdr()
1759 if (!p->page) in iwl_pcie_get_page_hdr()
1766 * page - we need it somewhere, and if it's there then we in iwl_pcie_get_page_hdr()
1768 * trigger the 32-bit boundary hardware bug. in iwl_pcie_get_page_hdr()
1770 * (see also get_workaround_page() in tx-gen2.c) in iwl_pcie_get_page_hdr()
1772 if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) { in iwl_pcie_get_page_hdr()
1773 info = IWL_TSO_PAGE_INFO(page_address(p->page)); in iwl_pcie_get_page_hdr()
1778 iwl_pcie_free_and_unmap_tso_page(trans, p->page); in iwl_pcie_get_page_hdr()
1781 p->page = alloc_page(GFP_ATOMIC); in iwl_pcie_get_page_hdr()
1782 if (!p->page) in iwl_pcie_get_page_hdr()
1784 p->pos = page_address(p->page); in iwl_pcie_get_page_hdr()
1786 info = IWL_TSO_PAGE_INFO(page_address(p->page)); in iwl_pcie_get_page_hdr()
1789 info->next = NULL; in iwl_pcie_get_page_hdr()
1792 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, in iwl_pcie_get_page_hdr()
1794 if (unlikely(dma_mapping_error(trans->dev, phys))) { in iwl_pcie_get_page_hdr()
1795 __free_page(p->page); in iwl_pcie_get_page_hdr()
1796 p->page = NULL; in iwl_pcie_get_page_hdr()
1802 info->dma_addr = phys; in iwl_pcie_get_page_hdr()
1803 refcount_set(&info->use_count, 1); in iwl_pcie_get_page_hdr()
1805 *page_ptr = p->page; in iwl_pcie_get_page_hdr()
1807 refcount_inc(&info->use_count); in iwl_pcie_get_page_hdr()
1808 ret = p->pos; in iwl_pcie_get_page_hdr()
1809 p->pos += len; in iwl_pcie_get_page_hdr()
1815 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
1839 return sg_dma_address(sg) + offset - sg_offset; in iwl_pcie_get_sgt_tb_phys()
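A standalone sketch of what the return statement above computes: walk the DMA-mapped segments, accumulate their lengths, and once the requested range falls inside a segment return that segment's bus address plus the offset into it. The struct and names are illustrative stand-ins for the kernel scatterlist API:

#include <stddef.h>
#include <stdint.h>

struct example_seg {
	uint64_t dma_addr;
	unsigned int len;
};

/* Sketch only: returns 0 if the range is not covered by any segment. */
static uint64_t example_offset_to_dma(const struct example_seg *segs, size_t nsegs,
				      unsigned int offset, unsigned int len)
{
	unsigned int seg_offset = 0;
	size_t i;

	for (i = 0; i < nsegs; i++) {
		if (offset >= seg_offset &&
		    offset + len <= seg_offset + segs[i].len)
			return segs[i].dma_addr + offset - seg_offset;
		seg_offset += segs[i].len;
	}
	return 0;
}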
1850 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
1875 (skb_shinfo(skb)->nr_frags + 1) * in iwl_pcie_prep_tso()
1882 sgt->sgl = (void *)(sgt + 1); in iwl_pcie_prep_tso()
1884 sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); in iwl_pcie_prep_tso()
1887 sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb), in iwl_pcie_prep_tso()
1888 skb->data_len); in iwl_pcie_prep_tso()
1889 if (WARN_ON_ONCE(sgt->orig_nents <= 0)) in iwl_pcie_prep_tso()
1893 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) in iwl_pcie_prep_tso()
1896 /* Store non-zero (i.e. valid) offset for unmapping */ in iwl_pcie_prep_tso()
1897 cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK; in iwl_pcie_prep_tso()
1903 struct iwl_txq *txq, u8 hdr_len, in iwl_fill_data_tbs_amsdu() argument
1909 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_fill_data_tbs_amsdu()
1910 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_fill_data_tbs_amsdu()
1912 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_fill_data_tbs_amsdu()
1922 iv_len = ieee80211_has_protected(hdr->frame_control) ? in iwl_fill_data_tbs_amsdu()
1925 trace_iwlwifi_dev_tx(trans->dev, skb, in iwl_fill_data_tbs_amsdu()
1926 iwl_txq_get_tfd(trans, txq, txq->write_ptr), in iwl_fill_data_tbs_amsdu()
1927 trans_pcie->txqs.tfd.size, in iwl_fill_data_tbs_amsdu()
1928 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); in iwl_fill_data_tbs_amsdu()
1932 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; in iwl_fill_data_tbs_amsdu()
1935 /* total amount of header we may need for this A-MSDU */ in iwl_fill_data_tbs_amsdu()
1942 return -ENOMEM; in iwl_fill_data_tbs_amsdu()
1946 memcpy(pos_hdr, skb->data + hdr_len, iv_len); in iwl_fill_data_tbs_amsdu()
1958 * all the different MSDUs inside the A-MSDU. in iwl_fill_data_tbs_amsdu()
1960 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); in iwl_fill_data_tbs_amsdu()
1972 total_len -= data_left; in iwl_fill_data_tbs_amsdu()
1976 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + in iwl_fill_data_tbs_amsdu()
1995 hdr_tb_len = pos_hdr - start_hdr; in iwl_fill_data_tbs_amsdu()
1998 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, in iwl_fill_data_tbs_amsdu()
2000 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, in iwl_fill_data_tbs_amsdu()
2003 le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); in iwl_fill_data_tbs_amsdu()
2017 return -EINVAL; in iwl_fill_data_tbs_amsdu()
2019 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, in iwl_fill_data_tbs_amsdu()
2021 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, in iwl_fill_data_tbs_amsdu()
2024 data_left -= size; in iwl_fill_data_tbs_amsdu()
2030 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, in iwl_fill_data_tbs_amsdu()
2033 /* re-add the WiFi header and IV */ in iwl_fill_data_tbs_amsdu()
2040 struct iwl_txq *txq, u8 hdr_len, in iwl_fill_data_tbs_amsdu() argument
2045 /* No A-MSDU without CONFIG_INET */ in iwl_fill_data_tbs_amsdu()
2048 return -1; in iwl_fill_data_tbs_amsdu()
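One detail of the A-MSDU construction above worth spelling out, assuming the usual 4-byte subframe alignment that the amsdu_pad arithmetic implements: each subframe (ethernet header plus SNAP/IP/TCP headers plus payload) is padded so that the next subframe header starts on a 4-byte boundary. A minimal sketch:

/* Sketch only: padding bytes needed after a subframe of the given length. */
static inline unsigned int example_amsdu_pad(unsigned int subframe_len)
{
	return (4 - subframe_len) & 0x3;
}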
2056 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2059 struct iwl_txq *txq, u16 byte_cnt, in iwl_txq_gen1_update_byte_cnt_tbl() argument
2064 int write_ptr = txq->write_ptr; in iwl_txq_gen1_update_byte_cnt_tbl()
2065 int txq_id = txq->id; in iwl_txq_gen1_update_byte_cnt_tbl()
2069 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; in iwl_txq_gen1_update_byte_cnt_tbl()
2070 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_txq_gen1_update_byte_cnt_tbl()
2071 u8 sta_id = tx_cmd->sta_id; in iwl_txq_gen1_update_byte_cnt_tbl()
2073 scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_update_byte_cnt_tbl()
2075 sec_ctl = tx_cmd->sec_ctl; in iwl_txq_gen1_update_byte_cnt_tbl()
2088 if (trans_pcie->txqs.bc_table_dword) in iwl_txq_gen1_update_byte_cnt_tbl()
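The bc_table_dword flag tested above selects whether the scheduler byte-count table stores lengths in bytes or in dwords; when it is set, the length is rounded up to whole dwords (an assumption stated here, since the rounding line itself is not part of this listing). A sketch:

/* Sketch only: round a byte count up to whole dwords. */
static inline unsigned int example_len_to_dwords(unsigned int len)
{
	return (len + 3) / 4;
}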
2108 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; in iwl_trans_pcie_tx()
2110 struct iwl_txq *txq; in iwl_trans_pcie_tx() local
2121 txq = trans_pcie->txqs.txq[txq_id]; in iwl_trans_pcie_tx()
2123 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used), in iwl_trans_pcie_tx()
2125 return -EINVAL; in iwl_trans_pcie_tx()
2128 skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) && in iwl_trans_pcie_tx()
2130 return -ENOMEM; in iwl_trans_pcie_tx()
2135 hdr = (struct ieee80211_hdr *)skb->data; in iwl_trans_pcie_tx()
2136 fc = hdr->frame_control; in iwl_trans_pcie_tx()
2139 spin_lock(&txq->lock); in iwl_trans_pcie_tx()
2141 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_trans_pcie_tx()
2142 iwl_txq_stop(trans, txq); in iwl_trans_pcie_tx()
2145 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_trans_pcie_tx()
2148 dev_cmd_ptr = (void *)((u8 *)skb->cb + in iwl_trans_pcie_tx()
2149 trans_pcie->txqs.dev_cmd_offs); in iwl_trans_pcie_tx()
2152 __skb_queue_tail(&txq->overflow_q, skb); in iwl_trans_pcie_tx()
2154 spin_unlock(&txq->lock); in iwl_trans_pcie_tx()
2164 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); in iwl_trans_pcie_tx()
2165 WARN_ONCE(txq->ampdu && in iwl_trans_pcie_tx()
2166 (wifi_seq & 0xff) != txq->write_ptr, in iwl_trans_pcie_tx()
2168 txq_id, wifi_seq, txq->write_ptr); in iwl_trans_pcie_tx()
2171 txq->entries[txq->write_ptr].skb = skb; in iwl_trans_pcie_tx()
2172 txq->entries[txq->write_ptr].cmd = dev_cmd; in iwl_trans_pcie_tx()
2174 dev_cmd->hdr.sequence = in iwl_trans_pcie_tx()
2176 INDEX_TO_SEQ(txq->write_ptr))); in iwl_trans_pcie_tx()
2178 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); in iwl_trans_pcie_tx()
2182 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); in iwl_trans_pcie_tx()
2183 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); in iwl_trans_pcie_tx()
2186 out_meta = &txq->entries[txq->write_ptr].meta; in iwl_trans_pcie_tx()
2191 * and the 802.11 header - dword aligned size in iwl_trans_pcie_tx()
2196 hdr_len - IWL_FIRST_TB_SIZE; in iwl_trans_pcie_tx()
2197 /* do not align A-MSDU to dword as the subframe header aligns it */ in iwl_trans_pcie_tx()
2203 /* Tell NIC about any 2-byte padding after MAC header */ in iwl_trans_pcie_tx()
2205 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); in iwl_trans_pcie_tx()
2211 * The first TB points to bi-directional DMA data, we'll in iwl_trans_pcie_tx()
2214 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, in iwl_trans_pcie_tx()
2224 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; in iwl_trans_pcie_tx()
2225 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); in iwl_trans_pcie_tx()
2226 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) in iwl_trans_pcie_tx()
2228 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); in iwl_trans_pcie_tx()
2230 trace_iwlwifi_dev_tx(trans->dev, skb, in iwl_trans_pcie_tx()
2231 iwl_txq_get_tfd(trans, txq, txq->write_ptr), in iwl_trans_pcie_tx()
2232 trans_pcie->txqs.tfd.size, in iwl_trans_pcie_tx()
2233 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, in iwl_trans_pcie_tx()
2240 * pre-built, and we just need to send the resulting skb. in iwl_trans_pcie_tx()
2242 if (amsdu && skb_shinfo(skb)->gso_size) { in iwl_trans_pcie_tx()
2243 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, in iwl_trans_pcie_tx()
2250 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, in iwl_trans_pcie_tx()
2255 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, in iwl_trans_pcie_tx()
2261 /* building the A-MSDU might have changed this data, so memcpy it now */ in iwl_trans_pcie_tx()
2262 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_trans_pcie_tx()
2264 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); in iwl_trans_pcie_tx()
2265 /* Set up entry for this TFD in Tx byte-count array */ in iwl_trans_pcie_tx()
2266 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), in iwl_trans_pcie_tx()
2272 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { in iwl_trans_pcie_tx()
2274 * If the TXQ is active, then set the timer, if not, in iwl_trans_pcie_tx()
2279 if (!txq->frozen) in iwl_trans_pcie_tx()
2280 mod_timer(&txq->stuck_timer, in iwl_trans_pcie_tx()
2281 jiffies + txq->wd_timeout); in iwl_trans_pcie_tx()
2283 txq->frozen_expiry_remainder = txq->wd_timeout; in iwl_trans_pcie_tx()
2287 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_trans_pcie_tx()
2289 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_trans_pcie_tx()
2295 spin_unlock(&txq->lock); in iwl_trans_pcie_tx()
2298 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); in iwl_trans_pcie_tx()
2299 spin_unlock(&txq->lock); in iwl_trans_pcie_tx()
2300 return -1; in iwl_trans_pcie_tx()
2304 struct iwl_txq *txq, in iwl_txq_gen1_inval_byte_cnt_tbl() argument
2308 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_inval_byte_cnt_tbl()
2309 int txq_id = txq->id; in iwl_txq_gen1_inval_byte_cnt_tbl()
2312 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; in iwl_txq_gen1_inval_byte_cnt_tbl()
2313 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_txq_gen1_inval_byte_cnt_tbl()
2317 if (txq_id != trans_pcie->txqs.cmd.q_id) in iwl_txq_gen1_inval_byte_cnt_tbl()
2318 sta_id = tx_cmd->sta_id; in iwl_txq_gen1_inval_byte_cnt_tbl()
2334 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_pcie_reclaim() local
2339 if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) in iwl_pcie_reclaim()
2342 if (WARN_ON(!txq)) in iwl_pcie_reclaim()
2345 tfd_num = iwl_txq_get_cmd_index(txq, ssn); in iwl_pcie_reclaim()
2347 spin_lock_bh(&txq->reclaim_lock); in iwl_pcie_reclaim()
2349 spin_lock(&txq->lock); in iwl_pcie_reclaim()
2350 txq_read_ptr = txq->read_ptr; in iwl_pcie_reclaim()
2351 txq_write_ptr = txq->write_ptr; in iwl_pcie_reclaim()
2352 spin_unlock(&txq->lock); in iwl_pcie_reclaim()
2354 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr); in iwl_pcie_reclaim()
2356 if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) { in iwl_pcie_reclaim()
2357 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", in iwl_pcie_reclaim()
2365 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n", in iwl_pcie_reclaim()
2373 if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) { in iwl_pcie_reclaim()
2375 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", in iwl_pcie_reclaim()
2377 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_reclaim()
2380 iwl_op_mode_time_point(trans->op_mode, in iwl_pcie_reclaim()
2392 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) { in iwl_pcie_reclaim()
2393 struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta; in iwl_pcie_reclaim()
2394 struct sk_buff *skb = txq->entries[read_ptr].skb; in iwl_pcie_reclaim()
2404 txq->entries[read_ptr].skb = NULL; in iwl_pcie_reclaim()
2406 if (!trans->trans_cfg->gen2) in iwl_pcie_reclaim()
2407 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq, in iwl_pcie_reclaim()
2410 iwl_txq_free_tfd(trans, txq, txq_read_ptr); in iwl_pcie_reclaim()
2413 spin_lock(&txq->lock); in iwl_pcie_reclaim()
2414 txq->read_ptr = txq_read_ptr; in iwl_pcie_reclaim()
2416 iwl_txq_progress(txq); in iwl_pcie_reclaim()
2418 if (iwl_txq_space(trans, txq) > txq->low_mark && in iwl_pcie_reclaim()
2419 test_bit(txq_id, trans_pcie->txqs.queue_stopped)) { in iwl_pcie_reclaim()
2424 skb_queue_splice_init(&txq->overflow_q, in iwl_pcie_reclaim()
2431 * the state of &txq->overflow_q, as we just emptied it, but in iwl_pcie_reclaim()
2434 txq->overflow_tx = true; in iwl_pcie_reclaim()
2438 * reclaim_lock, so no one will try to access the txq data in iwl_pcie_reclaim()
2440 * Bottom line, we can unlock and re-lock later. in iwl_pcie_reclaim()
2442 spin_unlock(&txq->lock); in iwl_pcie_reclaim()
2447 dev_cmd_ptr = *(void **)((u8 *)skb->cb + in iwl_pcie_reclaim()
2448 trans_pcie->txqs.dev_cmd_offs); in iwl_pcie_reclaim()
2458 if (iwl_txq_space(trans, txq) > txq->low_mark) in iwl_pcie_reclaim()
2459 iwl_trans_pcie_wake_queue(trans, txq); in iwl_pcie_reclaim()
2461 spin_lock(&txq->lock); in iwl_pcie_reclaim()
2462 txq->overflow_tx = false; in iwl_pcie_reclaim()
2465 spin_unlock(&txq->lock); in iwl_pcie_reclaim()
2467 spin_unlock_bh(&txq->reclaim_lock); in iwl_pcie_reclaim()
2470 /* Set wr_ptr of specific device and txq */
2474 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; in iwl_pcie_set_q_ptrs() local
2476 spin_lock_bh(&txq->lock); in iwl_pcie_set_q_ptrs()
2478 txq->write_ptr = ptr; in iwl_pcie_set_q_ptrs()
2479 txq->read_ptr = txq->write_ptr; in iwl_pcie_set_q_ptrs()
2481 spin_unlock_bh(&txq->lock); in iwl_pcie_set_q_ptrs()
2491 struct iwl_txq *txq = trans_pcie->txqs.txq[queue]; in iwl_pcie_freeze_txq_timer() local
2494 spin_lock_bh(&txq->lock); in iwl_pcie_freeze_txq_timer()
2498 if (txq->frozen == freeze) in iwl_pcie_freeze_txq_timer()
2501 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", in iwl_pcie_freeze_txq_timer()
2504 txq->frozen = freeze; in iwl_pcie_freeze_txq_timer()
2506 if (txq->read_ptr == txq->write_ptr) in iwl_pcie_freeze_txq_timer()
2511 txq->stuck_timer.expires))) { in iwl_pcie_freeze_txq_timer()
2519 txq->frozen_expiry_remainder = in iwl_pcie_freeze_txq_timer()
2520 txq->stuck_timer.expires - now; in iwl_pcie_freeze_txq_timer()
2521 del_timer(&txq->stuck_timer); in iwl_pcie_freeze_txq_timer()
2526 * Wake a non-empty queue -> arm timer with the in iwl_pcie_freeze_txq_timer()
2529 mod_timer(&txq->stuck_timer, in iwl_pcie_freeze_txq_timer()
2530 now + txq->frozen_expiry_remainder); in iwl_pcie_freeze_txq_timer()
2533 spin_unlock_bh(&txq->lock); in iwl_pcie_freeze_txq_timer()
2543 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); in iwl_trans_pcie_send_hcmd_sync()
2544 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; in iwl_trans_pcie_send_hcmd_sync() local
2551 &trans->status), in iwl_trans_pcie_send_hcmd_sync()
2553 return -EIO; in iwl_trans_pcie_send_hcmd_sync()
2557 if (trans->trans_cfg->gen2) in iwl_trans_pcie_send_hcmd_sync()
2564 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_pcie_send_hcmd_sync()
2570 ret = wait_event_timeout(trans->wait_command_queue, in iwl_trans_pcie_send_hcmd_sync()
2572 &trans->status), in iwl_trans_pcie_send_hcmd_sync()
2579 txq->read_ptr, txq->write_ptr); in iwl_trans_pcie_send_hcmd_sync()
2581 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_pcie_send_hcmd_sync()
2584 ret = -ETIMEDOUT; in iwl_trans_pcie_send_hcmd_sync()
2590 if (test_bit(STATUS_FW_ERROR, &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2592 &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2596 ret = -EIO; in iwl_trans_pcie_send_hcmd_sync()
2600 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && in iwl_trans_pcie_send_hcmd_sync()
2601 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2603 ret = -ERFKILL; in iwl_trans_pcie_send_hcmd_sync()
2607 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { in iwl_trans_pcie_send_hcmd_sync()
2609 ret = -EIO; in iwl_trans_pcie_send_hcmd_sync()
2616 if (cmd->flags & CMD_WANT_SKB) { in iwl_trans_pcie_send_hcmd_sync()
2621 * address (cmd->meta.source). in iwl_trans_pcie_send_hcmd_sync()
2623 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; in iwl_trans_pcie_send_hcmd_sync()
2626 if (cmd->resp_pkt) { in iwl_trans_pcie_send_hcmd_sync()
2628 cmd->resp_pkt = NULL; in iwl_trans_pcie_send_hcmd_sync()
2638 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_pcie_send_hcmd()
2639 return -ENODEV; in iwl_trans_pcie_send_hcmd()
2641 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && in iwl_trans_pcie_send_hcmd()
2642 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_pcie_send_hcmd()
2644 cmd->id); in iwl_trans_pcie_send_hcmd()
2645 return -ERFKILL; in iwl_trans_pcie_send_hcmd()
2648 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && in iwl_trans_pcie_send_hcmd()
2649 !(cmd->flags & CMD_SEND_IN_D3))) { in iwl_trans_pcie_send_hcmd()
2650 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); in iwl_trans_pcie_send_hcmd()
2651 return -EHOSTDOWN; in iwl_trans_pcie_send_hcmd()
2654 if (cmd->flags & CMD_ASYNC) { in iwl_trans_pcie_send_hcmd()
2658 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) in iwl_trans_pcie_send_hcmd()
2659 return -EINVAL; in iwl_trans_pcie_send_hcmd()
2661 if (trans->trans_cfg->gen2) in iwl_trans_pcie_send_hcmd()
2669 iwl_get_cmd_string(trans, cmd->id), ret); in iwl_trans_pcie_send_hcmd()