Lines Matching +full:msi +full:- +full:base +full:- +full:vec

1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3 * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
18 #include "iwl-fh.h"
19 #include "iwl-csr.h"
20 #include "iwl-trans.h"
21 #include "iwl-debug.h"
22 #include "iwl-io.h"
23 #include "iwl-op-mode.h"
24 #include "iwl-drv.h"
25 #include "iwl-context-info.h"
46 * @invalid: rxb is in driver ownership - not owned by HW
76 * struct iwl_rx_transfer_desc - transfer descriptor
90 * struct iwl_rx_completion_desc - completion descriptor
104 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
116 * struct iwl_rxq - Rx queue
119 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000-series devices and later.
128 * @free_count: Number of pre-allocated buffers in rx_free
136 * @lock: per-queue lock
137 * @queue: actual rx queue. Not used for multi-rx queue.
168 * struct iwl_rb_allocator - Rx allocator
190 * iwl_get_closed_rb_stts - get closed rb stts from different structs
197 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_get_closed_rb_stts()
198 __le16 *rb_stts = rxq->rb_stts; in iwl_get_closed_rb_stts()
202 struct iwl_rb_status *rb_stts = rxq->rb_stts; in iwl_get_closed_rb_stts()
204 return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF; in iwl_get_closed_rb_stts()
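The two branches above come from a single helper that reads the closed RB count from two different layouts; a minimal sketch of the full function, reconstructed from these fragments (the AX210+ branch is assumed to read a bare little-endian counter):

static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					  struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		/* newer devices: the status is a plain 16-bit counter */
		return le16_to_cpu(READ_ONCE(*rb_stts));
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		/* older devices: 12-bit field inside struct iwl_rb_status */
		return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
	}
}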
210 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
226 * enum iwl_shared_irq_flags - level of sharing for irq
236 * enum iwl_image_response_code - image response values
275 * enum iwl_pcie_imr_status - imr dma transfer state
289 * struct iwl_pcie_txqs - TX queues data
292 * @page_offs: offset from skb->cb to mac header page pointer
293 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
301 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
341 * struct iwl_trans_pcie - PCIe transport specific data
357 * @scd_base_addr: scheduler base address in SRAM
362 * @pci_dev: basic pci-network driver stuff
366 * @cmd_queue: command queue number
376 * @msix_entries: array of MSI-X entries
377 * @msix_enabled: true if managed to enable MSI-X
387 * @base_rb_stts: base virtual address of receive buffer status for all queues
388 * @base_rb_stts_dma: base physical address of receive buffer status
401 * @inta_mask: interrupt (INT-A) mask
413 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio enabled/disabled
526 return (void *)trans->trans_specific; in IWL_TRANS_GET_PCIE_TRANS()
535 * re-enabled by clearing this bit. This register is defined as in iwl_pcie_clear_irq()
584 * ICT - interrupt handling
599 #define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
608 * that no TB referencing this page can trigger the 32-bit boundary hardware
617 #define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
655 res = IWL_TSO_PAGE_INFO(addr)->dma_addr; in iwl_pcie_get_tso_page_phys()
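These definitions imply that each TSO page keeps a struct iwl_tso_page_info at its very end, so the data area is PAGE_SIZE minus that struct, and the DMA address of any pointer into the page can be computed from the cached per-page base. A sketch under that assumption (the IWL_TSO_PAGE_INFO() shape shown here is inferred, not copied):

/* assumed layout: | data (IWL_TSO_PAGE_DATA_SIZE) | struct iwl_tso_page_info | */
#define IWL_TSO_PAGE_INFO(addr)						\
	((struct iwl_tso_page_info *)					\
	 (((unsigned long)(addr) & PAGE_MASK) + IWL_TSO_PAGE_DATA_SIZE))

static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
{
	dma_addr_t res;

	/* per-page DMA base cached in the trailing info struct ... */
	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
	/* ... plus the offset of 'addr' within the page */
	res += (unsigned long)addr & (PAGE_SIZE - 1);

	return res;
}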
664 return txq->first_tb_dma + in iwl_txq_get_first_tb_dma()
670 return index & (q->n_window - 1); in iwl_txq_get_cmd_index()
678 if (trans->trans_cfg->gen2) in iwl_txq_get_tfd()
681 return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; in iwl_txq_get_tfd()
685 * We need this inline in case dma_addr_t is only 32 bits - since the
686 * hardware is always 64-bit, the issue can still occur in that case,
687 * so use u64 for 'phys' here to force the addition in 64-bit.
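The comment motivates a boundary check that must be computed in 64 bits even when dma_addr_t is 32 bits wide; a sketch of such a helper (the name here is an assumption):

static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	/* true if [phys, phys + len) straddles a 2^32 boundary */
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}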
700 if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) { in iwl_txq_stop()
701 iwl_op_mode_queue_full(trans->op_mode, txq->id); in iwl_txq_stop()
702 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); in iwl_txq_stop()
705 txq->id); in iwl_txq_stop()
710 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
717 (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_inc_wrap()
721 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
727 return --index & in iwl_txq_dec_wrap()
728 (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_dec_wrap()
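Both wrap helpers assume max_tfd_queue_size is a power of two, so wrapping reduces to an AND mask; a worked example:

/* with max_tfd_queue_size == 256:
 *   inc: (255 + 1) & 255 == 0    (wrap back to the beginning)
 *   dec: (0 - 1)   & 255 == 255  (wrap back to the end)
 */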
738 if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) { in iwl_trans_pcie_wake_queue()
739 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); in iwl_trans_pcie_wake_queue()
740 iwl_op_mode_queue_not_full(trans->op_mode, txq->id); in iwl_trans_pcie_wake_queue()
751 tfd->num_tbs = 0; in iwl_txq_set_tfd_invalid_gen2()
753 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, in iwl_txq_set_tfd_invalid_gen2()
754 trans->invalid_tx_cmd.size); in iwl_txq_set_tfd_invalid_gen2()
781 if (trans->trans_cfg->gen2) { in iwl_txq_gen1_tfd_tb_get_len()
783 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; in iwl_txq_gen1_tfd_tb_get_len()
785 return le16_to_cpu(tfh_tb->tb_len); in iwl_txq_gen1_tfd_tb_get_len()
789 tb = &tfd->tbs[idx]; in iwl_txq_gen1_tfd_tb_get_len()
791 return le16_to_cpu(tb->hi_n_len) >> 4; in iwl_txq_gen1_tfd_tb_get_len()
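The gen1 branch works because the legacy TB packs two fields into hi_n_len: the low nibble holds address bits 32..35 and the upper 12 bits hold the length (hence the >> 4). A sketch with hypothetical helper names to illustrate the unpacking:

/* hypothetical helpers, not part of the driver */
static inline u16 tfd_tb_len(const struct iwl_tfd_tb *tb)
{
	return le16_to_cpu(tb->hi_n_len) >> 4;	/* upper 12 bits: length */
}

static inline u64 tfd_tb_hi_addr(const struct iwl_tfd_tb *tb)
{
	/* low nibble: address bits 32..35 */
	return (u64)(le16_to_cpu(tb->hi_n_len) & 0xF) << 32;
}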
814 clear_bit(STATUS_INT_ENABLED, &trans->status); in _iwl_disable_interrupts()
815 if (!trans_pcie->msix_enabled) { in _iwl_disable_interrupts()
826 trans_pcie->fh_init_mask); in _iwl_disable_interrupts()
828 trans_pcie->hw_init_mask); in _iwl_disable_interrupts()
838 while (start < fw->num_sec && in iwl_pcie_get_num_sections()
839 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && in iwl_pcie_get_num_sections()
840 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { in iwl_pcie_get_num_sections()
850 struct iwl_self_init_dram *dram = &trans->init_dram; in iwl_pcie_ctxt_info_free_fw_img()
853 if (!dram->fw) { in iwl_pcie_ctxt_info_free_fw_img()
854 WARN_ON(dram->fw_cnt); in iwl_pcie_ctxt_info_free_fw_img()
858 for (i = 0; i < dram->fw_cnt; i++) in iwl_pcie_ctxt_info_free_fw_img()
859 dma_free_coherent(trans->dev, dram->fw[i].size, in iwl_pcie_ctxt_info_free_fw_img()
860 dram->fw[i].block, dram->fw[i].physical); in iwl_pcie_ctxt_info_free_fw_img()
862 kfree(dram->fw); in iwl_pcie_ctxt_info_free_fw_img()
863 dram->fw_cnt = 0; in iwl_pcie_ctxt_info_free_fw_img()
864 dram->fw = NULL; in iwl_pcie_ctxt_info_free_fw_img()
871 spin_lock_bh(&trans_pcie->irq_lock); in iwl_disable_interrupts()
873 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_disable_interrupts()
881 set_bit(STATUS_INT_ENABLED, &trans->status); in _iwl_enable_interrupts()
882 if (!trans_pcie->msix_enabled) { in _iwl_enable_interrupts()
883 trans_pcie->inta_mask = CSR_INI_SET_MASK; in _iwl_enable_interrupts()
884 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in _iwl_enable_interrupts()
888 * Unlike MSI, with MSI-X a cause is enabled when its bit is unset. in _iwl_enable_interrupts()
890 trans_pcie->hw_mask = trans_pcie->hw_init_mask; in _iwl_enable_interrupts()
891 trans_pcie->fh_mask = trans_pcie->fh_init_mask; in _iwl_enable_interrupts()
893 ~trans_pcie->fh_mask); in _iwl_enable_interrupts()
895 ~trans_pcie->hw_mask); in _iwl_enable_interrupts()
903 spin_lock_bh(&trans_pcie->irq_lock); in iwl_enable_interrupts()
905 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_enable_interrupts()
912 trans_pcie->hw_mask = msk; in iwl_enable_hw_int_msk_msix()
920 trans_pcie->fh_mask = msk; in iwl_enable_fh_int_msk_msix()
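The two assignments above cache the enabled causes after programming the hardware; per the earlier comment, an MSI-X cause is enabled when its mask bit is clear, so the value written is the complement. A sketch of what the full helpers likely look like (register names assumed from iwl-csr.h):

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* cleared bit == cause enabled, hence ~msk */
	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}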
928 if (!trans_pcie->msix_enabled) { in iwl_enable_fw_load_int()
929 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; in iwl_enable_fw_load_int()
930 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_fw_load_int()
933 trans_pcie->hw_init_mask); in iwl_enable_fw_load_int()
945 if (!trans_pcie->msix_enabled) { in iwl_enable_fw_load_int_ctx_info()
953 trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; in iwl_enable_fw_load_int_ctx_info()
954 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_fw_load_int_ctx_info()
962 iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); in iwl_enable_fw_load_int_ctx_info()
969 if (trans_p->shared_vec_mask) { in queue_name()
970 int vec = trans_p->shared_vec_mask & in queue_name()
977 DRV_NAME ":queue_%d", i + vec); in queue_name()
982 if (i == trans_p->alloc_vecs - 1) in queue_name()
994 if (!trans_pcie->msix_enabled) { in iwl_enable_rfkill_int()
995 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; in iwl_enable_rfkill_int()
996 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); in iwl_enable_rfkill_int()
999 trans_pcie->fh_init_mask); in iwl_enable_rfkill_int()
1004 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { in iwl_enable_rfkill_int()
1006 * On 9000-series devices this bit isn't enabled by default, so in iwl_enable_rfkill_int()
1008 * to wake up the PCI-E bus for RF-kill interrupts. in iwl_enable_rfkill_int()
1021 lockdep_assert_held(&trans_pcie->mutex); in iwl_is_rfkill_set()
1023 if (trans_pcie->debug_rfkill == 1) in iwl_is_rfkill_set()
1059 return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); in iwl_pcie_dbg_on()