1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
11 #include "iwl-prph.h"
12 #include "iwl-io.h"
14 #include "iwl-op-mode.h"
15 #include "iwl-context-info-gen3.h"
19 * RX path functions
24 * Rx theory of operation
28 * used not only for Rx frames, but for any command response or notification
29 * from the NIC. The driver and NIC manage the Rx buffers by means
32 * Rx Queue Indexes
33 * The host/firmware share two index registers for managing the Rx buffers.
36 * to -- the driver can read up to (but not including) this position and get
40 * The WRITE index maps to the last position the driver has read from -- the
43 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
46 * During initialization, the host sets up the READ queue position to the first
47 * INDEX position, and WRITE to the last (READ - 1 wrapped)
50 * and fire the RX interrupt. The driver can then query the READ index and
52 * resets the Rx queue buffers with new memory.
55 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
57 * The page is either stolen - transferred to the upper layer
58 * or reused - added immediately to the iwl->rxq->rx_free list.
59 * + When the page is stolen - the driver updates the matching queue's used
60 * count, detaches the RBD and transfers it to the queue used list.
61 * When there are two used RBDs - they are transferred to the allocator empty
64 * When there are another 6 used RBDs - they are transferred to the allocator
65 * empty list and the driver tries to claim the pre-allocated buffers and
66 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
68 * When there are 8+ buffers in the free list - either from allocation or from
69 * 8 reused unstolen pages - restock is called to update the FW and indexes.
71 * the allocator has an initial pool of size num_queues*(8-2) - the
76 * detached from the iwl->rxq. The driver 'processed' index is updated.
77 * + If there are no allocated buffers in iwl->rxq->rx_free,
78 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
88 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
89 * queue, updates firmware pointers, and updates
93 * -- enable interrupts --
94 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
96 * Moves the packet buffer from queue to rx_used.
101 * RBD life-cycle:
104 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
108 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
109 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
111 * rxq.queue -> rxq.rx_free -> rxq.queue
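
The life-cycle paths above can be condensed into a small standalone sketch (user-space C, not kernel code). Only the list movements are modeled; struct rxq_model and receive_one() are invented names for illustration, and real RBDs also carry pages and DMA mappings.

/*
 * Standalone sketch of the RBD flows described above: a buffer handed to the
 * hardware either keeps its page ("reused", back to rx_free directly) or
 * loses it to the stack ("stolen", via rx_used and the allocator).
 */
#include <stdbool.h>
#include <stdio.h>

struct rxq_model {
	int in_hw;	/* rxq.queue: owned by the device */
	int rx_free;	/* mapped, ready to hand to the device */
	int rx_used;	/* page stolen, waiting for the allocator */
};

static void receive_one(struct rxq_model *q, bool stolen)
{
	q->in_hw--;
	if (stolen)
		q->rx_used++;	/* needs a new page before it can be reused */
	else
		q->rx_free++;	/* same page, immediately reusable */
}

int main(void)
{
	struct rxq_model q = { .in_hw = 8, .rx_free = 0, .rx_used = 0 };

	receive_one(&q, true);	/* frame delivered to the stack */
	receive_one(&q, false);	/* e.g. a command response, page reused */
	printf("in_hw=%d rx_free=%d rx_used=%d\n", q.in_hw, q.rx_free, q.rx_used);
	return 0;
}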
117 * iwl_rxq_space - Return number of free slots available in queue.
121 /* Make sure rx queue size is a power of 2 */ in iwl_rxq_space()
122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); in iwl_rxq_space()
125 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity in iwl_rxq_space()
130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); in iwl_rxq_space()
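
As a standalone illustration of the formula in the line above (user-space C, queue size illustrative): the mask trick only works because the queue size is a power of 2, and at most queue_size - 1 slots are ever reported free so that read == write stays unambiguous.

/*
 * Standalone sketch of the free-slot arithmetic used by iwl_rxq_space().
 */
#include <assert.h>
#include <stdio.h>

static unsigned int rxq_space(unsigned int read, unsigned int write,
			      unsigned int queue_size)
{
	/* queue_size must be a power of 2 for the mask to work */
	assert((queue_size & (queue_size - 1)) == 0);
	return (read - write - 1) & (queue_size - 1);
}

int main(void)
{
	printf("%u\n", rxq_space(0, 0, 256));   /* read == write: 255 slots can be restocked */
	printf("%u\n", rxq_space(10, 9, 256));  /* write caught up: nothing left to restock */
	printf("%u\n", rxq_space(5, 250, 256)); /* wrap-around handled by the unsigned mask: 10 */
	return 0;
}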
134 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
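
The helper's body is not among the matched lines. The sketch below shows the conversion it is understood to perform for the legacy single-queue format, where the device takes the DMA address shifted right by 8 bits (256-byte granularity); treat the shift width as an assumption for illustration, not a copy of the kernel implementation.

/*
 * Standalone sketch (assumption: legacy RBD format, DMA address >> 8,
 * stored as a 32-bit word). Not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	/* low 8 bits are expected to be zero: buffers are 256-byte aligned */
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t dma = 0x12345000ULL;	/* hypothetical page-aligned address */

	printf("RBD ptr: 0x%x\n", dma_addr2rbd_ptr(dma));
	return 0;
}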
142 * iwl_pcie_rx_stop - stops the Rx DMA
146 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_rx_stop()
151 } else if (trans->trans_cfg->mq_rx_supported) { in iwl_pcie_rx_stop()
164 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
171 lockdep_assert_held(&rxq->lock); in iwl_pcie_rxq_inc_wr_ptr()
178 if (!trans->trans_cfg->base_params->shadow_reg_enable && in iwl_pcie_rxq_inc_wr_ptr()
179 test_bit(STATUS_TPOWER_PMI, &trans->status)) { in iwl_pcie_rxq_inc_wr_ptr()
183 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", in iwl_pcie_rxq_inc_wr_ptr()
187 rxq->need_update = true; in iwl_pcie_rxq_inc_wr_ptr()
192 rxq->write_actual = round_down(rxq->write, 8); in iwl_pcie_rxq_inc_wr_ptr()
193 if (!trans->trans_cfg->mq_rx_supported) in iwl_pcie_rxq_inc_wr_ptr()
194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); in iwl_pcie_rxq_inc_wr_ptr()
195 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) in iwl_pcie_rxq_inc_wr_ptr()
196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | in iwl_pcie_rxq_inc_wr_ptr()
197 HBUS_TARG_WRPTR_RX_Q(rxq->id)); in iwl_pcie_rxq_inc_wr_ptr()
199 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), in iwl_pcie_rxq_inc_wr_ptr()
200 rxq->write_actual); in iwl_pcie_rxq_inc_wr_ptr()
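
The write_actual = round_down(rxq->write, 8) step above means the device only ever learns the write pointer in steps of 8 RBDs, which is also why the restock paths below only ring the doorbell when write crosses another multiple of 8. A standalone sketch of that rounding (user-space C):

/*
 * Standalone sketch of the write-pointer rounding used before updating the
 * hardware register.
 */
#include <stdio.h>

static unsigned int round_down8(unsigned int write)
{
	return write & ~0x7u;	/* same effect as round_down(write, 8) */
}

int main(void)
{
	for (unsigned int w = 0; w <= 18; w += 3)
		printf("write=%2u -> write_actual=%2u\n", w, round_down8(w));
	return 0;
}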
208 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_rxq_check_wrptr()
209 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_pcie_rxq_check_wrptr()
211 if (!rxq->need_update) in iwl_pcie_rxq_check_wrptr()
213 spin_lock_bh(&rxq->lock); in iwl_pcie_rxq_check_wrptr()
215 rxq->need_update = false; in iwl_pcie_rxq_check_wrptr()
216 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_check_wrptr()
224 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_restock_bd()
225 struct iwl_rx_transfer_desc *bd = rxq->bd; in iwl_pcie_restock_bd()
229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); in iwl_pcie_restock_bd()
230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid); in iwl_pcie_restock_bd()
232 __le64 *bd = rxq->bd; in iwl_pcie_restock_bd()
234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); in iwl_pcie_restock_bd()
237 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", in iwl_pcie_restock_bd()
238 (u32)rxb->vid, rxq->id, rxq->write); in iwl_pcie_restock_bd()
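
For the pre-AX210 multi-queue format, the matched lines show the free BD written as page_dma | vid. The standalone sketch below illustrates that packing, assuming the DMA address is at least 4 KiB aligned so its low 12 bits are free to carry the virtual buffer ID; VID_MASK and pack_bd() are illustrative names, and the 12-bit width is taken from the completion-descriptor code further down.

/*
 * Standalone sketch: pack a buffer's DMA address and its virtual RB ID into
 * one 64-bit free-BD word, then split them again.
 */
#include <stdint.h>
#include <stdio.h>

#define VID_MASK 0x0FFFu

static uint64_t pack_bd(uint64_t page_dma, uint16_t vid)
{
	return page_dma | (vid & VID_MASK);
}

int main(void)
{
	uint64_t page_dma = 0x123456000ULL;	/* hypothetical, 4 KiB aligned */
	uint16_t vid = 42;
	uint64_t bd = pack_bd(page_dma, vid);

	printf("bd=0x%llx dma=0x%llx vid=%u\n",
	       (unsigned long long)bd,
	       (unsigned long long)(bd & ~(uint64_t)VID_MASK),
	       (unsigned int)(bd & VID_MASK));
	return 0;
}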
242 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
251 * If the device isn't enabled - no need to try to add buffers... in iwl_pcie_rxmq_restock()
258 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) in iwl_pcie_rxmq_restock()
261 spin_lock_bh(&rxq->lock); in iwl_pcie_rxmq_restock()
262 while (rxq->free_count) { in iwl_pcie_rxmq_restock()
263 /* Get next free Rx buffer, remove from free list */ in iwl_pcie_rxmq_restock()
264 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, in iwl_pcie_rxmq_restock()
266 list_del(&rxb->list); in iwl_pcie_rxmq_restock()
267 rxb->invalid = false; in iwl_pcie_rxmq_restock()
269 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask); in iwl_pcie_rxmq_restock()
270 /* Point to Rx buffer via next RBD in circular buffer */ in iwl_pcie_rxmq_restock()
272 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); in iwl_pcie_rxmq_restock()
273 rxq->free_count--; in iwl_pcie_rxmq_restock()
275 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxmq_restock()
281 if (rxq->write_actual != (rxq->write & ~0x7)) { in iwl_pcie_rxmq_restock()
282 spin_lock_bh(&rxq->lock); in iwl_pcie_rxmq_restock()
284 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxmq_restock()
289 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
297 * If the device isn't enabled - no need to try to add buffers... in iwl_pcie_rxsq_restock()
304 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) in iwl_pcie_rxsq_restock()
307 spin_lock_bh(&rxq->lock); in iwl_pcie_rxsq_restock()
308 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { in iwl_pcie_rxsq_restock()
309 __le32 *bd = (__le32 *)rxq->bd; in iwl_pcie_rxsq_restock()
311 rxb = rxq->queue[rxq->write]; in iwl_pcie_rxsq_restock()
312 BUG_ON(rxb && rxb->page); in iwl_pcie_rxsq_restock()
314 /* Get next free Rx buffer, remove from free list */ in iwl_pcie_rxsq_restock()
315 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, in iwl_pcie_rxsq_restock()
317 list_del(&rxb->list); in iwl_pcie_rxsq_restock()
318 rxb->invalid = false; in iwl_pcie_rxsq_restock()
320 /* Point to Rx buffer via next RBD in circular buffer */ in iwl_pcie_rxsq_restock()
321 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); in iwl_pcie_rxsq_restock()
322 rxq->queue[rxq->write] = rxb; in iwl_pcie_rxsq_restock()
323 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; in iwl_pcie_rxsq_restock()
324 rxq->free_count--; in iwl_pcie_rxsq_restock()
326 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxsq_restock()
330 if (rxq->write_actual != (rxq->write & ~0x7)) { in iwl_pcie_rxsq_restock()
331 spin_lock_bh(&rxq->lock); in iwl_pcie_rxsq_restock()
333 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxsq_restock()
338 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
340 * If there are slots in the RX queue that need to be restocked,
341 * and we have free pre-allocated buffers, fill the ranks as much
351 if (trans->trans_cfg->mq_rx_supported) in iwl_pcie_rxq_restock()
358 * iwl_pcie_rx_alloc_page - allocates and returns a page.
365 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); in iwl_pcie_rx_alloc_page()
366 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; in iwl_pcie_rx_alloc_page()
370 if (trans_pcie->rx_page_order > 0) in iwl_pcie_rx_alloc_page()
373 if (trans_pcie->alloc_page) { in iwl_pcie_rx_alloc_page()
374 spin_lock_bh(&trans_pcie->alloc_page_lock); in iwl_pcie_rx_alloc_page()
376 if (trans_pcie->alloc_page) { in iwl_pcie_rx_alloc_page()
377 *offset = trans_pcie->alloc_page_used; in iwl_pcie_rx_alloc_page()
378 page = trans_pcie->alloc_page; in iwl_pcie_rx_alloc_page()
379 trans_pcie->alloc_page_used += rbsize; in iwl_pcie_rx_alloc_page()
380 if (trans_pcie->alloc_page_used >= allocsize) in iwl_pcie_rx_alloc_page()
381 trans_pcie->alloc_page = NULL; in iwl_pcie_rx_alloc_page()
384 spin_unlock_bh(&trans_pcie->alloc_page_lock); in iwl_pcie_rx_alloc_page()
387 spin_unlock_bh(&trans_pcie->alloc_page_lock); in iwl_pcie_rx_alloc_page()
391 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); in iwl_pcie_rx_alloc_page()
395 trans_pcie->rx_page_order); in iwl_pcie_rx_alloc_page()
397 * Issue an error if we don't have enough pre-allocated in iwl_pcie_rx_alloc_page()
407 spin_lock_bh(&trans_pcie->alloc_page_lock); in iwl_pcie_rx_alloc_page()
408 if (!trans_pcie->alloc_page) { in iwl_pcie_rx_alloc_page()
410 trans_pcie->alloc_page = page; in iwl_pcie_rx_alloc_page()
411 trans_pcie->alloc_page_used = rbsize; in iwl_pcie_rx_alloc_page()
413 spin_unlock_bh(&trans_pcie->alloc_page_lock); in iwl_pcie_rx_alloc_page()
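
The alloc_page/alloc_page_used handling above carves several receive buffers out of one higher-order page by handing out increasing offsets until the page is exhausted. A standalone sketch of that carving (user-space C; ALLOC_SIZE, RB_SIZE and the struct are illustrative, and the real code does this under a spinlock with an actual page pointer):

/*
 * Standalone sketch of partial-page reuse: keep handing out offsets into the
 * cached page until it is used up, then "allocate" a fresh one.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_SIZE 8192u	/* e.g. PAGE_SIZE << rx_page_order */
#define RB_SIZE    2048u	/* e.g. a 2K receive buffer */

struct page_carver {
	bool have_page;
	unsigned int used;	/* next free offset inside the page */
};

static unsigned int get_rb_offset(struct page_carver *pc)
{
	unsigned int offset;

	if (!pc->have_page) {
		pc->have_page = true;	/* stands in for alloc_pages() */
		pc->used = 0;
	}
	offset = pc->used;
	pc->used += RB_SIZE;
	if (pc->used >= ALLOC_SIZE)
		pc->have_page = false;	/* page fully carved, next call allocates */
	return offset;
}

int main(void)
{
	struct page_carver pc = { 0 };

	for (int i = 0; i < 6; i++)
		printf("RB %d at offset %u\n", i, get_rb_offset(&pc));
	return 0;
}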
421 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
423 * A used RBD is an Rx buffer that has been given to the stack. To use it again
439 spin_lock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
440 if (list_empty(&rxq->rx_used)) { in iwl_pcie_rxq_alloc_rbs()
441 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
444 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
450 spin_lock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
452 if (list_empty(&rxq->rx_used)) { in iwl_pcie_rxq_alloc_rbs()
453 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
454 __free_pages(page, trans_pcie->rx_page_order); in iwl_pcie_rxq_alloc_rbs()
457 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, in iwl_pcie_rxq_alloc_rbs()
459 list_del(&rxb->list); in iwl_pcie_rxq_alloc_rbs()
460 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
462 BUG_ON(rxb->page); in iwl_pcie_rxq_alloc_rbs()
463 rxb->page = page; in iwl_pcie_rxq_alloc_rbs()
464 rxb->offset = offset; in iwl_pcie_rxq_alloc_rbs()
466 rxb->page_dma = in iwl_pcie_rxq_alloc_rbs()
467 dma_map_page(trans->dev, page, rxb->offset, in iwl_pcie_rxq_alloc_rbs()
468 trans_pcie->rx_buf_bytes, in iwl_pcie_rxq_alloc_rbs()
470 if (dma_mapping_error(trans->dev, rxb->page_dma)) { in iwl_pcie_rxq_alloc_rbs()
471 rxb->page = NULL; in iwl_pcie_rxq_alloc_rbs()
472 spin_lock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
473 list_add(&rxb->list, &rxq->rx_used); in iwl_pcie_rxq_alloc_rbs()
474 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
475 __free_pages(page, trans_pcie->rx_page_order); in iwl_pcie_rxq_alloc_rbs()
479 spin_lock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
481 list_add_tail(&rxb->list, &rxq->rx_free); in iwl_pcie_rxq_alloc_rbs()
482 rxq->free_count++; in iwl_pcie_rxq_alloc_rbs()
484 spin_unlock_bh(&rxq->lock); in iwl_pcie_rxq_alloc_rbs()
493 if (!trans_pcie->rx_pool) in iwl_pcie_free_rbs_pool()
496 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { in iwl_pcie_free_rbs_pool()
497 if (!trans_pcie->rx_pool[i].page) in iwl_pcie_free_rbs_pool()
499 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, in iwl_pcie_free_rbs_pool()
500 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); in iwl_pcie_free_rbs_pool()
501 __free_pages(trans_pcie->rx_pool[i].page, in iwl_pcie_free_rbs_pool()
502 trans_pcie->rx_page_order); in iwl_pcie_free_rbs_pool()
503 trans_pcie->rx_pool[i].page = NULL; in iwl_pcie_free_rbs_pool()
508 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
516 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_allocator()
518 int pending = atomic_read(&rba->req_pending); in iwl_pcie_rx_allocator()
522 /* If we were scheduled - there is at least one request */ in iwl_pcie_rx_allocator()
523 spin_lock_bh(&rba->lock); in iwl_pcie_rx_allocator()
524 /* swap out the rba->rbd_empty to a local list */ in iwl_pcie_rx_allocator()
525 list_replace_init(&rba->rbd_empty, &local_empty); in iwl_pcie_rx_allocator()
526 spin_unlock_bh(&rba->lock); in iwl_pcie_rx_allocator()
541 /* List should never be empty - each reused RBD is in iwl_pcie_rx_allocator()
550 BUG_ON(rxb->page); in iwl_pcie_rx_allocator()
553 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset, in iwl_pcie_rx_allocator()
557 rxb->page = page; in iwl_pcie_rx_allocator()
560 rxb->page_dma = dma_map_page(trans->dev, page, in iwl_pcie_rx_allocator()
561 rxb->offset, in iwl_pcie_rx_allocator()
562 trans_pcie->rx_buf_bytes, in iwl_pcie_rx_allocator()
564 if (dma_mapping_error(trans->dev, rxb->page_dma)) { in iwl_pcie_rx_allocator()
565 rxb->page = NULL; in iwl_pcie_rx_allocator()
566 __free_pages(page, trans_pcie->rx_page_order); in iwl_pcie_rx_allocator()
571 list_move(&rxb->list, &local_allocated); in iwl_pcie_rx_allocator()
575 atomic_dec(&rba->req_pending); in iwl_pcie_rx_allocator()
576 pending--; in iwl_pcie_rx_allocator()
579 pending = atomic_read(&rba->req_pending); in iwl_pcie_rx_allocator()
586 spin_lock_bh(&rba->lock); in iwl_pcie_rx_allocator()
588 list_splice_tail(&local_allocated, &rba->rbd_allocated); in iwl_pcie_rx_allocator()
590 list_splice_tail_init(&rba->rbd_empty, &local_empty); in iwl_pcie_rx_allocator()
591 spin_unlock_bh(&rba->lock); in iwl_pcie_rx_allocator()
593 atomic_inc(&rba->req_ready); in iwl_pcie_rx_allocator()
597 spin_lock_bh(&rba->lock); in iwl_pcie_rx_allocator()
599 list_splice_tail(&local_empty, &rba->rbd_empty); in iwl_pcie_rx_allocator()
600 spin_unlock_bh(&rba->lock); in iwl_pcie_rx_allocator()
606 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
608 * Called by the queue when it has posted an allocation request and
610 * This function directly moves the allocated RBs to the queue's ownership
617 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_allocator_get()
620 lockdep_assert_held(&rxq->lock); in iwl_pcie_rx_allocator_get()
623 * atomic_dec_if_positive returns req_ready - 1 for any scenario. in iwl_pcie_rx_allocator_get()
624 * If req_ready is 0 atomic_dec_if_positive will return -1 and this in iwl_pcie_rx_allocator_get()
627 * req_ready > 0, i.e. - there are ready requests and the function in iwl_pcie_rx_allocator_get()
630 if (atomic_dec_if_positive(&rba->req_ready) < 0) in iwl_pcie_rx_allocator_get()
633 spin_lock(&rba->lock); in iwl_pcie_rx_allocator_get()
635 /* Get next free Rx buffer, remove it from free list */ in iwl_pcie_rx_allocator_get()
637 list_first_entry(&rba->rbd_allocated, in iwl_pcie_rx_allocator_get()
640 list_move(&rxb->list, &rxq->rx_free); in iwl_pcie_rx_allocator_get()
642 spin_unlock(&rba->lock); in iwl_pcie_rx_allocator_get()
644 rxq->used_count -= RX_CLAIM_REQ_ALLOC; in iwl_pcie_rx_allocator_get()
645 rxq->free_count += RX_CLAIM_REQ_ALLOC; in iwl_pcie_rx_allocator_get()
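
The claim path above consumes one ready request only when there is one, via atomic_dec_if_positive(). The standalone sketch below mimics that handshake with plain integers instead of kernel atomics; dec_if_positive() and claim_batch() are illustrative names, and RX_CLAIM_REQ_ALLOC = 8 matches the batch size the surrounding code moves per claim.

/*
 * Standalone sketch of the allocator handshake: the allocator bumps
 * req_ready when a batch is ready; the queue claims a batch only when
 * dec-if-positive succeeds.
 */
#include <stdio.h>

#define RX_CLAIM_REQ_ALLOC 8

static int req_ready;

/* mimics atomic_dec_if_positive(): returns the new value, or -1 if it was 0 */
static int dec_if_positive(int *v)
{
	if (*v <= 0)
		return -1;
	return --(*v);
}

static int claim_batch(int *free_count, int *used_count)
{
	if (dec_if_positive(&req_ready) < 0)
		return 0;	/* nothing ready yet; try again later */
	*used_count -= RX_CLAIM_REQ_ALLOC;
	*free_count += RX_CLAIM_REQ_ALLOC;
	return RX_CLAIM_REQ_ALLOC;
}

int main(void)
{
	int free_count = 0, used_count = 16;

	req_ready = 1;		/* allocator finished one request */
	printf("claimed %d\n", claim_batch(&free_count, &used_count));
	printf("claimed %d\n", claim_batch(&free_count, &used_count));
	printf("free=%d used=%d\n", free_count, used_count);
	return 0;
}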
655 iwl_pcie_rx_allocator(trans_pcie->trans); in iwl_pcie_rx_allocator_work()
660 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_pcie_free_bd_size()
663 return trans->trans_cfg->mq_rx_supported ? in iwl_pcie_free_bd_size()
669 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) in iwl_pcie_used_bd_size()
672 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_pcie_used_bd_size()
683 if (rxq->bd) in iwl_pcie_free_rxq_dma()
684 dma_free_coherent(trans->dev, in iwl_pcie_free_rxq_dma()
685 free_size * rxq->queue_size, in iwl_pcie_free_rxq_dma()
686 rxq->bd, rxq->bd_dma); in iwl_pcie_free_rxq_dma()
687 rxq->bd_dma = 0; in iwl_pcie_free_rxq_dma()
688 rxq->bd = NULL; in iwl_pcie_free_rxq_dma()
690 rxq->rb_stts_dma = 0; in iwl_pcie_free_rxq_dma()
691 rxq->rb_stts = NULL; in iwl_pcie_free_rxq_dma()
693 if (rxq->used_bd) in iwl_pcie_free_rxq_dma()
694 dma_free_coherent(trans->dev, in iwl_pcie_free_rxq_dma()
696 rxq->queue_size, in iwl_pcie_free_rxq_dma()
697 rxq->used_bd, rxq->used_bd_dma); in iwl_pcie_free_rxq_dma()
698 rxq->used_bd_dma = 0; in iwl_pcie_free_rxq_dma()
699 rxq->used_bd = NULL; in iwl_pcie_free_rxq_dma()
704 bool use_rx_td = (trans->trans_cfg->device_family >= in iwl_pcie_rb_stts_size()
718 struct device *dev = trans->dev; in iwl_pcie_alloc_rxq_dma()
722 spin_lock_init(&rxq->lock); in iwl_pcie_alloc_rxq_dma()
723 if (trans->trans_cfg->mq_rx_supported) in iwl_pcie_alloc_rxq_dma()
724 rxq->queue_size = trans->cfg->num_rbds; in iwl_pcie_alloc_rxq_dma()
726 rxq->queue_size = RX_QUEUE_SIZE; in iwl_pcie_alloc_rxq_dma()
734 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, in iwl_pcie_alloc_rxq_dma()
735 &rxq->bd_dma, GFP_KERNEL); in iwl_pcie_alloc_rxq_dma()
736 if (!rxq->bd) in iwl_pcie_alloc_rxq_dma()
739 if (trans->trans_cfg->mq_rx_supported) { in iwl_pcie_alloc_rxq_dma()
740 rxq->used_bd = dma_alloc_coherent(dev, in iwl_pcie_alloc_rxq_dma()
742 rxq->queue_size, in iwl_pcie_alloc_rxq_dma()
743 &rxq->used_bd_dma, in iwl_pcie_alloc_rxq_dma()
745 if (!rxq->used_bd) in iwl_pcie_alloc_rxq_dma()
749 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; in iwl_pcie_alloc_rxq_dma()
750 rxq->rb_stts_dma = in iwl_pcie_alloc_rxq_dma()
751 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; in iwl_pcie_alloc_rxq_dma()
756 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_alloc_rxq_dma()
757 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_pcie_alloc_rxq_dma()
762 return -ENOMEM; in iwl_pcie_alloc_rxq_dma()
769 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_alloc()
772 if (WARN_ON(trans_pcie->rxq)) in iwl_pcie_rx_alloc()
773 return -EINVAL; in iwl_pcie_rx_alloc()
775 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), in iwl_pcie_rx_alloc()
777 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), in iwl_pcie_rx_alloc()
778 sizeof(trans_pcie->rx_pool[0]), in iwl_pcie_rx_alloc()
780 trans_pcie->global_table = in iwl_pcie_rx_alloc()
781 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), in iwl_pcie_rx_alloc()
782 sizeof(trans_pcie->global_table[0]), in iwl_pcie_rx_alloc()
784 if (!trans_pcie->rxq || !trans_pcie->rx_pool || in iwl_pcie_rx_alloc()
785 !trans_pcie->global_table) { in iwl_pcie_rx_alloc()
786 ret = -ENOMEM; in iwl_pcie_rx_alloc()
790 spin_lock_init(&rba->lock); in iwl_pcie_rx_alloc()
796 trans_pcie->base_rb_stts = in iwl_pcie_rx_alloc()
797 dma_alloc_coherent(trans->dev, in iwl_pcie_rx_alloc()
798 rb_stts_size * trans->num_rx_queues, in iwl_pcie_rx_alloc()
799 &trans_pcie->base_rb_stts_dma, in iwl_pcie_rx_alloc()
801 if (!trans_pcie->base_rb_stts) { in iwl_pcie_rx_alloc()
802 ret = -ENOMEM; in iwl_pcie_rx_alloc()
806 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_rx_alloc()
807 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_pcie_rx_alloc()
809 rxq->id = i; in iwl_pcie_rx_alloc()
817 if (trans_pcie->base_rb_stts) { in iwl_pcie_rx_alloc()
818 dma_free_coherent(trans->dev, in iwl_pcie_rx_alloc()
819 rb_stts_size * trans->num_rx_queues, in iwl_pcie_rx_alloc()
820 trans_pcie->base_rb_stts, in iwl_pcie_rx_alloc()
821 trans_pcie->base_rb_stts_dma); in iwl_pcie_rx_alloc()
822 trans_pcie->base_rb_stts = NULL; in iwl_pcie_rx_alloc()
823 trans_pcie->base_rb_stts_dma = 0; in iwl_pcie_rx_alloc()
825 kfree(trans_pcie->rx_pool); in iwl_pcie_rx_alloc()
826 trans_pcie->rx_pool = NULL; in iwl_pcie_rx_alloc()
827 kfree(trans_pcie->global_table); in iwl_pcie_rx_alloc()
828 trans_pcie->global_table = NULL; in iwl_pcie_rx_alloc()
829 kfree(trans_pcie->rxq); in iwl_pcie_rx_alloc()
830 trans_pcie->rxq = NULL; in iwl_pcie_rx_alloc()
841 switch (trans_pcie->rx_buf_size) { in iwl_pcie_rx_hw_init()
859 /* Stop Rx DMA */ in iwl_pcie_rx_hw_init()
866 /* Reset driver's Rx queue write index */ in iwl_pcie_rx_hw_init()
871 (u32)(rxq->bd_dma >> 8)); in iwl_pcie_rx_hw_init()
873 /* Tell device where in DRAM to update its Rx status */ in iwl_pcie_rx_hw_init()
875 rxq->rb_stts_dma >> 4); in iwl_pcie_rx_hw_init()
877 /* Enable Rx DMA in iwl_pcie_rx_hw_init()
879 * the credit mechanism in 5000 HW RX FIFO in iwl_pcie_rx_hw_init()
880 * Direct rx interrupts to hosts in iwl_pcie_rx_hw_init()
881 * Rx buffer size 4 or 8k or 12k in iwl_pcie_rx_hw_init()
899 if (trans->cfg->host_interrupt_operation_mode) in iwl_pcie_rx_hw_init()
909 switch (trans_pcie->rx_buf_size) { in iwl_pcie_rx_mq_hw_init()
930 /* Stop Rx DMA */ in iwl_pcie_rx_mq_hw_init()
932 /* disable free and used rx queue operation */ in iwl_pcie_rx_mq_hw_init()
935 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_rx_mq_hw_init()
939 trans_pcie->rxq[i].bd_dma); in iwl_pcie_rx_mq_hw_init()
943 trans_pcie->rxq[i].used_bd_dma); in iwl_pcie_rx_mq_hw_init()
944 /* Tell device where in DRAM to update its Rx status */ in iwl_pcie_rx_mq_hw_init()
947 trans_pcie->rxq[i].rb_stts_dma); in iwl_pcie_rx_mq_hw_init()
957 * Enable Rx DMA in iwl_pcie_rx_mq_hw_init()
958 * Rx buffer size 4 or 8k or 12k in iwl_pcie_rx_mq_hw_init()
971 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe in iwl_pcie_rx_mq_hw_init()
972 * Default queue is 0 in iwl_pcie_rx_mq_hw_init()
979 trans->trans_cfg->integrated ? in iwl_pcie_rx_mq_hw_init()
982 /* Enable the relevant rx queues */ in iwl_pcie_rx_mq_hw_init()
993 lockdep_assert_held(&rxq->lock); in iwl_pcie_rx_init_rxb_lists()
995 INIT_LIST_HEAD(&rxq->rx_free); in iwl_pcie_rx_init_rxb_lists()
996 INIT_LIST_HEAD(&rxq->rx_used); in iwl_pcie_rx_init_rxb_lists()
997 rxq->free_count = 0; in iwl_pcie_rx_init_rxb_lists()
998 rxq->used_count = 0; in iwl_pcie_rx_init_rxb_lists()
1001 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1015 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); in iwl_pcie_napi_poll()
1016 trans = trans_pcie->trans; in iwl_pcie_napi_poll()
1018 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); in iwl_pcie_napi_poll()
1021 rxq->id, ret, budget); in iwl_pcie_napi_poll()
1024 spin_lock(&trans_pcie->irq_lock); in iwl_pcie_napi_poll()
1025 if (test_bit(STATUS_INT_ENABLED, &trans->status)) in iwl_pcie_napi_poll()
1027 spin_unlock(&trans_pcie->irq_lock); in iwl_pcie_napi_poll()
1029 napi_complete_done(&rxq->napi, ret); in iwl_pcie_napi_poll()
1042 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); in iwl_pcie_napi_poll_msix()
1043 trans = trans_pcie->trans; in iwl_pcie_napi_poll_msix()
1045 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); in iwl_pcie_napi_poll_msix()
1046 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, in iwl_pcie_napi_poll_msix()
1050 int irq_line = rxq->id; in iwl_pcie_napi_poll_msix()
1053 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && in iwl_pcie_napi_poll_msix()
1054 rxq->id == 1) in iwl_pcie_napi_poll_msix()
1057 spin_lock(&trans_pcie->irq_lock); in iwl_pcie_napi_poll_msix()
1059 spin_unlock(&trans_pcie->irq_lock); in iwl_pcie_napi_poll_msix()
1061 napi_complete_done(&rxq->napi, ret); in iwl_pcie_napi_poll_msix()
1072 if (unlikely(!trans_pcie->rxq)) in iwl_pcie_rx_napi_sync()
1075 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_rx_napi_sync()
1076 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_pcie_rx_napi_sync()
1078 if (rxq && rxq->napi.poll) in iwl_pcie_rx_napi_sync()
1079 napi_synchronize(&rxq->napi); in iwl_pcie_rx_napi_sync()
1087 struct iwl_rb_allocator *rba = &trans_pcie->rba; in _iwl_pcie_rx_init()
1090 if (!trans_pcie->rxq) { in _iwl_pcie_rx_init()
1095 def_rxq = trans_pcie->rxq; in _iwl_pcie_rx_init()
1097 cancel_work_sync(&rba->rx_alloc); in _iwl_pcie_rx_init()
1099 spin_lock_bh(&rba->lock); in _iwl_pcie_rx_init()
1100 atomic_set(&rba->req_pending, 0); in _iwl_pcie_rx_init()
1101 atomic_set(&rba->req_ready, 0); in _iwl_pcie_rx_init()
1102 INIT_LIST_HEAD(&rba->rbd_allocated); in _iwl_pcie_rx_init()
1103 INIT_LIST_HEAD(&rba->rbd_empty); in _iwl_pcie_rx_init()
1104 spin_unlock_bh(&rba->lock); in _iwl_pcie_rx_init()
1106 /* free all first - we overwrite everything here */ in _iwl_pcie_rx_init()
1110 def_rxq->queue[i] = NULL; in _iwl_pcie_rx_init()
1112 for (i = 0; i < trans->num_rx_queues; i++) { in _iwl_pcie_rx_init()
1113 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in _iwl_pcie_rx_init()
1115 spin_lock_bh(&rxq->lock); in _iwl_pcie_rx_init()
1118 * and used all buffers, but have not restocked the Rx queue in _iwl_pcie_rx_init()
1121 rxq->read = 0; in _iwl_pcie_rx_init()
1122 rxq->write = 0; in _iwl_pcie_rx_init()
1123 rxq->write_actual = 0; in _iwl_pcie_rx_init()
1124 memset(rxq->rb_stts, 0, in _iwl_pcie_rx_init()
1125 (trans->trans_cfg->device_family >= in _iwl_pcie_rx_init()
1131 spin_unlock_bh(&rxq->lock); in _iwl_pcie_rx_init()
1133 if (!rxq->napi.poll) { in _iwl_pcie_rx_init()
1136 if (trans_pcie->msix_enabled) in _iwl_pcie_rx_init()
1139 netif_napi_add(trans_pcie->napi_dev, &rxq->napi, in _iwl_pcie_rx_init()
1141 napi_enable(&rxq->napi); in _iwl_pcie_rx_init()
1146 /* move the pool to the default queue and allocator ownerships */ in _iwl_pcie_rx_init()
1147 queue_size = trans->trans_cfg->mq_rx_supported ? in _iwl_pcie_rx_init()
1148 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; in _iwl_pcie_rx_init()
1149 allocator_pool_size = trans->num_rx_queues * in _iwl_pcie_rx_init()
1150 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); in _iwl_pcie_rx_init()
1154 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; in _iwl_pcie_rx_init()
1157 list_add(&rxb->list, &rba->rbd_empty); in _iwl_pcie_rx_init()
1159 list_add(&rxb->list, &def_rxq->rx_used); in _iwl_pcie_rx_init()
1160 trans_pcie->global_table[i] = rxb; in _iwl_pcie_rx_init()
1161 rxb->vid = (u16)(i + 1); in _iwl_pcie_rx_init()
1162 rxb->invalid = true; in _iwl_pcie_rx_init()
1178 if (trans->trans_cfg->mq_rx_supported) in iwl_pcie_rx_init()
1181 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); in iwl_pcie_rx_init()
1183 iwl_pcie_rxq_restock(trans, trans_pcie->rxq); in iwl_pcie_rx_init()
1185 spin_lock_bh(&trans_pcie->rxq->lock); in iwl_pcie_rx_init()
1186 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); in iwl_pcie_rx_init()
1187 spin_unlock_bh(&trans_pcie->rxq->lock); in iwl_pcie_rx_init()
1208 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_free()
1215 if (!trans_pcie->rxq) { in iwl_pcie_rx_free()
1216 IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); in iwl_pcie_rx_free()
1220 cancel_work_sync(&rba->rx_alloc); in iwl_pcie_rx_free()
1224 if (trans_pcie->base_rb_stts) { in iwl_pcie_rx_free()
1225 dma_free_coherent(trans->dev, in iwl_pcie_rx_free()
1226 rb_stts_size * trans->num_rx_queues, in iwl_pcie_rx_free()
1227 trans_pcie->base_rb_stts, in iwl_pcie_rx_free()
1228 trans_pcie->base_rb_stts_dma); in iwl_pcie_rx_free()
1229 trans_pcie->base_rb_stts = NULL; in iwl_pcie_rx_free()
1230 trans_pcie->base_rb_stts_dma = 0; in iwl_pcie_rx_free()
1233 for (i = 0; i < trans->num_rx_queues; i++) { in iwl_pcie_rx_free()
1234 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; in iwl_pcie_rx_free()
1238 if (rxq->napi.poll) { in iwl_pcie_rx_free()
1239 napi_disable(&rxq->napi); in iwl_pcie_rx_free()
1240 netif_napi_del(&rxq->napi); in iwl_pcie_rx_free()
1243 kfree(trans_pcie->rx_pool); in iwl_pcie_rx_free()
1244 kfree(trans_pcie->global_table); in iwl_pcie_rx_free()
1245 kfree(trans_pcie->rxq); in iwl_pcie_rx_free()
1247 if (trans_pcie->alloc_page) in iwl_pcie_rx_free()
1248 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); in iwl_pcie_rx_free()
1254 spin_lock(&rba->lock); in iwl_pcie_rx_move_to_allocator()
1255 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); in iwl_pcie_rx_move_to_allocator()
1256 spin_unlock(&rba->lock); in iwl_pcie_rx_move_to_allocator()
1260 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1263 * When there are 2 empty RBDs - a request for allocation is posted
1270 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_reuse_rbd()
1274 list_add_tail(&rxb->list, &rxq->rx_used); in iwl_pcie_rx_reuse_rbd()
1280 rxq->used_count++; in iwl_pcie_rx_reuse_rbd()
1282 /* If we have RX_POST_REQ_ALLOC new released rx buffers - in iwl_pcie_rx_reuse_rbd()
1287 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { in iwl_pcie_rx_reuse_rbd()
1292 atomic_inc(&rba->req_pending); in iwl_pcie_rx_reuse_rbd()
1293 queue_work(rba->alloc_wq, &rba->rx_alloc); in iwl_pcie_rx_reuse_rbd()
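
The trigger above posts an allocation request each time used_count crosses another multiple of RX_CLAIM_REQ_ALLOC by RX_POST_REQ_ALLOC entries, i.e. at 2, 10, 18, and so on, which matches the "request posted with 2 empty RBDs, claimed in batches of 8" scheme in the theory-of-operation comment. A standalone sketch of the arithmetic:

/*
 * Standalone sketch of the request-posting condition in
 * iwl_pcie_rx_reuse_rbd().
 */
#include <stdio.h>

#define RX_CLAIM_REQ_ALLOC 8
#define RX_POST_REQ_ALLOC  2

int main(void)
{
	int req_pending = 0;

	for (int used_count = 1; used_count <= 20; used_count++) {
		if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
			req_pending++;
			printf("used_count=%2d -> post request #%d\n",
			       used_count, req_pending);
		}
	}
	return 0;
}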
1304 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; in iwl_pcie_rx_handle_rb()
1306 int max_len = trans_pcie->rx_buf_bytes; in iwl_pcie_rx_handle_rb()
1312 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); in iwl_pcie_rx_handle_rb()
1315 struct iwl_rx_packet *pkt; in iwl_pcie_rx_handle_rb() local
1319 ._offset = rxb->offset + offset, in iwl_pcie_rx_handle_rb()
1320 ._rx_page_order = trans_pcie->rx_page_order, in iwl_pcie_rx_handle_rb()
1321 ._page = rxb->page, in iwl_pcie_rx_handle_rb()
1326 pkt = rxb_addr(&rxcb); in iwl_pcie_rx_handle_rb()
1328 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { in iwl_pcie_rx_handle_rb()
1331 rxq->id, offset); in iwl_pcie_rx_handle_rb()
1335 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> in iwl_pcie_rx_handle_rb()
1336 FH_RSCSR_RXQ_POS != rxq->id, in iwl_pcie_rx_handle_rb()
1337 "frame on invalid queue - is on %d and indicates %d\n", in iwl_pcie_rx_handle_rb()
1338 rxq->id, in iwl_pcie_rx_handle_rb()
1339 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> in iwl_pcie_rx_handle_rb()
1344 rxq->id, offset, in iwl_pcie_rx_handle_rb()
1346 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), in iwl_pcie_rx_handle_rb()
1347 pkt->hdr.group_id, pkt->hdr.cmd, in iwl_pcie_rx_handle_rb()
1348 le16_to_cpu(pkt->hdr.sequence)); in iwl_pcie_rx_handle_rb()
1350 len = iwl_rx_packet_len(pkt); in iwl_pcie_rx_handle_rb()
1356 if (len < sizeof(*pkt) || offset > max_len) in iwl_pcie_rx_handle_rb()
1359 maybe_trace_iwlwifi_dev_rx(trans, pkt, len); in iwl_pcie_rx_handle_rb()
1362 * to a (driver-originated) command. in iwl_pcie_rx_handle_rb()
1363 * If the packet (e.g. Rx frame) originated from uCode, in iwl_pcie_rx_handle_rb()
1365 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, in iwl_pcie_rx_handle_rb()
1367 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); in iwl_pcie_rx_handle_rb()
1368 if (reclaim && !pkt->hdr.group_id) { in iwl_pcie_rx_handle_rb()
1371 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { in iwl_pcie_rx_handle_rb()
1372 if (trans_pcie->no_reclaim_cmds[i] == in iwl_pcie_rx_handle_rb()
1373 pkt->hdr.cmd) { in iwl_pcie_rx_handle_rb()
1380 if (rxq->id == IWL_DEFAULT_RX_QUEUE) in iwl_pcie_rx_handle_rb()
1381 iwl_op_mode_rx(trans->op_mode, &rxq->napi, in iwl_pcie_rx_handle_rb()
1384 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, in iwl_pcie_rx_handle_rb()
1385 &rxcb, rxq->id); in iwl_pcie_rx_handle_rb()
1393 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_pcie_rx_handle_rb()
1397 kfree_sensitive(txq->entries[cmd_index].free_buf); in iwl_pcie_rx_handle_rb()
1398 txq->entries[cmd_index].free_buf = NULL; in iwl_pcie_rx_handle_rb()
1403 * as we reclaim the driver command queue */ in iwl_pcie_rx_handle_rb()
1411 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_pcie_rx_handle_rb()
1415 /* page was stolen from us -- free our reference */ in iwl_pcie_rx_handle_rb()
1417 __free_pages(rxb->page, trans_pcie->rx_page_order); in iwl_pcie_rx_handle_rb()
1418 rxb->page = NULL; in iwl_pcie_rx_handle_rb()
1422 * SKBs that fail to Rx correctly, add them back into the in iwl_pcie_rx_handle_rb()
1424 if (rxb->page != NULL) { in iwl_pcie_rx_handle_rb()
1425 rxb->page_dma = in iwl_pcie_rx_handle_rb()
1426 dma_map_page(trans->dev, rxb->page, rxb->offset, in iwl_pcie_rx_handle_rb()
1427 trans_pcie->rx_buf_bytes, in iwl_pcie_rx_handle_rb()
1429 if (dma_mapping_error(trans->dev, rxb->page_dma)) { in iwl_pcie_rx_handle_rb()
1435 __free_pages(rxb->page, trans_pcie->rx_page_order); in iwl_pcie_rx_handle_rb()
1436 rxb->page = NULL; in iwl_pcie_rx_handle_rb()
1439 list_add_tail(&rxb->list, &rxq->rx_free); in iwl_pcie_rx_handle_rb()
1440 rxq->free_count++; in iwl_pcie_rx_handle_rb()
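
The reclaim decision in the handler above hinges on a single bit of the packet's sequence field: firmware-originated notifications carry the RX-frame bit and reclaim nothing, while everything else is a response to a driver-originated command and frees a command-queue entry. The standalone sketch below assumes SEQ_RX_FRAME is bit 15 (0x8000); treat that value as illustrative.

/*
 * Standalone sketch of the reclaim test from iwl_pcie_rx_handle_rb().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_RX_FRAME 0x8000u	/* assumed bit position for this sketch */

static bool needs_reclaim(uint16_t sequence)
{
	return !(sequence & SEQ_RX_FRAME);
}

int main(void)
{
	printf("cmd response (seq 0x0123): reclaim=%d\n", needs_reclaim(0x0123));
	printf("fw notification (seq 0x8123): reclaim=%d\n", needs_reclaim(0x8123));
	return 0;
}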
1457 if (!trans->trans_cfg->mq_rx_supported) { in iwl_pcie_get_rxb()
1458 rxb = rxq->queue[i]; in iwl_pcie_get_rxb()
1459 rxq->queue[i] = NULL; in iwl_pcie_get_rxb()
1463 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { in iwl_pcie_get_rxb()
1464 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; in iwl_pcie_get_rxb()
1468 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_get_rxb()
1469 struct iwl_rx_completion_desc *cd = rxq->used_bd; in iwl_pcie_get_rxb()
1474 __le32 *cd = rxq->used_bd; in iwl_pcie_get_rxb()
1476 vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ in iwl_pcie_get_rxb()
1479 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) in iwl_pcie_get_rxb()
1482 rxb = trans_pcie->global_table[vid - 1]; in iwl_pcie_get_rxb()
1483 if (rxb->invalid) in iwl_pcie_get_rxb()
1486 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); in iwl_pcie_get_rxb()
1488 rxb->invalid = true; in iwl_pcie_get_rxb()
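
The lookup above takes a 1-based virtual buffer ID out of the completion descriptor (12 bits in the legacy format), rejects 0 and out-of-range values, and indexes the global table. A standalone sketch of that path (user-space C; POOL_SIZE and the struct are illustrative):

/*
 * Standalone sketch of the VID lookup done in iwl_pcie_get_rxb().
 */
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 16		/* illustrative, stands in for RX_POOL_SIZE() */

struct rx_buf {
	uint16_t vid;
	int invalid;
};

static struct rx_buf pool[POOL_SIZE];
static struct rx_buf *global_table[POOL_SIZE];

static struct rx_buf *lookup_rxb(uint32_t cd_word)
{
	uint32_t vid = cd_word & 0x0FFF;	/* 12-bit VID in the legacy format */

	if (!vid || vid > POOL_SIZE)
		return NULL;			/* out of range: corrupt descriptor */
	return global_table[vid - 1];		/* VIDs are 1-based */
}

int main(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		pool[i].vid = (uint16_t)(i + 1);
		global_table[i] = &pool[i];
	}

	struct rx_buf *rxb = lookup_rxb(0x0003);

	printf("vid=%u valid=%d\n", (unsigned int)(rxb ? rxb->vid : 0), rxb != NULL);
	printf("vid 0 rejected: %d\n", lookup_rxb(0) == NULL);
	return 0;
}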
1499 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1501 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) in iwl_pcie_rx_handle() argument
1508 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) in iwl_pcie_rx_handle()
1511 rxq = &trans_pcie->rxq[queue]; in iwl_pcie_rx_handle()
1514 spin_lock(&rxq->lock); in iwl_pcie_rx_handle()
1515 /* uCode's read index (stored in shared DRAM) indicates the last Rx in iwl_pcie_rx_handle()
1518 i = rxq->read; in iwl_pcie_rx_handle()
1520 /* W/A 9000 device step A0 wrap-around bug */ in iwl_pcie_rx_handle()
1521 r &= (rxq->queue_size - 1); in iwl_pcie_rx_handle()
1523 /* Rx interrupt, but nothing sent from uCode */ in iwl_pcie_rx_handle()
1525 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); in iwl_pcie_rx_handle()
1528 struct iwl_rb_allocator *rba = &trans_pcie->rba; in iwl_pcie_rx_handle()
1532 atomic_read(&trans_pcie->rba.req_pending) * in iwl_pcie_rx_handle()
1536 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && in iwl_pcie_rx_handle()
1541 "RX path is in emergency. Pending allocations %d\n", in iwl_pcie_rx_handle()
1545 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); in iwl_pcie_rx_handle()
1551 if (unlikely(join || rxq->next_rb_is_fragment)) { in iwl_pcie_rx_handle()
1552 rxq->next_rb_is_fragment = join; in iwl_pcie_rx_handle()
1554 * We can only get a multi-RB in the following cases: in iwl_pcie_rx_handle()
1555 * - firmware issue, sending a too big notification in iwl_pcie_rx_handle()
1556 * - sniffer mode with a large A-MSDU in iwl_pcie_rx_handle()
1557 * - large MTU frames (>2k) in iwl_pcie_rx_handle()
1558 * since the multi-RB functionality is limited to newer in iwl_pcie_rx_handle()
1565 list_add_tail(&rxb->list, &rxq->rx_free); in iwl_pcie_rx_handle()
1566 rxq->free_count++; in iwl_pcie_rx_handle()
1571 i = (i + 1) & (rxq->queue_size - 1); in iwl_pcie_rx_handle()
1574 * If we have RX_CLAIM_REQ_ALLOC released rx buffers - in iwl_pcie_rx_handle()
1575 * try to claim the pre-allocated buffers from the allocator. in iwl_pcie_rx_handle()
1576 * If not ready - will try to reclaim next time. in iwl_pcie_rx_handle()
1577 * There is no need to reschedule work - allocator exits only in iwl_pcie_rx_handle()
1580 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) in iwl_pcie_rx_handle()
1583 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { in iwl_pcie_rx_handle()
1590 if (rb_pending_alloc < rxq->queue_size / 3) { in iwl_pcie_rx_handle()
1592 "RX path exited emergency. Pending allocations %d\n", in iwl_pcie_rx_handle()
1597 rxq->read = i; in iwl_pcie_rx_handle()
1598 spin_unlock(&rxq->lock); in iwl_pcie_rx_handle()
1607 rxq->read = i; in iwl_pcie_rx_handle()
1608 spin_unlock(&rxq->lock); in iwl_pcie_rx_handle()
1612 * those RBDs are in the used list, but are not tracked by the queue's in iwl_pcie_rx_handle()
1618 * by the queue. in iwl_pcie_rx_handle()
1619 * by allocating them here, they are now in the queue free list, and in iwl_pcie_rx_handle()
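
The emergency handling in this function uses hysteresis: it enters emergency when the number of RBDs waiting for the allocator reaches half the queue size and leaves it only when that drops below a third, so it does not flap around a single threshold. A standalone sketch of that decision (user-space C; QUEUE_SIZE and the sample values are illustrative):

/*
 * Standalone sketch of the emergency hysteresis in iwl_pcie_rx_handle().
 */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 256

static bool update_emergency(bool emergency, int rb_pending_alloc)
{
	if (!emergency && rb_pending_alloc >= QUEUE_SIZE / 2)
		return true;
	if (emergency && rb_pending_alloc < QUEUE_SIZE / 3)
		return false;
	return emergency;
}

int main(void)
{
	bool emergency = false;
	int samples[] = { 60, 130, 110, 90, 84, 80, 140 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		emergency = update_emergency(emergency, samples[i]);
		printf("pending=%3d emergency=%d\n", samples[i], emergency);
	}
	return 0;
}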
1632 u8 queue = entry->entry; in iwl_pcie_get_trans_pcie() local
1633 struct msix_entry *entries = entry - queue; in iwl_pcie_get_trans_pcie()
1639 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1640 * This interrupt handler should be used with RSS queues only.
1646 struct iwl_trans *trans = trans_pcie->trans; in iwl_pcie_irq_rx_msix_handler()
1649 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); in iwl_pcie_irq_rx_msix_handler()
1651 if (WARN_ON(entry->entry >= trans->num_rx_queues)) in iwl_pcie_irq_rx_msix_handler()
1654 if (!trans_pcie->rxq) { in iwl_pcie_irq_rx_msix_handler()
1657 "[%d] Got MSI-X interrupt before we have Rx queues\n", in iwl_pcie_irq_rx_msix_handler()
1658 entry->entry); in iwl_pcie_irq_rx_msix_handler()
1662 rxq = &trans_pcie->rxq[entry->entry]; in iwl_pcie_irq_rx_msix_handler()
1663 lock_map_acquire(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_rx_msix_handler()
1664 IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); in iwl_pcie_irq_rx_msix_handler()
1667 if (!napi_schedule(&rxq->napi)) in iwl_pcie_irq_rx_msix_handler()
1668 iwl_pcie_clear_irq(trans, entry->entry); in iwl_pcie_irq_rx_msix_handler()
1671 lock_map_release(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_rx_msix_handler()
1677 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1685 if (trans->cfg->internal_wimax_coex && in iwl_pcie_irq_handle_error()
1686 !trans->cfg->apmg_not_supported && in iwl_pcie_irq_handle_error()
1691 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_pcie_irq_handle_error()
1692 iwl_op_mode_wimax_active(trans->op_mode); in iwl_pcie_irq_handle_error()
1693 wake_up(&trans->wait_command_queue); in iwl_pcie_irq_handle_error()
1697 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_pcie_irq_handle_error()
1698 if (!trans_pcie->txqs.txq[i]) in iwl_pcie_irq_handle_error()
1700 del_timer(&trans_pcie->txqs.txq[i]->stuck_timer); in iwl_pcie_irq_handle_error()
1707 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_pcie_irq_handle_error()
1708 wake_up(&trans->wait_command_queue); in iwl_pcie_irq_handle_error()
1715 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); in iwl_pcie_int_cause_non_ict()
1717 trace_iwlwifi_dev_irq(trans->dev); in iwl_pcie_int_cause_non_ict()
1722 /* the thread will service interrupts and re-enable them */ in iwl_pcie_int_cause_non_ict()
1726 /* a device (PCI-E) page is 4096 bytes long */
1746 trace_iwlwifi_dev_irq(trans->dev); in iwl_pcie_int_cause_ict()
1751 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); in iwl_pcie_int_cause_ict()
1752 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); in iwl_pcie_int_cause_ict()
1763 trans_pcie->ict_index, read); in iwl_pcie_int_cause_ict()
1764 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; in iwl_pcie_int_cause_ict()
1765 trans_pcie->ict_index = in iwl_pcie_int_cause_ict()
1766 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); in iwl_pcie_int_cause_ict()
1768 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); in iwl_pcie_int_cause_ict()
1769 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, in iwl_pcie_int_cause_ict()
1778 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit in iwl_pcie_int_cause_ict()
1781 * so we use them to decide on the real state of the Rx bit. in iwl_pcie_int_cause_ict()
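
The ICT walk accumulates causes by ORing table entries together, clearing each one and wrapping the index with a power-of-2 mask until a zero entry is reached. A standalone sketch of that loop (user-space C; ICT_COUNT and the posted values are illustrative, and the real handler also remaps some bits afterwards):

/*
 * Standalone sketch of the ICT (interrupt cause table) walk.
 */
#include <stdint.h>
#include <stdio.h>

#define ICT_COUNT 64		/* illustrative; must be a power of 2 */

int main(void)
{
	uint32_t ict_tbl[ICT_COUNT] = { 0 };
	unsigned int ict_index = 0;
	uint32_t val = 0;

	/* pretend the device posted two causes since the last interrupt */
	ict_tbl[0] = 0x08;
	ict_tbl[1] = 0x80;

	while (ict_tbl[ict_index]) {
		val |= ict_tbl[ict_index];
		ict_tbl[ict_index] = 0;			/* consume the entry */
		ict_index = (ict_index + 1) & (ICT_COUNT - 1);
	}
	printf("accumulated causes: 0x%x, next index %u\n", (unsigned int)val, ict_index);
	return 0;
}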
1794 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_pcie_handle_rfkill_irq()
1797 mutex_lock(&trans_pcie->mutex); in iwl_pcie_handle_rfkill_irq()
1798 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_handle_rfkill_irq()
1801 set_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_handle_rfkill_irq()
1802 set_bit(STATUS_RFKILL_HW, &trans->status); in iwl_pcie_handle_rfkill_irq()
1804 if (trans_pcie->opmode_down) in iwl_pcie_handle_rfkill_irq()
1807 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_handle_rfkill_irq()
1812 isr_stats->rfkill++; in iwl_pcie_handle_rfkill_irq()
1816 mutex_unlock(&trans_pcie->mutex); in iwl_pcie_handle_rfkill_irq()
1820 &trans->status)) in iwl_pcie_handle_rfkill_irq()
1823 wake_up(&trans->wait_command_queue); in iwl_pcie_handle_rfkill_irq()
1825 clear_bit(STATUS_RFKILL_HW, &trans->status); in iwl_pcie_handle_rfkill_irq()
1826 if (trans_pcie->opmode_down) in iwl_pcie_handle_rfkill_irq()
1827 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); in iwl_pcie_handle_rfkill_irq()
1835 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_pcie_irq_handler()
1840 lock_map_acquire(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_handler()
1842 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
1847 if (likely(trans_pcie->use_ict)) in iwl_pcie_irq_handler()
1855 inta, trans_pcie->inta_mask, in iwl_pcie_irq_handler()
1858 if (inta & (~trans_pcie->inta_mask)) in iwl_pcie_irq_handler()
1861 inta & (~trans_pcie->inta_mask)); in iwl_pcie_irq_handler()
1864 inta &= trans_pcie->inta_mask; in iwl_pcie_irq_handler()
1874 * Re-enable interrupts here since we don't in iwl_pcie_irq_handler()
1877 if (test_bit(STATUS_INT_ENABLED, &trans->status)) in iwl_pcie_irq_handler()
1879 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
1880 lock_map_release(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_handler()
1890 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
1905 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); in iwl_pcie_irq_handler()
1911 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
1920 isr_stats->hw++; in iwl_pcie_irq_handler()
1932 isr_stats->sch++; in iwl_pcie_irq_handler()
1935 /* Alive notification via Rx interrupt will do the real work */ in iwl_pcie_irq_handler()
1938 isr_stats->alive++; in iwl_pcie_irq_handler()
1939 if (trans->trans_cfg->gen2) { in iwl_pcie_irq_handler()
1944 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); in iwl_pcie_irq_handler()
1962 isr_stats->ctkill++; in iwl_pcie_irq_handler()
1970 isr_stats->sw++; in iwl_pcie_irq_handler()
1975 /* uCode wakes up after power-down sleep */ in iwl_pcie_irq_handler()
1981 isr_stats->wakeup++; in iwl_pcie_irq_handler()
1987 * Rx "responses" (frame-received notification), and other in iwl_pcie_irq_handler()
1991 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); in iwl_pcie_irq_handler()
2002 /* Sending RX interrupt requires many steps to be done in the in iwl_pcie_irq_handler()
2004 * 1- write interrupt to current index in ICT table. in iwl_pcie_irq_handler()
2005 * 2- dma RX frame. in iwl_pcie_irq_handler()
2006 * 3- update RX shared data to indicate last write index. in iwl_pcie_irq_handler()
2007 * 4- send interrupt. in iwl_pcie_irq_handler()
2008 * This could lead to RX race, driver could receive RX interrupt in iwl_pcie_irq_handler()
2010 * periodic interrupt will detect any dangling Rx activity. in iwl_pcie_irq_handler()
2013 /* Disable periodic interrupt; we use it as just a one-shot. */ in iwl_pcie_irq_handler()
2019 * real RX interrupt (instead of just periodic int), to catch in iwl_pcie_irq_handler()
2020 * any dangling Rx interrupt. If it was just the periodic in iwl_pcie_irq_handler()
2021 * interrupt, there was no dangling Rx activity, and no need in iwl_pcie_irq_handler()
2022 * to extend the periodic interrupt; one-shot is enough. in iwl_pcie_irq_handler()
2028 isr_stats->rx++; in iwl_pcie_irq_handler()
2031 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { in iwl_pcie_irq_handler()
2033 __napi_schedule(&trans_pcie->rxq[0].napi); in iwl_pcie_irq_handler()
2042 isr_stats->tx++; in iwl_pcie_irq_handler()
2045 trans_pcie->ucode_write_complete = true; in iwl_pcie_irq_handler()
2046 wake_up(&trans_pcie->ucode_write_waitq); in iwl_pcie_irq_handler()
2048 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { in iwl_pcie_irq_handler()
2049 trans_pcie->imr_status = IMR_D2S_COMPLETED; in iwl_pcie_irq_handler()
2050 wake_up(&trans_pcie->ucode_write_waitq); in iwl_pcie_irq_handler()
2056 isr_stats->unhandled++; in iwl_pcie_irq_handler()
2059 if (inta & ~(trans_pcie->inta_mask)) { in iwl_pcie_irq_handler()
2061 inta & ~trans_pcie->inta_mask); in iwl_pcie_irq_handler()
2065 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
2066 /* only Re-enable all interrupt if disabled by irq */ in iwl_pcie_irq_handler()
2067 if (test_bit(STATUS_INT_ENABLED, &trans->status)) in iwl_pcie_irq_handler()
2072 /* Re-enable RF_KILL if it occurred */ in iwl_pcie_irq_handler()
2075 /* Re-enable the ALIVE / Rx interrupt if it occurred */ in iwl_pcie_irq_handler()
2078 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_handler()
2082 lock_map_release(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_handler()
2097 if (trans_pcie->ict_tbl) { in iwl_pcie_free_ict()
2098 dma_free_coherent(trans->dev, ICT_SIZE, in iwl_pcie_free_ict()
2099 trans_pcie->ict_tbl, in iwl_pcie_free_ict()
2100 trans_pcie->ict_tbl_dma); in iwl_pcie_free_ict()
2101 trans_pcie->ict_tbl = NULL; in iwl_pcie_free_ict()
2102 trans_pcie->ict_tbl_dma = 0; in iwl_pcie_free_ict()
2115 trans_pcie->ict_tbl = in iwl_pcie_alloc_ict()
2116 dma_alloc_coherent(trans->dev, ICT_SIZE, in iwl_pcie_alloc_ict()
2117 &trans_pcie->ict_tbl_dma, GFP_KERNEL); in iwl_pcie_alloc_ict()
2118 if (!trans_pcie->ict_tbl) in iwl_pcie_alloc_ict()
2119 return -ENOMEM; in iwl_pcie_alloc_ict()
2122 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { in iwl_pcie_alloc_ict()
2124 return -EINVAL; in iwl_pcie_alloc_ict()
2138 if (!trans_pcie->ict_tbl) in iwl_pcie_reset_ict()
2141 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_reset_ict()
2144 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); in iwl_pcie_reset_ict()
2146 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; in iwl_pcie_reset_ict()
2155 trans_pcie->use_ict = true; in iwl_pcie_reset_ict()
2156 trans_pcie->ict_index = 0; in iwl_pcie_reset_ict()
2157 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); in iwl_pcie_reset_ict()
2159 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_reset_ict()
2167 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_disable_ict()
2168 trans_pcie->use_ict = false; in iwl_pcie_disable_ict()
2169 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_disable_ict()
2180 * back-to-back ISRs and sporadic interrupts from our NIC. in iwl_pcie_isr()
2181 * If we have something to service, the tasklet will re-enable ints. in iwl_pcie_isr()
2182 * If we *don't* have something, we'll re-enable before leaving here. in iwl_pcie_isr()
2198 struct iwl_trans *trans = trans_pcie->trans; in iwl_pcie_irq_msix_handler()
2199 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_pcie_irq_msix_handler()
2205 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) in iwl_pcie_irq_msix_handler()
2208 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) in iwl_pcie_irq_msix_handler()
2211 lock_map_acquire(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_msix_handler()
2213 spin_lock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_msix_handler()
2221 spin_unlock_bh(&trans_pcie->irq_lock); in iwl_pcie_irq_msix_handler()
2223 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); in iwl_pcie_irq_msix_handler()
2227 lock_map_release(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_msix_handler()
2234 entry->entry, inta_fh, trans_pcie->fh_mask, in iwl_pcie_irq_msix_handler()
2236 if (inta_fh & ~trans_pcie->fh_mask) in iwl_pcie_irq_msix_handler()
2239 inta_fh & ~trans_pcie->fh_mask); in iwl_pcie_irq_msix_handler()
2242 inta_fh &= trans_pcie->fh_mask; in iwl_pcie_irq_msix_handler()
2244 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && in iwl_pcie_irq_msix_handler()
2247 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { in iwl_pcie_irq_msix_handler()
2249 __napi_schedule(&trans_pcie->rxq[0].napi); in iwl_pcie_irq_msix_handler()
2254 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && in iwl_pcie_irq_msix_handler()
2257 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) { in iwl_pcie_irq_msix_handler()
2259 __napi_schedule(&trans_pcie->rxq[1].napi); in iwl_pcie_irq_msix_handler()
2266 trans_pcie->imr_status == IMR_D2S_REQUESTED) { in iwl_pcie_irq_msix_handler()
2268 isr_stats->tx++; in iwl_pcie_irq_msix_handler()
2271 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { in iwl_pcie_irq_msix_handler()
2272 trans_pcie->imr_status = IMR_D2S_COMPLETED; in iwl_pcie_irq_msix_handler()
2273 wake_up(&trans_pcie->ucode_write_waitq); in iwl_pcie_irq_msix_handler()
2277 isr_stats->tx++; in iwl_pcie_irq_msix_handler()
2282 trans_pcie->ucode_write_complete = true; in iwl_pcie_irq_msix_handler()
2283 wake_up(&trans_pcie->ucode_write_waitq); in iwl_pcie_irq_msix_handler()
2286 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { in iwl_pcie_irq_msix_handler()
2287 trans_pcie->imr_status = IMR_D2S_COMPLETED; in iwl_pcie_irq_msix_handler()
2288 wake_up(&trans_pcie->ucode_write_waitq); in iwl_pcie_irq_msix_handler()
2292 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) in iwl_pcie_irq_msix_handler()
2308 isr_stats->sw++; in iwl_pcie_irq_msix_handler()
2310 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { in iwl_pcie_irq_msix_handler()
2311 trans_pcie->imr_status = IMR_D2S_ERROR; in iwl_pcie_irq_msix_handler()
2312 wake_up(&trans_pcie->imr_waitq); in iwl_pcie_irq_msix_handler()
2313 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { in iwl_pcie_irq_msix_handler()
2314 trans_pcie->fw_reset_state = FW_RESET_ERROR; in iwl_pcie_irq_msix_handler()
2315 wake_up(&trans_pcie->fw_reset_waitq); in iwl_pcie_irq_msix_handler()
2325 entry->entry, inta_hw, trans_pcie->hw_mask, in iwl_pcie_irq_msix_handler()
2327 if (inta_hw & ~trans_pcie->hw_mask) in iwl_pcie_irq_msix_handler()
2330 inta_hw & ~trans_pcie->hw_mask); in iwl_pcie_irq_msix_handler()
2333 inta_hw &= trans_pcie->hw_mask; in iwl_pcie_irq_msix_handler()
2335 /* Alive notification via Rx interrupt will do the real work */ in iwl_pcie_irq_msix_handler()
2338 isr_stats->alive++; in iwl_pcie_irq_msix_handler()
2339 if (trans->trans_cfg->gen2) { in iwl_pcie_irq_msix_handler()
2341 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); in iwl_pcie_irq_msix_handler()
2350 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { in iwl_pcie_irq_msix_handler()
2352 le32_to_cpu(trans_pcie->prph_info->sleep_notif); in iwl_pcie_irq_msix_handler()
2358 trans_pcie->sx_complete = true; in iwl_pcie_irq_msix_handler()
2359 wake_up(&trans_pcie->sx_waitq); in iwl_pcie_irq_msix_handler()
2361 /* uCode wakes up after power-down sleep */ in iwl_pcie_irq_msix_handler()
2366 isr_stats->wakeup++; in iwl_pcie_irq_msix_handler()
2373 isr_stats->ctkill++; in iwl_pcie_irq_msix_handler()
2384 isr_stats->hw++; in iwl_pcie_irq_msix_handler()
2385 trans->dbg.hw_error = true; in iwl_pcie_irq_msix_handler()
2391 trans_pcie->fw_reset_state = FW_RESET_OK; in iwl_pcie_irq_msix_handler()
2392 wake_up(&trans_pcie->fw_reset_waitq); in iwl_pcie_irq_msix_handler()
2396 iwl_pcie_clear_irq(trans, entry->entry); in iwl_pcie_irq_msix_handler()
2398 lock_map_release(&trans->sync_cmd_lockdep_map); in iwl_pcie_irq_msix_handler()