Lines matching refs: md_ctrl
60 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, in md_cd_queue_struct_reset() argument
65 queue->md_ctrl = md_ctrl; in md_cd_queue_struct_reset()
71 static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, in md_cd_queue_struct_init() argument
74 md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); in md_cd_queue_struct_init()
91 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, in t7xx_cldma_alloc_and_map_skb() argument
98 req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE); in t7xx_cldma_alloc_and_map_skb()
99 if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { in t7xx_cldma_alloc_and_map_skb()
103 dev_err(md_ctrl->dev, "DMA mapping failed\n"); in t7xx_cldma_alloc_and_map_skb()
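
The t7xx_cldma_alloc_and_map_skb() references above follow the usual skb-plus-DMA-mapping pattern for RX buffers. A minimal sketch of that pattern, using an illustrative wrapper struct and helper name rather than the driver's own cldma_request:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Stand-in for the driver's request object; field names mirror the listing. */
struct rx_buf {
	struct sk_buff *skb;
	dma_addr_t mapped_buff;
};

static int rx_buf_alloc_and_map(struct device *dev, struct rx_buf *buf,
				size_t size, gfp_t gfp)
{
	buf->skb = __dev_alloc_skb(size, gfp);
	if (!buf->skb)
		return -ENOMEM;

	/* Map the skb data area for device-to-host (RX) DMA. */
	buf->mapped_buff = dma_map_single(dev, buf->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->mapped_buff)) {
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
		dev_err(dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}
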
112 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_gpd_rx_from_q() local
119 hw_info = &md_ctrl->hw_info; in t7xx_cldma_gpd_rx_from_q()
135 if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { in t7xx_cldma_gpd_rx_from_q()
136 dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); in t7xx_cldma_gpd_rx_from_q()
154 dma_unmap_single(md_ctrl->dev, req->mapped_buff, in t7xx_cldma_gpd_rx_from_q()
176 ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL); in t7xx_cldma_gpd_rx_from_q()
198 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_gpd_rx_collect() local
205 hw_info = &md_ctrl->hw_info; in t7xx_cldma_gpd_rx_collect()
216 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_gpd_rx_collect()
217 if (md_ctrl->rxq_active & BIT(queue->index)) { in t7xx_cldma_gpd_rx_collect()
227 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_gpd_rx_collect()
232 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_gpd_rx_collect()
241 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_rx_done() local
245 if (value && md_ctrl->rxq_active & BIT(queue->index)) { in t7xx_cldma_rx_done()
250 t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); in t7xx_cldma_rx_done()
251 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); in t7xx_cldma_rx_done()
252 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); in t7xx_cldma_rx_done()
253 pm_runtime_mark_last_busy(md_ctrl->dev); in t7xx_cldma_rx_done()
254 pm_runtime_put_autosuspend(md_ctrl->dev); in t7xx_cldma_rx_done()
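
The t7xx_cldma_rx_done() references show the deferred-RX shape: the work item re-queues itself while the collect pass reports more data pending, and only drops its runtime-PM reference once the queue is drained. A hedged sketch of that shape, with a hypothetical context struct and callback:

#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct rx_work_ctx {
	struct work_struct work;
	struct device *dev;
	int budget;
	/* Returns true if packets remain after a budget-limited collect pass. */
	bool (*collect)(struct rx_work_ctx *ctx, int budget);
};

static void rx_done_work(struct work_struct *work)
{
	struct rx_work_ctx *ctx = container_of(work, struct rx_work_ctx, work);

	/* Budget exhausted with data left over: run again later. */
	if (ctx->collect(ctx, ctx->budget)) {
		schedule_work(&ctx->work);
		return;
	}

	/* Drained: re-arm autosuspend and release the PM reference taken
	 * when the interrupt first scheduled this work.
	 */
	pm_runtime_mark_last_busy(ctx->dev);
	pm_runtime_put_autosuspend(ctx->dev);
}
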
259 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_gpd_tx_collect() local
288 dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); in t7xx_cldma_gpd_tx_collect()
300 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_txq_empty_hndl() local
306 if (!(md_ctrl->txq_active & BIT(queue->index))) in t7xx_cldma_txq_empty_hndl()
315 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_txq_empty_hndl()
317 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_txq_empty_hndl()
323 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_txq_empty_hndl()
324 dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", in t7xx_cldma_txq_empty_hndl()
325 md_ctrl->hif_id, queue->index); in t7xx_cldma_txq_empty_hndl()
331 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_txq_empty_hndl()
337 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_tx_done() local
342 hw_info = &md_ctrl->hw_info; in t7xx_cldma_tx_done()
357 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_tx_done()
358 if (md_ctrl->txq_active & BIT(queue->index)) { in t7xx_cldma_tx_done()
363 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_tx_done()
365 pm_runtime_mark_last_busy(md_ctrl->dev); in t7xx_cldma_tx_done()
366 pm_runtime_put_autosuspend(md_ctrl->dev); in t7xx_cldma_tx_done()
369 static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, in t7xx_cldma_ring_free() argument
376 dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, in t7xx_cldma_ring_free()
384 dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); in t7xx_cldma_ring_free()
391 static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) in t7xx_alloc_rx_request() argument
400 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); in t7xx_alloc_rx_request()
404 val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL); in t7xx_alloc_rx_request()
411 dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); in t7xx_alloc_rx_request()
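
t7xx_alloc_rx_request(), t7xx_alloc_tx_request() and t7xx_cldma_ring_free() all revolve around a dma_pool that backs the GPD descriptors. A minimal sketch of that create/allocate/free pairing, assuming illustrative descriptor size, alignment and pool name:

#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Illustrative sizes; the driver derives these from its GPD layout. */
#define GPD_SIZE	64
#define GPD_ALIGN	16

static struct dma_pool *gpd_pool_create(struct device *dev)
{
	/* One pool per controller; descriptors come back DMA-addressable. */
	return dma_pool_create("example_gpd_pool", dev, GPD_SIZE, GPD_ALIGN, 0);
}

static void *gpd_alloc(struct dma_pool *pool, dma_addr_t *gpd_addr)
{
	/* Zeroed descriptor plus its bus address for the hardware ring. */
	return dma_pool_zalloc(pool, GFP_KERNEL, gpd_addr);
}

static void gpd_free(struct dma_pool *pool, void *gpd, dma_addr_t gpd_addr)
{
	dma_pool_free(pool, gpd, gpd_addr);
}
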
419 static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) in t7xx_cldma_rx_ring_init() argument
429 req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); in t7xx_cldma_rx_ring_init()
431 t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); in t7xx_cldma_rx_ring_init()
452 static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) in t7xx_alloc_tx_request() argument
460 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); in t7xx_alloc_tx_request()
469 static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) in t7xx_cldma_tx_ring_init() argument
479 req = t7xx_alloc_tx_request(md_ctrl); in t7xx_cldma_tx_ring_init()
481 t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); in t7xx_cldma_tx_ring_init()
522 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_rxq_init() local
525 queue->tr_ring = &md_ctrl->rx_ring[queue->index]; in t7xx_cldma_rxq_init()
531 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_txq_init() local
534 queue->tr_ring = &md_ctrl->tx_ring[queue->index]; in t7xx_cldma_txq_init()
538 static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) in t7xx_cldma_enable_irq() argument
540 t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); in t7xx_cldma_enable_irq()
543 static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) in t7xx_cldma_disable_irq() argument
545 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); in t7xx_cldma_disable_irq()
548 static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) in t7xx_cldma_irq_work_cb() argument
551 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_irq_work_cb()
575 pm_runtime_get(md_ctrl->dev); in t7xx_cldma_irq_work_cb()
578 queue_work(md_ctrl->txq[i].worker, in t7xx_cldma_irq_work_cb()
579 &md_ctrl->txq[i].cldma_work); in t7xx_cldma_irq_work_cb()
581 t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); in t7xx_cldma_irq_work_cb()
600 pm_runtime_get(md_ctrl->dev); in t7xx_cldma_irq_work_cb()
603 queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); in t7xx_cldma_irq_work_cb()
609 static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) in t7xx_cldma_qs_are_active() argument
611 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_qs_are_active()
615 if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) in t7xx_cldma_qs_are_active()
635 int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) in t7xx_cldma_stop() argument
637 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_stop()
641 md_ctrl->rxq_active = 0; in t7xx_cldma_stop()
643 md_ctrl->txq_active = 0; in t7xx_cldma_stop()
645 md_ctrl->txq_started = 0; in t7xx_cldma_stop()
646 t7xx_cldma_disable_irq(md_ctrl); in t7xx_cldma_stop()
652 if (md_ctrl->is_late_init) { in t7xx_cldma_stop()
654 flush_work(&md_ctrl->txq[i].cldma_work); in t7xx_cldma_stop()
657 flush_work(&md_ctrl->rxq[i].cldma_work); in t7xx_cldma_stop()
661 CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); in t7xx_cldma_stop()
663 dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); in t7xx_cldma_stop()
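
The stop path flushes each queue's work and then polls until the hardware reports the queues idle (the read_poll_timeout() call whose tail arguments appear at source line 661 of the listing). A sketch of that polling step with illustrative timeouts and a hypothetical status read:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Illustrative values; the driver uses its own CHECK_Q_STOP_* constants. */
#define Q_STOP_SLEEP_US		20
#define Q_STOP_TIMEOUT_US	10000

static bool hw_queues_active(void __iomem *status_reg)
{
	return !!readl(status_reg);
}

static int wait_queues_stopped(struct device *dev, void __iomem *status_reg)
{
	bool active;
	int ret;

	/* Re-read the status until it reports idle, sleeping between reads. */
	ret = read_poll_timeout(hw_queues_active, active, !active,
				Q_STOP_SLEEP_US, Q_STOP_TIMEOUT_US, true,
				status_reg);
	if (ret)
		dev_err(dev, "Could not stop queues\n");

	return ret;
}
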
668 static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) in t7xx_cldma_late_release() argument
672 if (!md_ctrl->is_late_init) in t7xx_cldma_late_release()
676 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); in t7xx_cldma_late_release()
679 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); in t7xx_cldma_late_release()
681 dma_pool_destroy(md_ctrl->gpd_dmapool); in t7xx_cldma_late_release()
682 md_ctrl->gpd_dmapool = NULL; in t7xx_cldma_late_release()
683 md_ctrl->is_late_init = false; in t7xx_cldma_late_release()
686 void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) in t7xx_cldma_reset() argument
691 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
692 md_ctrl->txq_active = 0; in t7xx_cldma_reset()
693 md_ctrl->rxq_active = 0; in t7xx_cldma_reset()
694 t7xx_cldma_disable_irq(md_ctrl); in t7xx_cldma_reset()
695 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
698 cancel_work_sync(&md_ctrl->txq[i].cldma_work); in t7xx_cldma_reset()
700 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
701 md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); in t7xx_cldma_reset()
702 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
706 cancel_work_sync(&md_ctrl->rxq[i].cldma_work); in t7xx_cldma_reset()
708 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
709 md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); in t7xx_cldma_reset()
710 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_reset()
713 t7xx_cldma_late_release(md_ctrl); in t7xx_cldma_reset()
723 void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) in t7xx_cldma_start() argument
727 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_start()
728 if (md_ctrl->is_late_init) { in t7xx_cldma_start()
729 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_start()
732 t7xx_cldma_enable_irq(md_ctrl); in t7xx_cldma_start()
735 if (md_ctrl->txq[i].tr_done) in t7xx_cldma_start()
737 md_ctrl->txq[i].tr_done->gpd_addr, in t7xx_cldma_start()
742 if (md_ctrl->rxq[i].tr_done) in t7xx_cldma_start()
744 md_ctrl->rxq[i].tr_done->gpd_addr, in t7xx_cldma_start()
751 md_ctrl->txq_started = 0; in t7xx_cldma_start()
752 md_ctrl->txq_active |= TXRX_STATUS_BITMASK; in t7xx_cldma_start()
753 md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; in t7xx_cldma_start()
755 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_start()
758 static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) in t7xx_cldma_clear_txq() argument
760 struct cldma_queue *txq = &md_ctrl->txq[qnum]; in t7xx_cldma_clear_txq()
778 static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) in t7xx_cldma_clear_rxq() argument
780 struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; in t7xx_cldma_clear_rxq()
803 ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC); in t7xx_cldma_clear_rxq()
814 void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) in t7xx_cldma_clear_all_qs() argument
820 t7xx_cldma_clear_txq(md_ctrl, i); in t7xx_cldma_clear_all_qs()
823 t7xx_cldma_clear_rxq(md_ctrl, i); in t7xx_cldma_clear_all_qs()
827 void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) in t7xx_cldma_stop_all_qs() argument
829 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_stop_all_qs()
832 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_stop_all_qs()
836 md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; in t7xx_cldma_stop_all_qs()
838 md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; in t7xx_cldma_stop_all_qs()
840 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_stop_all_qs()
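
Throughout the listing, txq_active/rxq_active are bitmasks flipped only under cldma_lock with IRQs disabled, so the ISR, the work handlers and the suspend/resume paths agree on which queues are live. A minimal sketch of that idiom, with an illustrative mask standing in for the driver's TXRX_STATUS_BITMASK:

#include <linux/bits.h>
#include <linux/spinlock.h>

/* Illustrative 8-queue mask; not the driver's actual definition. */
#define Q_ACTIVE_MASK	GENMASK(7, 0)

static void queues_mark_stopped(spinlock_t *lock, unsigned int *active)
{
	unsigned long flags;

	/* IRQ-safe lock: the interrupt path checks the same bitmask. */
	spin_lock_irqsave(lock, flags);
	*active &= ~Q_ACTIVE_MASK;
	spin_unlock_irqrestore(lock, flags);
}

static void queues_mark_started(spinlock_t *lock, unsigned int *active)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	*active |= Q_ACTIVE_MASK;
	spin_unlock_irqrestore(lock, flags);
}
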
846 struct cldma_ctrl *md_ctrl = queue->md_ctrl; in t7xx_cldma_gpd_handle_tx_request() local
851 tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); in t7xx_cldma_gpd_handle_tx_request()
853 if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) { in t7xx_cldma_gpd_handle_tx_request()
854 dev_err(md_ctrl->dev, "DMA mapping failed\n"); in t7xx_cldma_gpd_handle_tx_request()
864 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_gpd_handle_tx_request()
865 if (md_ctrl->txq_active & BIT(queue->index)) in t7xx_cldma_gpd_handle_tx_request()
868 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_gpd_handle_tx_request()
875 static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, in t7xx_cldma_hw_start_send() argument
878 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_hw_start_send()
884 md_ctrl->txq_started &= ~BIT(qno); in t7xx_cldma_hw_start_send()
888 if (md_ctrl->txq_started & BIT(qno)) in t7xx_cldma_hw_start_send()
893 md_ctrl->txq_started |= BIT(qno); in t7xx_cldma_hw_start_send()
921 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) in t7xx_cldma_send_skb() argument
931 ret = pm_runtime_resume_and_get(md_ctrl->dev); in t7xx_cldma_send_skb()
935 t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); in t7xx_cldma_send_skb()
936 queue = &md_ctrl->txq[qno]; in t7xx_cldma_send_skb()
938 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
939 if (!(md_ctrl->txq_active & BIT(qno))) { in t7xx_cldma_send_skb()
941 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
944 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
957 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { in t7xx_cldma_send_skb()
966 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
967 t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); in t7xx_cldma_send_skb()
968 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
974 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { in t7xx_cldma_send_skb()
979 if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { in t7xx_cldma_send_skb()
980 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
981 t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); in t7xx_cldma_send_skb()
982 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_send_skb()
989 t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); in t7xx_cldma_send_skb()
990 pm_runtime_mark_last_busy(md_ctrl->dev); in t7xx_cldma_send_skb()
991 pm_runtime_put_autosuspend(md_ctrl->dev); in t7xx_cldma_send_skb()
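
t7xx_cldma_send_skb() brackets the whole TX submission in a runtime-PM reference: pm_runtime_resume_and_get() before touching the hardware, pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() on the way out. A hedged sketch of that bracket around a hypothetical submit callback:

#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

static int tx_submit(struct device *dev, struct sk_buff *skb,
		     int (*queue_skb)(struct sk_buff *skb))
{
	int ret;

	/* Make sure the device is resumed before touching its registers. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = queue_skb(skb);

	/* Re-arm the autosuspend timer and drop the reference we took above. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}
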
995 static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id) in t7xx_cldma_adjust_config() argument
1000 md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ; in t7xx_cldma_adjust_config()
1001 t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb); in t7xx_cldma_adjust_config()
1004 md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ; in t7xx_cldma_adjust_config()
1007 md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ; in t7xx_cldma_adjust_config()
1010 md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ; in t7xx_cldma_adjust_config()
1011 md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ; in t7xx_cldma_adjust_config()
1012 t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP], in t7xx_cldma_adjust_config()
1017 static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) in t7xx_cldma_late_init() argument
1022 if (md_ctrl->is_late_init) { in t7xx_cldma_late_init()
1023 dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); in t7xx_cldma_late_init()
1027 snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); in t7xx_cldma_late_init()
1029 md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, in t7xx_cldma_late_init()
1031 if (!md_ctrl->gpd_dmapool) { in t7xx_cldma_late_init()
1032 dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); in t7xx_cldma_late_init()
1037 ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); in t7xx_cldma_late_init()
1039 dev_err(md_ctrl->dev, "control TX ring init fail\n"); in t7xx_cldma_late_init()
1045 ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); in t7xx_cldma_late_init()
1047 dev_err(md_ctrl->dev, "Control RX ring init fail\n"); in t7xx_cldma_late_init()
1053 t7xx_cldma_txq_init(&md_ctrl->txq[i]); in t7xx_cldma_late_init()
1056 t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); in t7xx_cldma_late_init()
1058 md_ctrl->is_late_init = true; in t7xx_cldma_late_init()
1063 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); in t7xx_cldma_late_init()
1067 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); in t7xx_cldma_late_init()
1077 static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) in t7xx_hw_info_init() argument
1079 struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; in t7xx_hw_info_init()
1080 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_hw_info_init()
1085 if (md_ctrl->hif_id == CLDMA_ID_MD) { in t7xx_hw_info_init()
1110 struct cldma_ctrl *md_ctrl; in t7xx_cldma_alloc() local
1113 md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); in t7xx_cldma_alloc()
1114 if (!md_ctrl) in t7xx_cldma_alloc()
1117 md_ctrl->t7xx_dev = t7xx_dev; in t7xx_cldma_alloc()
1118 md_ctrl->dev = dev; in t7xx_cldma_alloc()
1119 md_ctrl->hif_id = hif_id; in t7xx_cldma_alloc()
1121 md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb; in t7xx_cldma_alloc()
1123 t7xx_hw_info_init(md_ctrl); in t7xx_cldma_alloc()
1124 t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; in t7xx_cldma_alloc()
1130 struct cldma_ctrl *md_ctrl = entity_param; in t7xx_cldma_resume_early() local
1135 hw_info = &md_ctrl->hw_info; in t7xx_cldma_resume_early()
1137 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_resume_early()
1140 t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, in t7xx_cldma_resume_early()
1142 t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, in t7xx_cldma_resume_early()
1145 t7xx_cldma_enable_irq(md_ctrl); in t7xx_cldma_resume_early()
1147 md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; in t7xx_cldma_resume_early()
1150 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_resume_early()
1155 struct cldma_ctrl *md_ctrl = entity_param; in t7xx_cldma_resume() local
1158 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_resume()
1159 md_ctrl->txq_active |= TXRX_STATUS_BITMASK; in t7xx_cldma_resume()
1160 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); in t7xx_cldma_resume()
1161 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); in t7xx_cldma_resume()
1162 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_resume()
1164 if (md_ctrl->hif_id == CLDMA_ID_MD) in t7xx_cldma_resume()
1172 struct cldma_ctrl *md_ctrl = entity_param; in t7xx_cldma_suspend_late() local
1176 hw_info = &md_ctrl->hw_info; in t7xx_cldma_suspend_late()
1178 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_suspend_late()
1181 md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; in t7xx_cldma_suspend_late()
1184 t7xx_cldma_disable_irq(md_ctrl); in t7xx_cldma_suspend_late()
1185 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_suspend_late()
1190 struct cldma_ctrl *md_ctrl = entity_param; in t7xx_cldma_suspend() local
1194 if (md_ctrl->hif_id == CLDMA_ID_MD) in t7xx_cldma_suspend()
1197 hw_info = &md_ctrl->hw_info; in t7xx_cldma_suspend()
1199 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_suspend()
1202 md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; in t7xx_cldma_suspend()
1204 md_ctrl->txq_started = 0; in t7xx_cldma_suspend()
1205 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_suspend()
1210 static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) in t7xx_cldma_pm_init() argument
1212 md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); in t7xx_cldma_pm_init()
1213 if (!md_ctrl->pm_entity) in t7xx_cldma_pm_init()
1216 md_ctrl->pm_entity->entity_param = md_ctrl; in t7xx_cldma_pm_init()
1218 if (md_ctrl->hif_id == CLDMA_ID_MD) in t7xx_cldma_pm_init()
1219 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; in t7xx_cldma_pm_init()
1221 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; in t7xx_cldma_pm_init()
1223 md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; in t7xx_cldma_pm_init()
1224 md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; in t7xx_cldma_pm_init()
1225 md_ctrl->pm_entity->resume = t7xx_cldma_resume; in t7xx_cldma_pm_init()
1226 md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; in t7xx_cldma_pm_init()
1228 return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); in t7xx_cldma_pm_init()
1231 static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) in t7xx_cldma_pm_uninit() argument
1233 if (!md_ctrl->pm_entity) in t7xx_cldma_pm_uninit()
1236 t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); in t7xx_cldma_pm_uninit()
1237 kfree(md_ctrl->pm_entity); in t7xx_cldma_pm_uninit()
1238 md_ctrl->pm_entity = NULL; in t7xx_cldma_pm_uninit()
1242 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) in t7xx_cldma_hif_hw_init() argument
1244 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_hif_hw_init()
1247 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); in t7xx_cldma_hif_hw_init()
1253 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); in t7xx_cldma_hif_hw_init()
1258 struct cldma_ctrl *md_ctrl = data; in t7xx_cldma_isr_handler() local
1261 interrupt = md_ctrl->hw_info.phy_interrupt_id; in t7xx_cldma_isr_handler()
1262 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); in t7xx_cldma_isr_handler()
1263 t7xx_cldma_irq_work_cb(md_ctrl); in t7xx_cldma_isr_handler()
1264 t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); in t7xx_cldma_isr_handler()
1265 t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); in t7xx_cldma_isr_handler()
1269 static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) in t7xx_cldma_destroy_wqs() argument
1274 if (md_ctrl->txq[i].worker) { in t7xx_cldma_destroy_wqs()
1275 destroy_workqueue(md_ctrl->txq[i].worker); in t7xx_cldma_destroy_wqs()
1276 md_ctrl->txq[i].worker = NULL; in t7xx_cldma_destroy_wqs()
1281 if (md_ctrl->rxq[i].worker) { in t7xx_cldma_destroy_wqs()
1282 destroy_workqueue(md_ctrl->rxq[i].worker); in t7xx_cldma_destroy_wqs()
1283 md_ctrl->rxq[i].worker = NULL; in t7xx_cldma_destroy_wqs()
1300 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) in t7xx_cldma_init() argument
1302 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; in t7xx_cldma_init()
1305 md_ctrl->txq_active = 0; in t7xx_cldma_init()
1306 md_ctrl->rxq_active = 0; in t7xx_cldma_init()
1307 md_ctrl->is_late_init = false; in t7xx_cldma_init()
1309 ret = t7xx_cldma_pm_init(md_ctrl); in t7xx_cldma_init()
1313 spin_lock_init(&md_ctrl->cldma_lock); in t7xx_cldma_init()
1316 md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); in t7xx_cldma_init()
1317 md_ctrl->txq[i].worker = in t7xx_cldma_init()
1320 md_ctrl->hif_id, i); in t7xx_cldma_init()
1321 if (!md_ctrl->txq[i].worker) in t7xx_cldma_init()
1324 INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); in t7xx_cldma_init()
1328 md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); in t7xx_cldma_init()
1329 INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); in t7xx_cldma_init()
1331 md_ctrl->rxq[i].worker = in t7xx_cldma_init()
1334 md_ctrl->hif_id, i); in t7xx_cldma_init()
1335 if (!md_ctrl->rxq[i].worker) in t7xx_cldma_init()
1339 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); in t7xx_cldma_init()
1340 md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; in t7xx_cldma_init()
1341 md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; in t7xx_cldma_init()
1342 md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; in t7xx_cldma_init()
1343 t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); in t7xx_cldma_init()
1347 t7xx_cldma_destroy_wqs(md_ctrl); in t7xx_cldma_init()
1348 t7xx_cldma_pm_uninit(md_ctrl); in t7xx_cldma_init()
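
t7xx_cldma_init() gives every TX/RX queue its own ordered workqueue and work item, and tears them down again on failure via t7xx_cldma_destroy_wqs(). A sketch of that per-queue worker setup with hypothetical names and flags:

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical per-queue context; the driver keeps this inside cldma_queue. */
struct q_ctx {
	struct workqueue_struct *worker;
	struct work_struct work;
};

static int q_workers_init(struct q_ctx *q, int num, int hif_id, work_func_t fn)
{
	int i;

	for (i = 0; i < num; i++) {
		/* One ordered, reclaim-safe workqueue per hardware queue. */
		q[i].worker = alloc_ordered_workqueue("hif%d_q%d_work",
						      WQ_MEM_RECLAIM, hif_id, i);
		if (!q[i].worker)
			goto err_destroy;
		INIT_WORK(&q[i].work, fn);
	}

	return 0;

err_destroy:
	while (--i >= 0) {
		destroy_workqueue(q[i].worker);
		q[i].worker = NULL;
	}
	return -ENOMEM;
}
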
1352 void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id) in t7xx_cldma_switch_cfg() argument
1354 t7xx_cldma_late_release(md_ctrl); in t7xx_cldma_switch_cfg()
1355 t7xx_cldma_adjust_config(md_ctrl, cfg_id); in t7xx_cldma_switch_cfg()
1356 t7xx_cldma_late_init(md_ctrl); in t7xx_cldma_switch_cfg()
1359 void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) in t7xx_cldma_exit() argument
1361 t7xx_cldma_stop(md_ctrl); in t7xx_cldma_exit()
1362 t7xx_cldma_late_release(md_ctrl); in t7xx_cldma_exit()
1363 t7xx_cldma_destroy_wqs(md_ctrl); in t7xx_cldma_exit()
1364 t7xx_cldma_pm_uninit(md_ctrl); in t7xx_cldma_exit()