Lines matching +full:pc +full:-ack in the Qualcomm BAM-DMUX WWAN network driver

// SPDX-License-Identifier: GPL-2.0-only
/* Qualcomm BAM-DMUX WWAN network driver */
#include <linux/dma-mapping.h>

#define BAM_DMUX_MAX_DATA_SIZE (BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)

/* member of struct bam_dmux */
        struct qcom_smem_state *pc, *pc_ack;

/* in bam_dmux_pc_vote() */
        reinit_completion(&dmux->pc_ack_completion);
        qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
                                    enable ? dmux->pc_mask : 0);

/* in bam_dmux_pc_ack() */
        qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
                                    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
        dmux->pc_ack_state = !dmux->pc_ack_state;
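The two helpers above drive the power-control handshake over shared smem-state bits: bam_dmux_pc_vote() rearms the ack completion and raises or clears the local "pc" vote, while bam_dmux_pc_ack() acknowledges by flipping the "pc-ack" bit each time, so the peer sees an edge rather than a fixed level. A minimal standalone sketch of that toggle convention (the names below are illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of a toggle-signalled ack bit in a shared state word. */
struct toggle_ack {
        uint32_t mask;   /* bit this side owns, like dmux->pc_ack_mask */
        bool level;      /* last level driven, like dmux->pc_ack_state */
};

/* Each ack flips the owned bit; the peer detects the edge, not a level. */
static uint32_t toggle_ack_send(struct toggle_ack *t, uint32_t shared)
{
        t->level = !t->level;
        return t->level ? (shared | t->mask) : (shared & ~t->mask);
}

int main(void)
{
        struct toggle_ack ack = { .mask = 1u << 0, .level = false };
        uint32_t shared = 0;

        shared = toggle_ack_send(&ack, shared);  /* first ack: bit set */
        shared = toggle_ack_send(&ack, shared);  /* second ack: bit cleared */
        printf("shared=%#x\n", (unsigned int)shared);
        return 0;
}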
/* in bam_dmux_skb_dma_map() */
        struct device *dev = skb_dma->dmux->dev;

        skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
        if (dma_mapping_error(dev, skb_dma->addr)) {
                skb_dma->addr = 0;

/* in bam_dmux_skb_dma_unmap() */
        dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
        skb_dma->addr = 0;

/* in bam_dmux_tx_wake_queues() */
        dev_dbg(dmux->dev, "wake queues\n");
                struct net_device *netdev = dmux->netdevs[i];

/* in bam_dmux_tx_stop_queues() */
        dev_dbg(dmux->dev, "stop queues\n");
                struct net_device *netdev = dmux->netdevs[i];

/* in bam_dmux_tx_done() */
        struct bam_dmux *dmux = skb_dma->dmux;

        pm_runtime_mark_last_busy(dmux->dev);
        pm_runtime_put_autosuspend(dmux->dev);
        if (skb_dma->addr)

        spin_lock_irqsave(&dmux->tx_lock, flags);
        skb_dma->skb = NULL;
        if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
        spin_unlock_irqrestore(&dmux->tx_lock, flags);

/* in bam_dmux_tx_callback() */
        struct sk_buff *skb = skb_dma->skb;

/* in bam_dmux_skb_dma_submit_tx() */
        struct bam_dmux *dmux = skb_dma->dmux;

        desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
                                           skb_dma->skb->len, DMA_MEM_TO_DEV,
                dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");

        desc->callback = bam_dmux_tx_callback;
        desc->callback_param = skb_dma;
        desc->cookie = dmaengine_submit(desc);

/* in bam_dmux_tx_queue() */
        spin_lock_irqsave(&dmux->tx_lock, flags);

        skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
        if (skb_dma->skb) {
                spin_unlock_irqrestore(&dmux->tx_lock, flags);
        skb_dma->skb = skb;

        dmux->tx_next_skb++;
        if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)

        spin_unlock_irqrestore(&dmux->tx_lock, flags);
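Taken together, bam_dmux_tx_queue() and bam_dmux_tx_done() appear to treat tx_skbs[] as a small ring indexed by tx_next_skb % BAM_DMUX_NUM_SKB, with a non-NULL skb pointer marking a slot whose DMA is still in flight: the enqueue path reports busy when its slot is occupied, stops the netdev queues when the following slot is, and the completion path wakes them once it frees exactly the slot the producer is waiting on. A rough standalone model of that occupancy scheme (no locking, names are illustrative):

#include <stdbool.h>
#include <stddef.h>

#define NUM_SLOTS 32                    /* stands in for BAM_DMUX_NUM_SKB */

struct tx_ring {
        void *slot[NUM_SLOTS];          /* non-NULL = skb still in flight */
        unsigned int next;              /* stands in for dmux->tx_next_skb */
};

/* Enqueue: claim the next slot; returns false ("busy") if its previous
 * occupant has not completed yet. *stop_queues asks the caller to stop the
 * netdev queues because the following slot is already taken. */
static bool tx_ring_queue(struct tx_ring *r, void *skb, bool *stop_queues)
{
        unsigned int idx = r->next % NUM_SLOTS;

        if (r->slot[idx])
                return false;
        r->slot[idx] = skb;
        r->next++;
        *stop_queues = r->slot[r->next % NUM_SLOTS] != NULL;
        return true;
}

/* Completion: free the slot and report whether it is the one the producer
 * is currently blocked on (wake the netdev queues in that case). */
static bool tx_ring_done(struct tx_ring *r, unsigned int idx)
{
        r->slot[idx % NUM_SLOTS] = NULL;
        return (idx % NUM_SLOTS) == (r->next % NUM_SLOTS);
}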
/* in bam_dmux_send_cmd() */
        struct bam_dmux *dmux = bndev->dmux;
                return -ENOMEM;

        hdr->magic = BAM_DMUX_HDR_MAGIC;
        hdr->cmd = cmd;
        hdr->ch = bndev->ch;
                ret = -EAGAIN;

        ret = pm_runtime_get_sync(dmux->dev);
                ret = -ENOMEM;
                ret = -EIO;

        dma_async_issue_pending(dmux->tx);

/* in needed_room() */
        return needed - avail;

/* in bam_dmux_tx_prepare_skb() */
        unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);

        hdr->magic = BAM_DMUX_HDR_MAGIC;
        hdr->signal = 0;
        hdr->cmd = BAM_DMUX_CMD_DATA;
        hdr->pad = pad;
        hdr->ch = bndev->ch;
        hdr->len = skb->len - sizeof(*hdr);
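The assignments in bam_dmux_tx_prepare_skb() fill a small multiplexing header (magic, signal, cmd, pad, ch, len) in front of each frame and record how many padding bytes bring the payload up to a 32-bit boundary, which matches needed_tailroom = sizeof(u32) in bam_dmux_netdev_setup() further down. A standalone sketch of a plausible header layout and of the padding arithmetic; the exact field widths are an assumption, not something this listing shows:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the BAM-DMUX per-frame header; field order follows the
 * assignments in bam_dmux_tx_prepare_skb(), the widths are a guess. */
struct bam_dmux_hdr_model {
        uint16_t magic;
        uint8_t  signal;
        uint8_t  cmd;
        uint8_t  pad;
        uint8_t  ch;
        uint16_t len;
};

int main(void)
{
        unsigned int skb_len = sizeof(struct bam_dmux_hdr_model) + 1501;
        /* Same expression as in the driver: bytes needed to reach the next
         * 32-bit boundary (it yields 4, i.e. "already aligned", when len is
         * a multiple of 4 -- presumably special-cased in the full source). */
        unsigned int pad = sizeof(uint32_t) - skb_len % sizeof(uint32_t);

        printf("hdr=%zu bytes, payload len=%zu, pad=%u\n",
               sizeof(struct bam_dmux_hdr_model),
               skb_len - sizeof(struct bam_dmux_hdr_model), pad);
        return 0;
}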
/* in bam_dmux_netdev_start_xmit() */
        struct bam_dmux *dmux = bndev->dmux;

        active = pm_runtime_get(dmux->dev);
        if (active < 0 && active != -EINPROGRESS)

        if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
                                  &dmux->tx_deferred_skb))
                queue_pm_work(&dmux->tx_wakeup_work);

        dma_async_issue_pending(dmux->tx);

/* in bam_dmux_tx_wakeup_work() */
        ret = pm_runtime_resume_and_get(dmux->dev);
                dev_err(dmux->dev, "Failed to resume: %d\n", ret);

        pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
        dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
                bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
        dma_async_issue_pending(dmux->tx);

        pm_runtime_mark_last_busy(dmux->dev);
        pm_runtime_put_autosuspend(dmux->dev);
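If the transmit path finds the device still resuming, it records the prepared slot in the tx_deferred_skb bitmap and schedules tx_wakeup_work only when atomic_long_fetch_or() reports that no bit was set yet; the work later drains the whole bitmap with a single atomic_long_xchg(). A small userspace model of that defer-and-flush pattern (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong deferred;   /* stands in for dmux->tx_deferred_skb */

/* TX path while the link is resuming: mark the slot as deferred and report
 * whether the flush work still needs to be scheduled. */
static int defer_slot(unsigned int slot)
{
        unsigned long prev = atomic_fetch_or(&deferred, 1UL << slot);

        return prev == 0;       /* true only for the first deferred bit */
}

/* Wakeup work: grab-and-clear the whole bitmap, then submit each slot. */
static void flush_deferred(void)
{
        unsigned long pending = atomic_exchange(&deferred, 0);

        for (unsigned int i = 0; pending; i++, pending >>= 1)
                if (pending & 1)
                        printf("submit deferred slot %u\n", i);
}

int main(void)
{
        if (defer_slot(3))
                printf("schedule wakeup work\n");
        defer_slot(7);          /* work already pending, not rescheduled */
        flush_deferred();
        return 0;
}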
/* in bam_dmux_netdev_setup() */
        dev->netdev_ops = &bam_dmux_ops;
        dev->type = ARPHRD_RAWIP;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = ETH_DATA_LEN;
        dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
        dev->needed_headroom = sizeof(struct bam_dmux_hdr);
        dev->needed_tailroom = sizeof(u32); /* word-aligned */
        dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

        dev->addr_assign_type = NET_ADDR_RANDOM;
        eth_random_addr(dev->perm_addr);

/* in bam_dmux_register_netdev_work() */
        for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
                if (dmux->netdevs[ch])

                SET_NETDEV_DEV(netdev, dmux->dev);
                netdev->dev_port = ch;

                bndev->dmux = dmux;
                bndev->ch = ch;
                        dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",

                dmux->netdevs[ch] = netdev;

/* in bam_dmux_skb_dma_submit_rx() */
        struct bam_dmux *dmux = skb_dma->dmux;

        desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
                                           skb_dma->skb->len, DMA_DEV_TO_MEM,
                dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");

        desc->callback = bam_dmux_rx_callback;
        desc->callback_param = skb_dma;
        desc->cookie = dmaengine_submit(desc);

/* in bam_dmux_skb_dma_queue_rx() */
        if (!skb_dma->skb) {
                skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
                if (!skb_dma->skb)
                skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
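bam_dmux_skb_dma_queue_rx() only allocates a buffer when the slot's skb pointer is NULL, and the data path below clears that pointer before handing the buffer to the network stack, so re-arming a slot either recycles the untouched buffer or replaces one that was consumed. A bare-bones model of that recycle-or-realloc decision (illustrative; no DMA mapping or skb handling):

#include <stdlib.h>

#define BUF_SIZE 2048   /* stands in for BAM_DMUX_BUFFER_SIZE */

struct rx_slot {
        void *buf;      /* NULL after the previous buffer went up the stack */
};

/* (Re)arm one RX slot: reuse the existing buffer if it was not consumed,
 * otherwise allocate a fresh one; the caller would then map and resubmit
 * it to the DMA engine. */
static int rx_slot_queue(struct rx_slot *slot)
{
        if (!slot->buf) {
                slot->buf = malloc(BUF_SIZE);
                if (!slot->buf)
                        return -1;
        }
        return 0;
}

/* Receive path: detach the buffer (hand it to the consumer) so the next
 * rx_slot_queue() call allocates a replacement. */
static void *rx_slot_take(struct rx_slot *slot)
{
        void *buf = slot->buf;

        slot->buf = NULL;
        return buf;
}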
/* in bam_dmux_cmd_data() */
        struct bam_dmux *dmux = skb_dma->dmux;
        struct sk_buff *skb = skb_dma->skb;
        struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
        struct net_device *netdev = dmux->netdevs[hdr->ch];

                dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);

        if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
                dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
                        hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);

        skb_dma->skb = NULL; /* Hand over to network stack */

        skb_trim(skb, hdr->len);
        skb->dev = netdev;

        /* Only Raw-IP/QMAP is supported by this driver */
        switch (skb->data[0] & 0xf0) {
                skb->protocol = htons(ETH_P_IP);
                skb->protocol = htons(ETH_P_IPV6);
                skb->protocol = htons(ETH_P_MAP);
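The switch on skb->data[0] & 0xf0 above classifies each received frame from the high nibble of its first byte: raw IPv4 packets carry version nibble 4, IPv6 carries 6, and anything else is treated as multiplexed QMAP traffic (ETH_P_MAP). The case labels are elided in this listing; a standalone sketch of the same classification (the 0x40/0x60 values follow from the IP version field, not from the lines shown):

#include <stdint.h>
#include <stdio.h>

/* Illustrative protocol tags; the driver uses ETH_P_IP/ETH_P_IPV6/ETH_P_MAP. */
enum pkt_proto { PKT_IPV4, PKT_IPV6, PKT_QMAP };

static enum pkt_proto classify(const uint8_t *data)
{
        switch (data[0] & 0xf0) {
        case 0x40:              /* IP version 4 in the high nibble */
                return PKT_IPV4;
        case 0x60:              /* IP version 6 in the high nibble */
                return PKT_IPV6;
        default:                /* aggregated/multiplexed QMAP traffic */
                return PKT_QMAP;
        }
}

int main(void)
{
        uint8_t v4_hdr[] = { 0x45 };    /* IPv4, IHL 5 */
        uint8_t v6_hdr[] = { 0x60 };

        printf("%d %d\n", classify(v4_hdr), classify(v6_hdr));
        return 0;
}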
/* in bam_dmux_cmd_open() */
        struct net_device *netdev = dmux->netdevs[hdr->ch];

        dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);

        if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
                dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);

                schedule_work(&dmux->register_netdev_work);

/* in bam_dmux_cmd_close() */
        struct net_device *netdev = dmux->netdevs[hdr->ch];

        dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);

        if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
                dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);

/* in bam_dmux_rx_callback() */
        struct bam_dmux *dmux = skb_dma->dmux;
        struct sk_buff *skb = skb_dma->skb;
        struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;

        if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
                dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);

        if (hdr->ch >= BAM_DMUX_NUM_CH) {
                dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);

        switch (hdr->cmd) {
                dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
                        hdr->cmd, hdr->ch);

        dma_async_issue_pending(dmux->rx);

/* in bam_dmux_power_on() */
        struct device *dev = dmux->dev;

        dmux->rx = dma_request_chan(dev, "rx");
        if (IS_ERR(dmux->rx)) {
                dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
                dmux->rx = NULL;
        dmaengine_slave_config(dmux->rx, &dma_rx_conf);
                if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
        dma_async_issue_pending(dmux->rx);

/* in bam_dmux_free_skbs() */
                if (skb_dma->addr)
                if (skb_dma->skb) {
                        dev_kfree_skb(skb_dma->skb);
                        skb_dma->skb = NULL;

/* in bam_dmux_power_off() */
        if (dmux->tx) {
                dmaengine_terminate_sync(dmux->tx);
                dma_release_channel(dmux->tx);
                dmux->tx = NULL;

        if (dmux->rx) {
                dmaengine_terminate_sync(dmux->rx);
                dma_release_channel(dmux->rx);
                dmux->rx = NULL;

        bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);

/* in bam_dmux_pc_irq() */
        bool new_state = !dmux->pc_state;

        dev_dbg(dmux->dev, "pc: %u\n", new_state);

        dmux->pc_state = new_state;
        wake_up_all(&dmux->pc_wait);

/* in bam_dmux_pc_ack_irq() */
        dev_dbg(dmux->dev, "pc ack\n");
        complete_all(&dmux->pc_ack_completion);

/* in bam_dmux_runtime_resume() */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
                return -ETIMEDOUT;

        /* Wait for ack */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
                return -ETIMEDOUT;

        if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
                return -ETIMEDOUT;

        if (!dmux->rx) {
                return -ENXIO;

        if (dmux->tx)

        dmux->tx = dma_request_chan(dev, "tx");
        if (IS_ERR(dmux->tx)) {
                dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
                dmux->tx = NULL;
                return -ENXIO;
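Read together with the smem-state helpers and IRQ handlers above, the resume path appears to follow a fixed ordering: wait for any outstanding ack, vote for power-up over "pc", wait for the remote "pc-ack" toggle, wait for the remote pc line to come up, then acquire the TX DMA channel. A condensed, hypothetical sketch of that ordering (not the driver's exact code; error unwinding and the real timeout constant are omitted):

/* Hypothetical condensation of bam_dmux_runtime_resume(). */
static int bam_dmux_resume_sketch(struct bam_dmux *dmux, unsigned long timeout)
{
        /* 1. The previous ack must have arrived before a new vote is cast. */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion, timeout))
                return -ETIMEDOUT;

        /* 2. Vote for power-up: sets the pc smem-state bit and rearms the
         *    ack completion (see bam_dmux_pc_vote() above). */
        bam_dmux_pc_vote(dmux, true);

        /* 3. Remote toggles pc-ack; bam_dmux_pc_ack_irq() completes this. */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion, timeout))
                return -ETIMEDOUT;

        /* 4. Remote raises its pc line; bam_dmux_pc_irq() flips pc_state
         *    and wakes pc_wait. */
        if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state, timeout))
                return -ETIMEDOUT;

        /* 5. Only now is the BAM ready for the TX channel. */
        dmux->tx = dma_request_chan(dmux->dev, "tx");
        return IS_ERR(dmux->tx) ? -ENXIO : 0;
}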
/* in bam_dmux_probe() */
        struct device *dev = &pdev->dev;
                return -ENOMEM;
        dmux->dev = dev;

        dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
        if (dmux->pc_irq < 0)
                return dmux->pc_irq;

        pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");

        dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
        if (IS_ERR(dmux->pc))
                return dev_err_probe(dev, PTR_ERR(dmux->pc),
                                     "Failed to get pc state\n");
        dmux->pc_mask = BIT(bit);

        dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
        if (IS_ERR(dmux->pc_ack))
                return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
                                     "Failed to get pc-ack state\n");
        dmux->pc_ack_mask = BIT(bit);

        init_waitqueue_head(&dmux->pc_wait);
        init_completion(&dmux->pc_ack_completion);
        complete_all(&dmux->pc_ack_completion);

        spin_lock_init(&dmux->tx_lock);
        INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
        INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);

                dmux->rx_skbs[i].dmux = dmux;
                dmux->tx_skbs[i].dmux = dmux;

        ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,

        ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
                                    &dmux->pc_state);

        if (dmux->pc_state) {

/* in bam_dmux_remove() */
        struct device *dev = dmux->dev;

        cancel_work_sync(&dmux->register_netdev_work);
                if (dmux->netdevs[i])
                        unregister_netdevice_queue(dmux->netdevs[i], &list);

        cancel_work_sync(&dmux->tx_wakeup_work);

        if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))

        disable_irq(dmux->pc_irq);

        bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);

/* device match table and platform driver */
        { .compatible = "qcom,bam-dmux" },

        .name = "bam-dmux",

MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");