Lines Matching +full:pc +full:- +full:ack
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
18 #include <linux/dma-mapping.h>
33 /* maximum program/data TCM (Tightly-Coupled Memory) size */
68 /* vpu inter-processor communication interrupt */
74 * enum vpu_fw_type - VPU firmware type
86 * struct vpu_mem - VPU extended program/data memory information
98 * struct vpu_regs - VPU TCM and configuration registers
100 * @tcm: the register for VPU Tightly-Coupled Memory
111 * struct vpu_wdt_handler - VPU watchdog reset handler
122 * struct vpu_wdt - VPU watchdog workqueue
135 * struct vpu_run - VPU initialization status
154 * struct vpu_ipi_desc - VPU IPI descriptor
167 * struct share_obj - DTCM (Data Tightly-Coupled Memory) buffer shared with
181 * struct mtk_vpu - vpu driver data
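
The kernel-doc headers above describe the driver's core types. From the field accesses visible in the matched lines (send_obj->id/len/share_buf, vpu->reg.tcm/cfg/irq, vpu->extmem[fw_type].va/pa, ipi_desc[id].handler), a rough sketch of the shared-buffer and bookkeeping structures might look as follows; exact field types, SHARE_BUF_SIZE, and any members not referenced in the listing are assumptions.

#include <linux/types.h>

#define SHARE_BUF_SIZE 48			/* assumed payload size */

/* message slot placed in DTCM, accessed with readl()/writel()/memcpy_toio() */
struct share_obj {
	s32 id;					/* IPI id of the message */
	u32 len;				/* payload length */
	unsigned char share_buf[SHARE_BUF_SIZE];
};

/* extended program/data memory backing firmware that does not fit in TCM */
struct vpu_mem {
	void *va;				/* CPU address from dma_alloc_coherent() */
	dma_addr_t pa;				/* device address programmed into the VPU */
};

/* register windows and the VPU-to-host interrupt line */
struct vpu_regs {
	void __iomem *tcm;
	void __iomem *cfg;
	int irq;
};

/* one descriptor per IPI id; the handler is invoked from the interrupt path */
typedef void (*ipi_handler_t)(const void *data, unsigned int len, void *priv);
struct vpu_ipi_desc {
	ipi_handler_t handler;
	const char *name;			/* assumed bookkeeping fields */
	void *priv;
};
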
229 writel(val, vpu->reg.cfg + offset); in vpu_cfg_writel()
234 return readl(vpu->reg.cfg + offset); in vpu_cfg_readl()
245 mutex_lock(&vpu->vpu_mutex); in vpu_clock_disable()
246 if (!--vpu->wdt_refcnt) in vpu_clock_disable()
250 mutex_unlock(&vpu->vpu_mutex); in vpu_clock_disable()
252 clk_disable(vpu->clk); in vpu_clock_disable()
259 ret = clk_enable(vpu->clk); in vpu_clock_enable()
263 mutex_lock(&vpu->vpu_mutex); in vpu_clock_enable()
264 if (!vpu->wdt_refcnt++) in vpu_clock_enable()
268 mutex_unlock(&vpu->vpu_mutex); in vpu_clock_enable()
275 dev_info(vpu->dev, in vpu_dump_status()
276 "vpu: run %x, pc = 0x%x, ra = 0x%x, sp = 0x%x, idle = 0x%x\n" in vpu_dump_status()
295 dev_err(&pdev->dev, "vpu device is not ready\n"); in vpu_ipi_register()
296 return -EPROBE_DEFER; in vpu_ipi_register()
300 ipi_desc = vpu->ipi_desc; in vpu_ipi_register()
307 dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n", in vpu_ipi_register()
309 return -EINVAL; in vpu_ipi_register()
318 struct share_obj __iomem *send_obj = vpu->send_buf; in vpu_ipi_send()
323 len > sizeof(send_obj->share_buf) || !buf) { in vpu_ipi_send()
324 dev_err(vpu->dev, "failed to send ipi message\n"); in vpu_ipi_send()
325 return -EINVAL; in vpu_ipi_send()
330 dev_err(vpu->dev, "failed to enable vpu clock\n"); in vpu_ipi_send()
334 dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n"); in vpu_ipi_send()
335 ret = -EINVAL; in vpu_ipi_send()
339 mutex_lock(&vpu->vpu_mutex); in vpu_ipi_send()
345 dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n"); in vpu_ipi_send()
346 ret = -EIO; in vpu_ipi_send()
352 memcpy_toio(send_obj->share_buf, buf, len); in vpu_ipi_send()
353 writel(len, &send_obj->len); in vpu_ipi_send()
354 writel(id, &send_obj->id); in vpu_ipi_send()
356 vpu->ipi_id_ack[id] = false; in vpu_ipi_send()
360 mutex_unlock(&vpu->vpu_mutex); in vpu_ipi_send()
362 /* wait for VPU's ACK */ in vpu_ipi_send()
364 ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout); in vpu_ipi_send()
365 vpu->ipi_id_ack[id] = false; in vpu_ipi_send()
367 dev_err(vpu->dev, "vpu ipi %d ack time out !\n", id); in vpu_ipi_send()
368 ret = -EIO; in vpu_ipi_send()
377 mutex_unlock(&vpu->vpu_mutex); in vpu_ipi_send()
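
Taken together, the vpu_ipi_send() lines show the send protocol: validate the id, length and buffer, enable the VPU clock, take vpu_mutex, copy the payload into the DTCM send buffer with memcpy_toio(), write len and id, clear ipi_id_ack[id], kick the VPU, then sleep on ack_wq until the interrupt handler acks or the wait times out with -EIO. A client would typically pair this with vpu_ipi_register(); the sketch below follows the prototypes in mtk_vpu.h, with a made-up message layout and handler, and IPI_VDEC_H264 chosen only as an example id.

#include <linux/platform_device.h>
#include <linux/types.h>
#include "mtk_vpu.h"

struct my_msg {					/* illustrative payload only */
	u32 msg_id;
	u32 param;
};

/* called from vpu_ipi_handler() with the payload already copied out of
 * DTCM; this runs in interrupt context, so keep it short */
static void my_ipi_handler(const void *data, unsigned int len, void *priv)
{
}

static int my_send_one(struct platform_device *vpu_pdev)
{
	struct my_msg msg = { .msg_id = 1, .param = 0 };
	int ret;

	ret = vpu_ipi_register(vpu_pdev, IPI_VDEC_H264, my_ipi_handler,
			       "my-ipi", NULL);
	if (ret)
		return ret;

	/* blocks until the VPU acks this id or the timeout hits (-EIO) */
	return vpu_ipi_send(vpu_pdev, IPI_VDEC_H264, &msg, sizeof(msg));
}
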
389 struct vpu_wdt_handler *handler = wdt->handler; in vpu_wdt_reset_func()
392 dev_info(vpu->dev, "vpu reset\n"); in vpu_wdt_reset_func()
395 dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret); in vpu_wdt_reset_func()
398 mutex_lock(&vpu->vpu_mutex); in vpu_wdt_reset_func()
400 vpu->fw_loaded = false; in vpu_wdt_reset_func()
401 mutex_unlock(&vpu->vpu_mutex); in vpu_wdt_reset_func()
407 dev_dbg(vpu->dev, "wdt handler func %d\n", index); in vpu_wdt_reset_func()
420 dev_err(&pdev->dev, "vpu device is not ready\n"); in vpu_wdt_reg_handler()
421 return -EPROBE_DEFER; in vpu_wdt_reg_handler()
424 handler = vpu->wdt.handler; in vpu_wdt_reg_handler()
427 dev_dbg(vpu->dev, "wdt register id %d\n", id); in vpu_wdt_reg_handler()
428 mutex_lock(&vpu->vpu_mutex); in vpu_wdt_reg_handler()
431 mutex_unlock(&vpu->vpu_mutex); in vpu_wdt_reg_handler()
435 dev_err(vpu->dev, "register vpu wdt handler failed\n"); in vpu_wdt_reg_handler()
436 return -EINVAL; in vpu_wdt_reg_handler()
444 return vpu->run.dec_capability; in vpu_get_vdec_hw_capa()
452 return vpu->run.enc_capability; in vpu_get_venc_hw_capa()
463 dev_err(vpu->dev, "invalid virtual data memory address\n"); in vpu_mapping_dm_addr()
464 return ERR_PTR(-EINVAL); in vpu_mapping_dm_addr()
468 return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm + in vpu_mapping_dm_addr()
471 return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE); in vpu_mapping_dm_addr()
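
The two return statements above encode vpu_mapping_dm_addr()'s rule: a VPU-side data address below VPU_DTCM_SIZE is served from the memory-mapped data TCM window, while anything beyond that falls into the extended data region allocated in vpu_alloc_ext_mem(). A condensed restatement using the driver's own names (the validity check's exact upper bound is not visible in the matched lines):

static void *dm_addr_sketch(struct mtk_vpu *vpu, u32 dtcm_dmem_addr)
{
	if (dtcm_dmem_addr < VPU_DTCM_SIZE)
		/* low addresses live inside the data TCM window */
		return (__force void *)(vpu->reg.tcm + VPU_DTCM_OFFSET +
					dtcm_dmem_addr);

	/* the rest maps into the extended data-firmware buffer */
	return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
}
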
477 struct device *dev = &pdev->dev; in vpu_get_plat_device()
481 vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0); in vpu_get_plat_device()
512 ret = request_firmware(&vpu_fw, fw_new_name, vpu->dev); in load_requested_vpu()
514 dev_info(vpu->dev, "Failed to load %s, %d, retry\n", in load_requested_vpu()
517 ret = request_firmware(&vpu_fw, fw_name, vpu->dev); in load_requested_vpu()
519 dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, in load_requested_vpu()
524 dl_size = vpu_fw->size; in load_requested_vpu()
526 dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name, in load_requested_vpu()
529 return -EFBIG; in load_requested_vpu()
531 dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n", in load_requested_vpu()
539 dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n", in load_requested_vpu()
541 extra_fw_size = dl_size - tcm_size; in load_requested_vpu()
542 dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size); in load_requested_vpu()
545 dest = (__force void *)vpu->reg.tcm; in load_requested_vpu()
548 memcpy(dest, vpu_fw->data, dl_size); in load_requested_vpu()
551 dest = vpu->extmem[fw_type].va; in load_requested_vpu()
552 dev_dbg(vpu->dev, "download extended memory type %x\n", in load_requested_vpu()
554 memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size); in load_requested_vpu()
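
These load_requested_vpu() lines describe the split download: anything up to tcm_size bytes is copied straight through the TCM window, and the excess (extra_fw_size = dl_size - tcm_size, per the earlier lines) goes to the extended memory reserved for that firmware type. Compressed into one place, reusing the locals shown above (any per-type offset into the TCM is not visible in the matched lines):

size_t extra_fw_size = 0;

if (dl_size > tcm_size) {
	extra_fw_size = dl_size - tcm_size;	/* spills past the TCM */
	dl_size = tcm_size;
}

memcpy((__force void *)vpu->reg.tcm, vpu_fw->data, dl_size);

if (extra_fw_size)
	memcpy(vpu->extmem[fw_type].va, vpu_fw->data + tcm_size, extra_fw_size);
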
571 return -EINVAL; in vpu_load_firmware()
574 dev = &pdev->dev; in vpu_load_firmware()
577 run = &vpu->run; in vpu_load_firmware()
579 mutex_lock(&vpu->vpu_mutex); in vpu_load_firmware()
580 if (vpu->fw_loaded) { in vpu_load_firmware()
581 mutex_unlock(&vpu->vpu_mutex); in vpu_load_firmware()
584 mutex_unlock(&vpu->vpu_mutex); in vpu_load_firmware()
592 mutex_lock(&vpu->vpu_mutex); in vpu_load_firmware()
594 run->signaled = false; in vpu_load_firmware()
595 dev_dbg(vpu->dev, "firmware request\n"); in vpu_load_firmware()
610 vpu->fw_loaded = true; in vpu_load_firmware()
614 ret = wait_event_interruptible_timeout(run->wq, in vpu_load_firmware()
615 run->signaled, in vpu_load_firmware()
619 ret = -ETIME; in vpu_load_firmware()
622 } else if (-ERESTARTSYS == ret) { in vpu_load_firmware()
628 dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver); in vpu_load_firmware()
631 mutex_unlock(&vpu->vpu_mutex); in vpu_load_firmware()
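
vpu_load_firmware() is the entry point a consumer calls: it downloads P_FW and D_FW, boots the core, and waits on run.wq until vpu_init_ipi_handler() signals readiness. A hypothetical consumer (for example a codec driver) would resolve the VPU device through the mediatek,vpu phandle and load firmware before sending IPIs; prototypes follow mtk_vpu.h.

#include <linux/platform_device.h>
#include "mtk_vpu.h"

static int my_attach_vpu(struct platform_device *my_pdev,
			 struct platform_device **vpu_pdev)
{
	int ret;

	*vpu_pdev = vpu_get_plat_device(my_pdev);	/* follows "mediatek,vpu" */
	if (!*vpu_pdev)
		return -EPROBE_DEFER;			/* VPU not probed yet */

	ret = vpu_load_firmware(*vpu_pdev);
	if (ret)
		dev_err(&my_pdev->dev, "vpu_load_firmware failed %d\n", ret);

	return ret;
}
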
643 vpu->run.signaled = run->signaled; in vpu_init_ipi_handler()
644 strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver)); in vpu_init_ipi_handler()
645 vpu->run.dec_capability = run->dec_capability; in vpu_init_ipi_handler()
646 vpu->run.enc_capability = run->enc_capability; in vpu_init_ipi_handler()
647 wake_up_interruptible(&vpu->run.wq); in vpu_init_ipi_handler()
656 unsigned int running, pc, vpu_to_host, host_to_vpu, wdt, idle, ra, sp; in vpu_debug_read() local
658 struct device *dev = file->private_data; in vpu_debug_read()
663 dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret); in vpu_debug_read()
669 pc = vpu_cfg_readl(vpu, VPU_PC_REG); in vpu_debug_read()
682 "PC: 0x%x\n" in vpu_debug_read()
689 vpu->run.fw_ver, pc, wdt, in vpu_debug_read()
706 struct device *dev = vpu->dev; in vpu_free_ext_mem()
709 dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va, in vpu_free_ext_mem()
710 vpu->extmem[fw_type].pa); in vpu_free_ext_mem()
715 struct device *dev = vpu->dev; in vpu_alloc_ext_mem()
719 u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0; in vpu_alloc_ext_mem()
721 vpu->extmem[fw_type].va = dma_alloc_coherent(dev, in vpu_alloc_ext_mem()
723 &vpu->extmem[fw_type].pa, in vpu_alloc_ext_mem()
725 if (!vpu->extmem[fw_type].va) { in vpu_alloc_ext_mem()
727 return -ENOMEM; in vpu_alloc_ext_mem()
732 vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb, in vpu_alloc_ext_mem()
737 (unsigned long long)vpu->extmem[fw_type].pa, in vpu_alloc_ext_mem()
738 vpu->extmem[fw_type].va); in vpu_alloc_ext_mem()
745 struct share_obj __iomem *rcv_obj = vpu->recv_buf; in vpu_ipi_handler()
746 struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc; in vpu_ipi_handler()
748 s32 id = readl(&rcv_obj->id); in vpu_ipi_handler()
750 memcpy_fromio(data, rcv_obj->share_buf, sizeof(data)); in vpu_ipi_handler()
752 ipi_desc[id].handler(data, readl(&rcv_obj->len), in vpu_ipi_handler()
755 vpu->ipi_id_ack[id] = true; in vpu_ipi_handler()
756 wake_up(&vpu->ack_wq); in vpu_ipi_handler()
759 dev_err(vpu->dev, "No such ipi id = %d\n", id); in vpu_ipi_handler()
769 vpu->recv_buf = vpu->reg.tcm + VPU_DTCM_OFFSET; in vpu_ipi_init()
770 vpu->send_buf = vpu->recv_buf + 1; in vpu_ipi_init()
771 memset_io(vpu->recv_buf, 0, sizeof(struct share_obj)); in vpu_ipi_init()
772 memset_io(vpu->send_buf, 0, sizeof(struct share_obj)); in vpu_ipi_init()
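
vpu_ipi_init() places the two message slots back to back at the start of the data TCM: the first struct share_obj is the VPU-to-host (receive) buffer and the one immediately after it is the host-to-VPU (send) buffer, with send_buf = recv_buf + 1 relying on pointer arithmetic scaling by sizeof(struct share_obj). A typed restatement of the implied layout:

/* implied DTCM layout, relative to vpu->reg.tcm + VPU_DTCM_OFFSET:
 *   +0                          recv_buf  (VPU -> host)
 *   +sizeof(struct share_obj)   send_buf  (host -> VPU)
 */
struct share_obj __iomem *base =
	(struct share_obj __iomem *)(vpu->reg.tcm + VPU_DTCM_OFFSET);

vpu->recv_buf = base;		/* slot 0 */
vpu->send_buf = base + 1;	/* slot 1: +1 advances by sizeof(struct share_obj) */
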
788 ret = clk_enable(vpu->clk); in vpu_irq_handler()
790 dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret); in vpu_irq_handler()
797 dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host); in vpu_irq_handler()
798 queue_work(vpu->wdt.wq, &vpu->wdt.ws); in vpu_irq_handler()
803 clk_disable(vpu->clk); in vpu_irq_handler()
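
The vpu_irq_handler() lines show the VPU-to-host interrupt path: enable the clock so the registers are reachable, read the doorbell status, either queue the watchdog work or dispatch the IPI, clear the doorbell, and drop the clock again. In sketch form, reusing the driver's internal helpers; the VPU_TO_HOST register name is inferred from the vpu_to_host variable above, and the status bit that separates a watchdog event from a normal IPI is not visible in the matched lines, so VPU_WDT_BIT is a placeholder.

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#define VPU_WDT_BIT	BIT(31)				/* placeholder bit position */

static irqreturn_t vpu_irq_sketch(int irq, void *priv)
{
	struct mtk_vpu *vpu = priv;
	u32 vpu_to_host;

	if (clk_enable(vpu->clk))			/* clock must be on to touch registers */
		return IRQ_NONE;

	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	if (vpu_to_host & VPU_WDT_BIT)
		queue_work(vpu->wdt.wq, &vpu->wdt.ws);	/* firmware hung: reset via wdt work */
	else
		vpu_ipi_handler(vpu);			/* dispatch and ack the IPI */

	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);		/* clear the doorbell */
	clk_disable(vpu->clk);

	return IRQ_HANDLED;
}
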
817 dev_dbg(&pdev->dev, "initialization\n"); in mtk_vpu_probe()
819 dev = &pdev->dev; in mtk_vpu_probe()
822 return -ENOMEM; in mtk_vpu_probe()
824 vpu->dev = &pdev->dev; in mtk_vpu_probe()
825 vpu->reg.tcm = devm_platform_ioremap_resource_byname(pdev, "tcm"); in mtk_vpu_probe()
826 if (IS_ERR((__force void *)vpu->reg.tcm)) in mtk_vpu_probe()
827 return PTR_ERR((__force void *)vpu->reg.tcm); in mtk_vpu_probe()
829 vpu->reg.cfg = devm_platform_ioremap_resource_byname(pdev, "cfg_reg"); in mtk_vpu_probe()
830 if (IS_ERR((__force void *)vpu->reg.cfg)) in mtk_vpu_probe()
831 return PTR_ERR((__force void *)vpu->reg.cfg); in mtk_vpu_probe()
834 vpu->clk = devm_clk_get(dev, "main"); in mtk_vpu_probe()
835 if (IS_ERR(vpu->clk)) { in mtk_vpu_probe()
837 return PTR_ERR(vpu->clk); in mtk_vpu_probe()
842 ret = clk_prepare(vpu->clk); in mtk_vpu_probe()
849 vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt"); in mtk_vpu_probe()
850 if (!vpu->wdt.wq) { in mtk_vpu_probe()
852 ret = -ENOMEM; in mtk_vpu_probe()
855 INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func); in mtk_vpu_probe()
856 mutex_init(&vpu->vpu_mutex); in mtk_vpu_probe()
887 vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT)); in mtk_vpu_probe()
888 dev_info(dev, "4GB mode %u\n", vpu->enable_4GB); in mtk_vpu_probe()
890 if (vpu->enable_4GB) { in mtk_vpu_probe()
909 init_waitqueue_head(&vpu->run.wq); in mtk_vpu_probe()
910 init_waitqueue_head(&vpu->ack_wq); in mtk_vpu_probe()
915 vpu->reg.irq = ret; in mtk_vpu_probe()
916 ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0, in mtk_vpu_probe()
917 pdev->name, vpu); in mtk_vpu_probe()
937 memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX); in mtk_vpu_probe()
939 mutex_destroy(&vpu->vpu_mutex); in mtk_vpu_probe()
943 destroy_workqueue(vpu->wdt.wq); in mtk_vpu_probe()
945 clk_unprepare(vpu->clk); in mtk_vpu_probe()
952 .compatible = "mediatek,mt8173-vpu",
965 if (vpu->wdt.wq) in mtk_vpu_remove()
966 destroy_workqueue(vpu->wdt.wq); in mtk_vpu_remove()
969 mutex_destroy(&vpu->vpu_mutex); in mtk_vpu_remove()
970 clk_unprepare(vpu->clk); in mtk_vpu_remove()
987 clk_unprepare(vpu->clk); in mtk_vpu_suspend()
991 mutex_lock(&vpu->vpu_mutex); in mtk_vpu_suspend()
1000 mutex_unlock(&vpu->vpu_mutex); in mtk_vpu_suspend()
1002 return -EIO; in mtk_vpu_suspend()
1006 mutex_unlock(&vpu->vpu_mutex); in mtk_vpu_suspend()
1008 clk_unprepare(vpu->clk); in mtk_vpu_suspend()
1018 clk_prepare(vpu->clk); in mtk_vpu_resume()
1021 clk_unprepare(vpu->clk); in mtk_vpu_resume()
1026 mutex_lock(&vpu->vpu_mutex); in mtk_vpu_resume()
1031 mutex_unlock(&vpu->vpu_mutex); in mtk_vpu_resume()