Lines Matching +full:fault +full:- +full:q (drivers/accel/ivpu/ivpu_mmu.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-2024 Intel Corporation
76 #define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
253 return "Transaction marks non-substream disabled"; in ivpu_mmu_event_to_str()
265 return "Translation fault"; in ivpu_mmu_event_to_str()
267 return "Output address caused address size fault"; in ivpu_mmu_event_to_str()
269 return "Access flag fault"; in ivpu_mmu_event_to_str()
271 return "Permission fault occurred on page access"; in ivpu_mmu_event_to_str()
337 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cdtab_alloc()
338 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cdtab_alloc()
341 cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL); in ivpu_mmu_cdtab_alloc()
342 if (!cdtab->base) in ivpu_mmu_cdtab_alloc()
343 return -ENOMEM; in ivpu_mmu_cdtab_alloc()
345 ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size); in ivpu_mmu_cdtab_alloc()
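The cdtab allocation above (and the strtab/cmdq/evtq allocations that follow) all use the same device-managed coherent-DMA pattern. A minimal sketch of that pattern, assuming only the standard dmam_alloc_coherent() semantics; the my_table struct and my_table_alloc() helper are hypothetical stand-ins for the driver's ivpu_mmu_cdtab/strtab/queue structures:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical table descriptor mirroring the cdtab/strtab/queue pattern. */
struct my_table {
	void *base;      /* CPU virtual address of the coherent buffer */
	dma_addr_t dma;  /* DMA address later programmed into MMU registers */
};

int my_table_alloc(struct device *dev, struct my_table *tab, size_t size)
{
	/* Managed allocation: freed automatically when the driver is unbound. */
	tab->base = dmam_alloc_coherent(dev, size, &tab->dma, GFP_KERNEL);
	if (!tab->base)
		return -ENOMEM;

	return 0;
}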
352 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_alloc()
353 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_alloc()
356 strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL); in ivpu_mmu_strtab_alloc()
357 if (!strtab->base) in ivpu_mmu_strtab_alloc()
358 return -ENOMEM; in ivpu_mmu_strtab_alloc()
360 strtab->base_cfg = IVPU_MMU_STRTAB_CFG; in ivpu_mmu_strtab_alloc()
361 strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA; in ivpu_mmu_strtab_alloc()
362 strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK; in ivpu_mmu_strtab_alloc()
365 &strtab->dma, &strtab->dma_q, size); in ivpu_mmu_strtab_alloc()
372 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cmdq_alloc()
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc() local
375 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_cmdq_alloc()
376 if (!q->base) in ivpu_mmu_cmdq_alloc()
377 return -ENOMEM; in ivpu_mmu_cmdq_alloc()
379 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_cmdq_alloc()
380 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_cmdq_alloc()
381 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_cmdq_alloc()
384 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_alloc()
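The dma_q value computed here (and in the strtab/evtq variants) packs three things into one 64-bit register write: an allocation-hint flag, the masked DMA address of the buffer, and, for the queues, the log2 entry count in the low bits. A small user-space model of that composition; the mask and flag values below are made up and only stand in for the driver's IVPU_MMU_Q_BASE_RWA / IVPU_MMU_Q_BASE_ADDR_MASK / IVPU_MMU_Q_COUNT_LOG2 constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real register layout. */
#define Q_BASE_RWA        (1ULL << 62)           /* read/write-allocate hint */
#define Q_BASE_ADDR_MASK  0x0000fffffffffc00ULL  /* address bits accepted by HW */
#define Q_COUNT_LOG2      4ULL                   /* log2 of the entry count */

static uint64_t q_base_encode(uint64_t dma)
{
	uint64_t v = Q_BASE_RWA;      /* allocation hint */

	v |= dma & Q_BASE_ADDR_MASK;  /* aligned buffer address */
	v |= Q_COUNT_LOG2;            /* queue size in the low bits */
	return v;
}

int main(void)
{
	printf("queue base = 0x%016llx\n",
	       (unsigned long long)q_base_encode(0x12345000ULL));
	return 0;
}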
391 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_evtq_alloc()
392 struct ivpu_mmu_queue *q = &mmu->evtq; in ivpu_mmu_evtq_alloc() local
394 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL); in ivpu_mmu_evtq_alloc()
395 if (!q->base) in ivpu_mmu_evtq_alloc()
396 return -ENOMEM; in ivpu_mmu_evtq_alloc()
398 q->dma_q = IVPU_MMU_Q_BASE_RWA; in ivpu_mmu_evtq_alloc()
399 q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK; in ivpu_mmu_evtq_alloc()
400 q->dma_q |= IVPU_MMU_Q_COUNT_LOG2; in ivpu_mmu_evtq_alloc()
403 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_evtq_alloc()
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons()
468 ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod, in ivpu_mmu_cmdq_wait_for_cons()
473 cmdq->cons = cmdq->prod; in ivpu_mmu_cmdq_wait_for_cons()
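The fragment above polls the hardware consumer index until it catches up with the software producer index, then snaps the local cons shadow forward. A simplified model of that wait; read_cmdq_cons() is a hypothetical register read standing in for the REGV_POLL_FLD() macro, and the bounded busy-wait loop replaces the macro's built-in timeout handling:

#include <stdint.h>

/* Hypothetical MMIO read of the CMDQ consumer register (stub for illustration). */
static uint32_t read_cmdq_cons(void) { return 0; }

/*
 * Wait until the device has consumed everything we produced: the hardware
 * cons index must reach our prod index, then the local shadow catches up.
 */
int cmdq_wait_for_cons(uint32_t prod, uint32_t *cons, unsigned int timeout_loops)
{
	for (unsigned int i = 0; i < timeout_loops; i++) {
		if (read_cmdq_cons() == prod) {
			*cons = prod;
			return 0;
		}
		/* a real implementation would delay/sleep between polls */
	}
	return -1;  /* timed out */
}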
478 static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q) in ivpu_mmu_queue_is_full() argument
480 return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && in ivpu_mmu_queue_is_full()
481 (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons))); in ivpu_mmu_queue_is_full()
484 static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q) in ivpu_mmu_queue_is_empty() argument
486 return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) && in ivpu_mmu_queue_is_empty()
487 (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons))); in ivpu_mmu_queue_is_empty()
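These two helpers are the classic SMMU-style circular-queue check: prod and cons carry one wrap bit above the index bits, so equal indices mean empty when the wrap bits agree and full when they differ. A self-contained model; the 16-entry size and the IDX/WRP/WRAP masks below are assumptions mirroring the IVPU_MMU_Q_* macros, whose definitions are not all shown in this listing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_COUNT_LOG2 4u
#define Q_COUNT      (1u << Q_COUNT_LOG2)  /* 16 entries (assumed) */
#define Q_IDX_MASK   (Q_COUNT - 1)         /* low bits: entry index */
#define Q_WRP_MASK   Q_COUNT               /* next bit: wrap flag */
#define Q_WRAP_MASK  ((Q_COUNT << 1) - 1)  /* index bits plus wrap bit */

#define Q_IDX(val)   ((val) & Q_IDX_MASK)
#define Q_WRP(val)   ((val) & Q_WRP_MASK)

static bool q_is_full(uint32_t prod, uint32_t cons)
{
	return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
}

static bool q_is_empty(uint32_t prod, uint32_t cons)
{
	return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;

	printf("empty: %d\n", q_is_empty(prod, cons));  /* 1: nothing produced yet */

	for (unsigned int i = 0; i < Q_COUNT; i++)      /* produce a full ring */
		prod = (prod + 1) & Q_WRAP_MASK;

	/* Same index, opposite wrap bit: full rather than empty. */
	printf("full: %d\n", q_is_full(prod, cons));    /* 1 */
	return 0;
}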
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write()
493 u64 *queue_buffer = cmdq->base; in ivpu_mmu_cmdq_cmd_write()
494 int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer)); in ivpu_mmu_cmdq_cmd_write()
498 return -EBUSY; in ivpu_mmu_cmdq_cmd_write()
503 cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_cmdq_cmd_write()
512 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync() local
523 clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_cmdq_sync()
524 REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod); in ivpu_mmu_cmdq_sync()
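ivpu_mmu_cmdq_sync() shows the producer-side ordering: the ring contents are flushed out of the CPU caches before the new producer index is written to the CMDQ_PROD doorbell, so the device never sees a prod value pointing at entries it cannot yet read. A tiny sketch of that ordering; cache_flush() and mmio_write32() are hypothetical stand-ins for clflush_cache_range() and REGV_WR32():

#include <stdint.h>

/* Hypothetical stand-ins for the driver's cache-flush and MMIO helpers. */
static void cache_flush(void *buf, unsigned long size) { (void)buf; (void)size; }
static void mmio_write32(uint32_t reg, uint32_t val)   { (void)reg; (void)val; }

#define REG_CMDQ_PROD 0x0u  /* made-up register offset */

void cmdq_publish(uint64_t *ring, unsigned long ring_size, uint32_t new_prod)
{
	/* 1. make the freshly written commands visible to the device */
	cache_flush(ring, ring_size);
	/* 2. only then ring the doorbell with the updated producer index */
	mmio_write32(REG_CMDQ_PROD, new_prod);
}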
566 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_reset()
570 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
572 clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
573 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
574 mmu->cmdq.cons = 0; in ivpu_mmu_reset()
576 memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE); in ivpu_mmu_reset()
577 mmu->evtq.prod = 0; in ivpu_mmu_reset()
578 mmu->evtq.cons = 0; in ivpu_mmu_reset()
592 REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q); in ivpu_mmu_reset()
593 REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg); in ivpu_mmu_reset()
595 REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q); in ivpu_mmu_reset()
616 REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q); in ivpu_mmu_reset()
640 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_strtab_link_cd()
641 struct ivpu_mmu_strtab *strtab = &mmu->strtab; in ivpu_mmu_strtab_link_cd()
642 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_strtab_link_cd()
643 u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE); in ivpu_mmu_strtab_link_cd()
650 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK); in ivpu_mmu_strtab_link_cd()
682 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_invalidate_tlb()
685 mutex_lock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
686 if (!mmu->on) in ivpu_mmu_invalidate_tlb()
695 mutex_unlock(&mmu->lock); in ivpu_mmu_invalidate_tlb()
701 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_cd_add()
702 struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab; in ivpu_mmu_cd_add()
708 return -EINVAL; in ivpu_mmu_cd_add()
710 entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE); in ivpu_mmu_cd_add()
729 /* For global context generate memory fault on VPU */ in ivpu_mmu_cd_add()
747 mutex_lock(&mmu->lock); in ivpu_mmu_cd_add()
748 if (!mmu->on) in ivpu_mmu_cd_add()
757 mutex_unlock(&mmu->lock); in ivpu_mmu_cd_add()
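Both ivpu_mmu_invalidate_tlb() and ivpu_mmu_cd_add() take mmu->lock and bail out early when mmu->on is false: table updates made earlier in these paths still land in memory, and the skipped command/sync sequence appears harmless to omit because bringing the MMU back up goes through the reset path above. A minimal pthread model of that guard; issue_sync_command() is a hypothetical stand-in for the invalidation/sync commands:

#include <pthread.h>
#include <stdbool.h>

struct mmu_state {
	pthread_mutex_t lock;
	bool on;  /* set by enable(), cleared by disable() and on error */
};

/* Hypothetical command issue standing in for the invalidation + sync sequence. */
static int issue_sync_command(void) { return 0; }

int mmu_flush_update(struct mmu_state *mmu)
{
	int ret = 0;

	pthread_mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;  /* hardware is off; nothing to synchronize right now */

	ret = issue_sync_command();
unlock:
	pthread_mutex_unlock(&mmu->lock);
	return ret;
}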
765 ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma); in ivpu_mmu_cd_add_gbl()
778 return -EINVAL; in ivpu_mmu_cd_add_user()
790 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_init()
797 ret = drmm_mutex_init(&vdev->drm, &mmu->lock); in ivpu_mmu_init()
830 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_enable()
833 mutex_lock(&mmu->lock); in ivpu_mmu_enable()
835 mmu->on = true; in ivpu_mmu_enable()
855 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
859 mmu->on = false; in ivpu_mmu_enable()
860 mutex_unlock(&mmu->lock); in ivpu_mmu_enable()
866 struct ivpu_mmu_info *mmu = vdev->mmu; in ivpu_mmu_disable()
868 mutex_lock(&mmu->lock); in ivpu_mmu_disable()
869 mmu->on = false; in ivpu_mmu_disable()
870 mutex_unlock(&mmu->lock); in ivpu_mmu_disable()
888 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq; in ivpu_mmu_get_event()
889 u32 idx = IVPU_MMU_Q_IDX(evtq->cons); in ivpu_mmu_get_event()
890 u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); in ivpu_mmu_get_event()
892 evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC); in ivpu_mmu_get_event()
896 evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_get_event()
917 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons); in ivpu_mmu_irq_evtq_handler()
920 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ)) in ivpu_mmu_irq_evtq_handler()
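On the event-queue side the roles flip: the driver is the consumer, so it refreshes its prod shadow from the EVTQ_PROD_SEC register, returns one event per call while the ring is non-empty, advances cons with the same wrap-around mask, and later writes the new cons back to EVTQ_CONS_SEC. A simplified model; the ring geometry and the read/write helpers below are assumptions standing in for the driver's IVPU_MMU_* constants and REGV accessors:

#include <stddef.h>
#include <stdint.h>

#define EVT_SIZE_DW 4u                   /* assumed event size in 32-bit words */
#define Q_COUNT     16u                  /* assumed entry count */
#define Q_IDX_MASK  (Q_COUNT - 1)
#define Q_WRAP_MASK ((Q_COUNT << 1) - 1)

struct evtq {
	uint32_t ring[Q_COUNT * EVT_SIZE_DW];
	uint32_t prod;                   /* shadow of the hardware producer index */
	uint32_t cons;                   /* local consumer index */
};

/* Hypothetical MMIO helpers standing in for REGV_RD32()/REGV_WR32(). */
static uint32_t read_evtq_prod(void)       { return 0; }
static void write_evtq_cons(uint32_t cons) { (void)cons; }

/* Return the next event, or NULL when the queue is empty. */
uint32_t *evtq_get_event(struct evtq *q)
{
	uint32_t *evt = &q->ring[(q->cons & Q_IDX_MASK) * EVT_SIZE_DW];

	q->prod = read_evtq_prod();      /* refresh prod from the device */
	if (((q->prod ^ q->cons) & Q_WRAP_MASK) == 0)
		return NULL;             /* same index and wrap bit: empty */

	q->cons = (q->cons + 1) & Q_WRAP_MASK;
	return evt;
}

/* After draining, publish the new consumer index back to the device. */
void evtq_ack(struct evtq *q)
{
	write_evtq_cons(q->cons);
}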
971 return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma); in ivpu_mmu_set_pgtable()