Lines matching refs: vpu_addr
232 u64 vpu_addr, dma_addr_t dma_addr, u64 prot) in ivpu_mmu_context_map_page() argument
235 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
236 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
237 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
238 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_map_page()
260 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_map_cont_64k() argument
265 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size)); in ivpu_mmu_context_map_cont_64k()
271 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_cont_64k()
277 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_map_cont_64k()
284 static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr) in ivpu_mmu_context_unmap_page() argument
286 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
287 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
288 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
289 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_unmap_page()
297 u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot) in ivpu_mmu_context_map_pages() argument
304 IS_ALIGNED(vpu_addr | dma_addr, IVPU_MMU_CONT_PAGES_SIZE)) { in ivpu_mmu_context_map_pages()
305 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
308 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot); in ivpu_mmu_context_map_pages()
315 vpu_addr += map_size; in ivpu_mmu_context_map_pages()
324 u64 vpu_addr) in ivpu_mmu_context_set_page_ro() argument
326 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
327 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
328 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
329 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_set_page_ro()
335 u64 vpu_addr) in ivpu_mmu_context_split_page() argument
337 int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
338 int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
339 int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
340 int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr); in ivpu_mmu_context_split_page()
346 u64 vpu_addr) in ivpu_mmu_context_split_64k_page() argument
348 u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); in ivpu_mmu_context_split_64k_page()
349 u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE); in ivpu_mmu_context_split_64k_page()
352 ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr); in ivpu_mmu_context_split_64k_page()
361 ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, in ivpu_mmu_context_set_pages_ro() argument
364 u64 end = vpu_addr + size; in ivpu_mmu_context_set_pages_ro()
371 if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE))) in ivpu_mmu_context_set_pages_ro()
377 ctx->id, vpu_addr, size); in ivpu_mmu_context_set_pages_ro()
381 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE)) in ivpu_mmu_context_set_pages_ro()
382 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
385 if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE)) in ivpu_mmu_context_set_pages_ro()
386 ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size); in ivpu_mmu_context_set_pages_ro()
390 if (vpu_addr < end) in ivpu_mmu_context_set_pages_ro()
391 ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr); in ivpu_mmu_context_set_pages_ro()
393 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_set_pages_ro()
408 static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) in ivpu_mmu_context_unmap_pages() argument
411 ivpu_mmu_context_unmap_page(ctx, vpu_addr); in ivpu_mmu_context_unmap_pages()
412 vpu_addr += IVPU_MMU_PAGE_SIZE; in ivpu_mmu_context_unmap_pages()
419 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) in ivpu_mmu_context_map_sgt() argument
429 if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) in ivpu_mmu_context_map_sgt()
432 if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK) in ivpu_mmu_context_map_sgt()
446 ctx->id, dma_addr, vpu_addr, size); in ivpu_mmu_context_map_sgt()
448 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot); in ivpu_mmu_context_map_sgt()
454 vpu_addr += size; in ivpu_mmu_context_map_sgt()
470 u64 vpu_addr, struct sg_table *sgt) in ivpu_mmu_context_unmap_sgt() argument
486 ctx->id, dma_addr, vpu_addr, size); in ivpu_mmu_context_unmap_sgt()
488 ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size); in ivpu_mmu_context_unmap_sgt()
489 vpu_addr += size; in ivpu_mmu_context_unmap_sgt()
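
Taken together, the matches above fall into a few recurring patterns. The FIELD_GET() calls matched at lines 235-238 (and repeated in the unmap, set-page-RO and split helpers) decompose a VPU virtual address into the four indices of a PGD/PUD/PMD/PTE page-table walk. Below is a minimal standalone sketch of that decomposition; the mask layouts (PGD bits 46:39, PUD 38:30, PMD 29:21, PTE 20:12 for 4 KiB pages) are assumptions modelled on the driver's defines, not quoted from them.

/*
 * Standalone model of the address decomposition done by the FIELD_GET()
 * calls matched above. Mask layouts are assumed for a 4-level, 4 KiB-page
 * table and may not match the driver exactly.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l)  ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

#define IVPU_MMU_PGD_INDEX_MASK  GENMASK64(46, 39)  /* assumed layout */
#define IVPU_MMU_PUD_INDEX_MASK  GENMASK64(38, 30)  /* assumed layout */
#define IVPU_MMU_PMD_INDEX_MASK  GENMASK64(29, 21)  /* assumed layout */
#define IVPU_MMU_PTE_INDEX_MASK  GENMASK64(20, 12)  /* assumed layout */

/* Minimal stand-in for FIELD_GET() from <linux/bitfield.h>: mask the
 * value, then shift it down by the mask's lowest set bit. */
static inline uint64_t field_get(uint64_t mask, uint64_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint64_t vpu_addr = 0x1234567000ULL;  /* hypothetical VPU address */

	printf("pgd=%llu pud=%llu pmd=%llu pte=%llu\n",
	       (unsigned long long)field_get(IVPU_MMU_PGD_INDEX_MASK, vpu_addr),
	       (unsigned long long)field_get(IVPU_MMU_PUD_INDEX_MASK, vpu_addr),
	       (unsigned long long)field_get(IVPU_MMU_PMD_INDEX_MASK, vpu_addr),
	       (unsigned long long)field_get(IVPU_MMU_PTE_INDEX_MASK, vpu_addr));
	return 0;
}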
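
The ivpu_mmu_context_map_pages() matches at lines 297-315 show the mapping loop choosing a 64 KiB contiguous mapping when both addresses are suitably aligned, and falling back to single 4 KiB pages otherwise, advancing vpu_addr and dma_addr by the mapped size each iteration. Below is a simplified, self-contained model of that size-selection loop; the 4 KiB / 64 KiB sizes and the remaining-size check are assumptions based on the matched lines, and the real function writes page-table entries rather than printing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ       0x1000ULL       /* assumed IVPU_MMU_PAGE_SIZE (4 KiB) */
#define CONT_PAGES_SZ (16 * PAGE_SZ)  /* assumed IVPU_MMU_CONT_PAGES_SIZE (64 KiB) */

static bool aligned(uint64_t v, uint64_t a) { return (v & (a - 1)) == 0; }

/* Model of the map_pages loop: prefer a 64 KiB contiguous mapping when
 * both addresses are 64 KiB aligned and enough bytes remain, otherwise
 * fall back to a single 4 KiB page, then advance by the mapped size. */
static void map_pages(uint64_t vpu_addr, uint64_t dma_addr, size_t size)
{
	while (size) {
		size_t map_size;

		if (size >= CONT_PAGES_SZ && aligned(vpu_addr | dma_addr, CONT_PAGES_SZ))
			map_size = CONT_PAGES_SZ;
		else
			map_size = PAGE_SZ;

		printf("map 0x%llx -> 0x%llx (%zu bytes)\n",
		       (unsigned long long)vpu_addr,
		       (unsigned long long)dma_addr, map_size);

		vpu_addr += map_size;
		dma_addr += map_size;
		size -= map_size;
	}
}

int main(void)
{
	map_pages(0x10000, 0x80000000, 0x22000);  /* hypothetical addresses */
	return 0;
}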
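
The set-pages-read-only matches at lines 324-393 lean on ivpu_mmu_context_split_64k_page(): when the start or end of the RO range is not 64 KiB aligned, the surrounding contiguous mapping is first split back into individual 4 KiB pages between ALIGN_DOWN(vpu_addr, 64K) and ALIGN(vpu_addr, 64K), presumably so that pages inside and outside the RO range can carry different attributes. A small sketch of that boundary arithmetic, with hypothetical addresses and assumed sizes:

#include <stdint.h>
#include <stdio.h>

#define CONT_PAGES_SZ 0x10000ULL  /* assumed 64 KiB contiguous-mapping size */
#define PAGE_SZ       0x1000ULL   /* assumed 4 KiB page size */

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	/* Hypothetical unaligned boundary inside a 64 KiB contiguous mapping. */
	uint64_t vpu_addr = 0x123457000ULL;
	uint64_t start = ALIGN_DOWN(vpu_addr, CONT_PAGES_SZ);
	uint64_t end   = ALIGN_UP(vpu_addr, CONT_PAGES_SZ);

	/* The split walks every 4 KiB page of the surrounding 64 KiB mapping
	 * and rewrites it as an individual page entry. */
	for (uint64_t a = start; a < end; a += PAGE_SZ)
		printf("split page at 0x%llx\n", (unsigned long long)a);

	return 0;
}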
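
Finally, the ivpu_mmu_context_map_sgt() and ivpu_mmu_context_unmap_sgt() matches at lines 419-489 walk a scatter-gather table and map or unmap each DMA-contiguous chunk at a monotonically increasing vpu_addr, so the buffer appears contiguous from the VPU's point of view. The sketch below models that walk with a made-up chunk structure standing in for the kernel's struct sg_table iteration; it is illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for one DMA-mapped scatterlist entry (hypothetical struct). */
struct dma_chunk {
	uint64_t dma_addr;
	size_t   size;
};

/* Model of the map_sgt() walk: each chunk is mapped at the current
 * vpu_addr, which then advances by the chunk size so the buffer is
 * virtually contiguous for the VPU. Unmapping walks the same ranges. */
static void map_sgt(uint64_t vpu_addr, const struct dma_chunk *chunks, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		printf("map: vpu 0x%llx <- dma 0x%llx, %zu bytes\n",
		       (unsigned long long)vpu_addr,
		       (unsigned long long)chunks[i].dma_addr, chunks[i].size);
		vpu_addr += chunks[i].size;
	}
}

int main(void)
{
	const struct dma_chunk chunks[] = {
		{ 0x80000000, 0x10000 },  /* hypothetical DMA chunks */
		{ 0x80100000, 0x04000 },
	};

	map_sgt(0x3000000, chunks, 2);
	return 0;
}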