Lines Matching full:va
173 /** @va: Virtual range targeted by the VM operation. */
180 } va; member
186 * specified VA range.
189 * the specified VA range.
213 * @new_vma: The new VMA object that will be inserted into the VA tree.
240 * We delegate all the VA management to the common drm_gpuvm framework
289 * @mm: Memory management object representing the auto-VA/kernel-VA.
291 * Used to auto-allocate VA space for kernel-managed objects (tiler
294 * For the MCU VM, this is managing the VA range that's used to map
298 * exceed half of the addressable VA space.
305 /** @kernel_auto_va: Automatic VA-range for kernel BOs. */
307 /** @start: Start of the automatic VA-range for kernel BOs. */
310 /** @size: Size of the automatic VA-range for kernel BOs. */
1019 * panthor_vm_alloc_va() - Allocate a region in the auto-VA space
1021 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
1022 * wants the VA to be automatically allocated from the auto-VA range.
1023 * @size: size of the VA range.
1030 * This function takes care of allocating a region in the kernel auto-VA space.
1035 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, in panthor_vm_alloc_va() argument
1044 if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) in panthor_vm_alloc_va()
1048 if (va != PANTHOR_VM_KERNEL_AUTO_VA) { in panthor_vm_alloc_va()
1049 va_node->start = va; in panthor_vm_alloc_va()
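The panthor_vm_alloc_va() hits above show its two paths: a caller can pin an exact, page-aligned VA, or pass PANTHOR_VM_KERNEL_AUTO_VA and let the kernel pick a slot from the auto-VA window tracked by the VM's drm_mm object. Below is a minimal, hedged sketch of that pattern in driver context; the mm_lock name, the panthor_vm_page_size() helper and the DRM_MM_INSERT_LOW policy are illustrative assumptions, not necessarily the driver's exact code.

/* Sketch only: fixed-vs-auto VA allocation on top of drm_mm. */
static int alloc_va_sketch(struct panthor_vm *vm, u64 va, u64 size,
                           struct drm_mm_node *va_node)
{
        ssize_t vm_pgsz = panthor_vm_page_size(vm);     /* assumed helper */
        int ret;

        if (!size || !IS_ALIGNED(size, vm_pgsz))
                return -EINVAL;

        /* A caller-chosen VA must be page aligned; the AUTO sentinel is exempt. */
        if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
                return -EINVAL;

        mutex_lock(&vm->mm_lock);                       /* assumed lock name */
        if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
                /* Fixed placement: reserve exactly [va, va + size). */
                va_node->start = va;
                va_node->size = size;
                ret = drm_mm_reserve_node(&vm->mm, va_node);
        } else {
                /* Auto placement: let drm_mm find a hole in the kernel auto-VA window. */
                ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size, 0, 0,
                                                  vm->kernel_auto_va.start,
                                                  vm->kernel_auto_va.start +
                                                  vm->kernel_auto_va.size,
                                                  DRM_MM_INSERT_LOW);
        }
        mutex_unlock(&vm->mm_lock);

        return ret;
}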
1164 * which might contain both a prev and next VA. in panthor_vm_op_ctx_prealloc_vmas()
1170 /* Partial unmaps might trigger a remap with either a prev or a next VA, in panthor_vm_op_ctx_prealloc_vmas()
1202 u64 size, u64 va, in panthor_vm_prepare_map_op_ctx() argument
1217 /* Make sure the VA and size are aligned and in-bounds. */ in panthor_vm_prepare_map_op_ctx()
1229 op_ctx->va.range = size; in panthor_vm_prepare_map_op_ctx()
1230 op_ctx->va.addr = va; in panthor_vm_prepare_map_op_ctx()
1294 pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) + in panthor_vm_prepare_map_op_ctx()
1295 ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) + in panthor_vm_prepare_map_op_ctx()
1296 ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21); in panthor_vm_prepare_map_op_ctx()
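The pt_count expression above adds up, per page-table level, how many 512 GiB, 1 GiB and 2 MiB aligned blocks the mapping [va, va + size) touches, which bounds the number of page-table pages that may have to be pre-allocated. A small self-contained illustration of the arithmetic, with the ALIGN helpers reimplemented here for power-of-two alignments:

#include <stdio.h>
#include <stdint.h>

/* Power-of-two alignment helpers, mirroring the kernel's ALIGN()/ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a)        ((x) & ~((uint64_t)(a) - 1))
#define ALIGN(x, a)             ALIGN_DOWN((x) + (a) - 1, a)

/* Worst-case number of page-table pages touched by a map of [va, va + size),
 * counted per level (512 GiB, 1 GiB and 2 MiB granules). */
static uint64_t pt_count(uint64_t va, uint64_t size)
{
        return ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
               ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
               ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
}

int main(void)
{
        /* A 4 MiB mapping straddling a 1 GiB boundary: 1 + 2 + 2 = 5 tables. */
        printf("%llu\n", (unsigned long long)pt_count(0x3fe00000ull, 0x400000ull));
        return 0;
}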
1328 u64 va, u64 size) in panthor_vm_prepare_unmap_op_ctx() argument
1335 op_ctx->va.range = size; in panthor_vm_prepare_unmap_op_ctx()
1336 op_ctx->va.addr = va; in panthor_vm_prepare_unmap_op_ctx()
1342 if (va != ALIGN(va, SZ_2M)) in panthor_vm_prepare_unmap_op_ctx()
1345 if (va + size != ALIGN(va + size, SZ_2M) && in panthor_vm_prepare_unmap_op_ctx()
1346 ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M)) in panthor_vm_prepare_unmap_op_ctx()
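The two checks above count how many extra page tables an unmap may need: one if va cuts into the middle of a 2 MiB block mapping, and one more if va + size cuts into a different 2 MiB block than the start. A self-contained illustration of that count:

#include <stdio.h>
#include <stdint.h>

#define SZ_2M                   (2ull << 20)
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Extra page tables an unmap of [va, va + size) may need when it splits a
 * 2 MiB block mapping at its start and/or end. */
static unsigned int split_tables(uint64_t va, uint64_t size)
{
        unsigned int count = 0;

        if (va != ALIGN(va, SZ_2M))
                count++;                        /* start cuts into a block */

        if (va + size != ALIGN(va + size, SZ_2M) &&
            ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
                count++;                        /* end cuts into a different block */

        return count;
}

int main(void)
{
        printf("%u\n", split_tables(0x100000, 0x100000)); /* 1: only the start splits a block */
        printf("%u\n", split_tables(0x180000, 0x100000)); /* 2: start and end split different blocks */
        return 0;
}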
1389 * @va: Virtual address to search for.
1401 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) in panthor_vm_get_bo_for_va() argument
1409 gpuva = drm_gpuva_find_first(&vm->base, va, 1); in panthor_vm_get_bo_for_va()
1414 *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr); in panthor_vm_get_bo_for_va()
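The bo_offset computation above turns a VA back into an offset inside the backing GEM object: the distance of va from the VMA start is added to the offset at which the VMA maps the GEM object. A tiny worked example with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative values, not taken from the driver. */
        uint64_t vma_start  = 0x8000000000ull;  /* vma->base.va.addr */
        uint64_t gem_offset = 0x10000ull;       /* vma->base.gem.offset */
        uint64_t va         = 0x8000003000ull;  /* address being resolved */

        /* Same arithmetic as the driver line above: 0x10000 + 0x3000 = 0x13000. */
        printf("bo_offset = 0x%llx\n",
               (unsigned long long)(gem_offset + (va - vma_start)));
        return 0;
}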
1429 /* Make sure we have a minimum amount of VA space for kernel objects. */ in panthor_vm_create_get_user_va_range()
1437 /* If the task VM size is smaller than the GPU VA range, pick this in panthor_vm_create_get_user_va_range()
1438 * as our default user VA range, so userspace can CPU/GPU map buffers in panthor_vm_create_get_user_va_range()
1443 /* If the GPU VA range is smaller than the task VM size, we in panthor_vm_create_get_user_va_range()
1447 * If the GPU VA range is bigger than 4G (more than 32-bit of in panthor_vm_create_get_user_va_range()
1448 * VA), we split the range in two, and assign half of it to in panthor_vm_create_get_user_va_range()
1450 * keep the kernel VA space as small as possible. in panthor_vm_create_get_user_va_range()
1481 /* Pick a kernel VA range that's a power of two, to have a clear split. */ in panthor_vm_create_check_args()
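The panthor_vm_create_get_user_va_range() comments above describe how the default user VA range is chosen: use the task's CPU VM size when it fits inside the GPU VA range (so buffers can get the same CPU and GPU address), otherwise split a range larger than 4G evenly between user and kernel, and below 4G keep only a minimal kernel slice. A hedged sketch of that heuristic; the minimum kernel VA size is taken as a parameter because its exact value is not shown in these hits:

#include <stdint.h>

#define SZ_4G   (4ull << 30)

/* Sketch of the default user-VA-range choice described above. */
uint64_t pick_user_va_range(uint64_t gpu_va_range, uint64_t task_vm_size,
                            uint64_t min_kernel_va_size)
{
        /* Task VM smaller than the GPU VA range: identical CPU/GPU addresses possible. */
        if (task_vm_size < gpu_va_range)
                return task_vm_size;

        /* The GPU VA range is the limiting factor. */
        if (gpu_va_range > SZ_4G)
                return gpu_va_range / 2;                /* split evenly with the kernel */

        return gpu_va_range - min_kernel_va_size;       /* keep kernel VA as small as possible */
}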
1696 "Unhandled Page fault in AS%d at VA 0x%016llX\n" in panthor_mmu_irq_handler()
2032 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), in panthor_gpuva_sm_step_map()
2034 op->map.va.range); in panthor_gpuva_sm_step_map()
2050 struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base); in panthor_gpuva_sm_step_remap()
2083 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo)); in panthor_gpuva_sm_step_remap()
2088 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo)); in panthor_gpuva_sm_step_remap()
2098 struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base); in panthor_gpuva_sm_step_unmap()
2102 ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr, in panthor_gpuva_sm_step_unmap()
2103 unmap_vma->base.va.range); in panthor_gpuva_sm_step_unmap()
2157 ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, in panthor_vm_exec_op()
2162 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); in panthor_vm_exec_op()
2256 * @auto_kernel_va_start: Start of the auto-VA kernel range.
2257 * @auto_kernel_va_size: Size of the auto-VA kernel range.
2387 if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) in panthor_vm_bind_prepare_op_ctx()
2397 op->va, in panthor_vm_bind_prepare_op_ctx()
2409 return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size); in panthor_vm_bind_prepare_op_ctx()
2418 if (op->va || op->size) in panthor_vm_bind_prepare_op_ctx()
2582 * @va: Virtual address to map the object to.
2592 u64 offset, u64 size, u64 va, u32 flags) in panthor_vm_map_bo_range() argument
2597 ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags); in panthor_vm_map_bo_range()
2608 * panthor_vm_unmap_range() - Unmap a portion of the VA space
2610 * @va: Virtual address to unmap. Must be 4k aligned.
2618 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) in panthor_vm_unmap_range() argument
2623 ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size); in panthor_vm_unmap_range()
2729 /* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction, in panthor_mmu_init()