Lines Matching +full:resource +full:- +full:attachments
1 // SPDX-License-Identifier: MIT
3 * Copyright 2014-2018 Advanced Micro Devices, Inc.
23 #include <linux/dma-buf.h>
73 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
82 list_for_each_entry(entry, &mem->attachments, list) in kfd_mem_is_attached()
83 if (entry->bo_va->base.vm == avm) in kfd_mem_is_attached()
90 * reuse_dmamap() - Check whether adev can share the original
104 return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) || in reuse_dmamap()
105 (adev->dev->iommu_group == bo_adev->dev->iommu_group); in reuse_dmamap()
109 * System (TTM + userptr) memory - 15/16th System RAM
110 * TTM memory - 3/8th System RAM
121 mem = si.totalram - si.totalhigh; in amdgpu_amdkfd_gpuvm_init_mem_limits()
125 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6); in amdgpu_amdkfd_gpuvm_init_mem_limits()
129 kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT; in amdgpu_amdkfd_gpuvm_init_mem_limits()
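
A hedged aside, not part of the file above: the limits are plain shift arithmetic. Note that the fragment at line 125 subtracts mem >> 6 (about 1/64th) plus a fixed reserve, while the header comment still quotes the older 15/16th fraction. The stand-alone user-space sketch below, assuming 32 GiB of RAM and a 1 GiB reserve, only illustrates that arithmetic; it is not the driver's code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mem = 32ULL << 30;				/* assumption: 32 GiB of system RAM */
	uint64_t reserve = 1ULL << 30;				/* assumption: 1 GiB fixed reserve */
	uint64_t sys_limit = mem - (mem >> 6) - reserve;	/* shape of the fragment above */
	uint64_t comment_limit = mem - (mem >> 4);		/* the 15/16th quoted in the comment */

	printf("system limit: %llu MiB, 15/16th would be: %llu MiB\n",
	       (unsigned long long)(sys_limit >> 20),
	       (unsigned long long)(comment_limit >> 20));
	return 0;
}
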
156 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
167 * returns -ENOMEM in case of error, ZERO otherwise
175 uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0); in amdgpu_amdkfd_reserve_mem_limit()
197 return -EINVAL; in amdgpu_amdkfd_reserve_mem_limit()
200 if (adev->flags & AMD_IS_APU) { in amdgpu_amdkfd_reserve_mem_limit()
210 return -ENOMEM; in amdgpu_amdkfd_reserve_mem_limit()
223 (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > in amdgpu_amdkfd_reserve_mem_limit()
224 vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) { in amdgpu_amdkfd_reserve_mem_limit()
225 ret = -ENOMEM; in amdgpu_amdkfd_reserve_mem_limit()
235 adev->kfd.vram_used[xcp_id] += vram_needed; in amdgpu_amdkfd_reserve_mem_limit()
236 adev->kfd.vram_used_aligned[xcp_id] += in amdgpu_amdkfd_reserve_mem_limit()
237 (adev->flags & AMD_IS_APU) ? in amdgpu_amdkfd_reserve_mem_limit()
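
Another hedged aside: the over-commit test around line 223 above fails a VRAM reservation when the running total plus the new request exceeds what remains after the page-table reserve, the RAS reserve, and pinned VRAM. The stand-alone sketch below mirrors that comparison; the helper name and the numbers are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check around line 223 above. */
static bool vram_reservation_fits(uint64_t vram_size, uint64_t vram_used,
				  uint64_t needed, uint64_t reserved_for_pt,
				  uint64_t reserved_for_ras, uint64_t pinned)
{
	return vram_used + needed <=
	       vram_size - reserved_for_pt - reserved_for_ras - pinned;
}

int main(void)
{
	/* assumed: 16 GiB VRAM, 12 GiB used, 3 GiB requested, 512 MiB PT reserve,
	 * no RAS reserve, 1 GiB pinned
	 */
	bool ok = vram_reservation_fits(16ULL << 30, 12ULL << 30, 3ULL << 30,
					512ULL << 20, 0, 1ULL << 30);

	printf("reservation %s\n", ok ? "fits" : "would over-commit (-ENOMEM)");
	return 0;
}
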
255 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
256 kfd_mem_limit.ttm_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
264 adev->kfd.vram_used[xcp_id] -= size; in amdgpu_amdkfd_unreserve_mem_limit()
265 if (adev->flags & AMD_IS_APU) { in amdgpu_amdkfd_unreserve_mem_limit()
266 adev->kfd.vram_used_aligned[xcp_id] -= size; in amdgpu_amdkfd_unreserve_mem_limit()
267 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
268 kfd_mem_limit.ttm_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
270 adev->kfd.vram_used_aligned[xcp_id] -= in amdgpu_amdkfd_unreserve_mem_limit()
275 kfd_mem_limit.system_mem_used -= size; in amdgpu_amdkfd_unreserve_mem_limit()
282 WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, in amdgpu_amdkfd_unreserve_mem_limit()
295 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); in amdgpu_amdkfd_release_notify()
296 u32 alloc_flags = bo->kfd_bo->alloc_flags; in amdgpu_amdkfd_release_notify()
300 bo->xcp_id); in amdgpu_amdkfd_release_notify()
302 kfree(bo->kfd_bo); in amdgpu_amdkfd_release_notify()
306  * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
322 ret = amdgpu_bo_reserve(mem->bo, false); in create_dmamap_sg_bo()
326 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) in create_dmamap_sg_bo()
327 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT | in create_dmamap_sg_bo()
330 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1, in create_dmamap_sg_bo()
332 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0); in create_dmamap_sg_bo()
334 amdgpu_bo_unreserve(mem->bo); in create_dmamap_sg_bo()
338 return -EINVAL; in create_dmamap_sg_bo()
342 (*bo_out)->parent = amdgpu_bo_ref(mem->bo); in create_dmamap_sg_bo()
346 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
353  * NOTE: Must be called with BO reserved, i.e. the bo->tbo.base.resv lock held.
361 return -EINVAL; in amdgpu_amdkfd_remove_eviction_fence()
367 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, in amdgpu_amdkfd_remove_eviction_fence()
383 while (root->parent) in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
384 root = root->parent; in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
386 vm_bo = root->vm_bo; in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
390 vm = vm_bo->vm; in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
394 info = vm->process_info; in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
395 if (!info || !info->eviction_fence) in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
398 ef = container_of(dma_fence_get(&info->eviction_fence->base), in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
401 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
403 dma_resv_unlock(bo->tbo.base.resv); in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
405 dma_fence_put(&ef->base); in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
415 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), in amdgpu_amdkfd_bo_validate()
417 return -EINVAL; in amdgpu_amdkfd_bo_validate()
420 if (bo->tbo.pin_count) in amdgpu_amdkfd_bo_validate()
425 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_amdkfd_bo_validate()
448 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); in amdgpu_amdkfd_bo_validate_and_fence()
452 dma_resv_add_fence(bo->tbo.base.resv, fence, in amdgpu_amdkfd_bo_validate_and_fence()
463 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); in amdgpu_amdkfd_validate_vm_bo()
466 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
476 struct amdgpu_bo *pd = vm->root.bo; in vm_validate_pt_pd_bos()
477 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_validate_pt_pd_bos()
487 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); in vm_validate_pt_pd_bos()
494 struct amdgpu_bo *pd = vm->root.bo; in vm_update_pds()
495 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in vm_update_pds()
502 return amdgpu_sync_fence(sync, vm->last_update); in vm_update_pds()
510 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) in get_pte_flags()
512 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) in get_pte_flags()
519 * create_sg_table() - Create an sg_table for a contiguous DMA addr range
542 sg_dma_address(sg->sgl) = addr; in create_sg_table()
543 sg->sgl->length = size; in create_sg_table()
545 sg->sgl->dma_length = size; in create_sg_table()
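
A hedged reconstruction rather than a copy from the file: pieced together from lines 542-545 and the kernel-doc at line 519, create_sg_table() plausibly allocates a one-entry scatterlist and stamps the contiguous DMA address and length into it, roughly as below.

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: a one-entry sg_table describing a single contiguous DMA range. */
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg_dma_address(sg->sgl) = addr;		/* line 542 above */
	sg->sgl->length = size;			/* line 543 above */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;		/* line 545 above */
#endif
	return sg;
}
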
555 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_userptr()
558 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_userptr()
559 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmamap_userptr()
560 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
561 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmamap_userptr()
564 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) in kfd_mem_dmamap_userptr()
565 return -EINVAL; in kfd_mem_dmamap_userptr()
567 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); in kfd_mem_dmamap_userptr()
568 if (unlikely(!ttm->sg)) in kfd_mem_dmamap_userptr()
569 return -ENOMEM; in kfd_mem_dmamap_userptr()
572 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, in kfd_mem_dmamap_userptr()
573 ttm->num_pages, 0, in kfd_mem_dmamap_userptr()
574 (u64)ttm->num_pages << PAGE_SHIFT, in kfd_mem_dmamap_userptr()
579 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmamap_userptr()
584 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_userptr()
591 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmamap_userptr()
594 sg_free_table(ttm->sg); in kfd_mem_dmamap_userptr()
596 kfree(ttm->sg); in kfd_mem_dmamap_userptr()
597 ttm->sg = NULL; in kfd_mem_dmamap_userptr()
605 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_dmabuf()
609 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_dmabuf()
614 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_dmabuf()
618 * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
619 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
631 * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
633 * - Signal TTM to mark memory pointed to by requesting device's BO as GPU
638 * - Mapping of DOORBELL or MMIO BO of same or peer device
639  * - Validating an evicted DOORBELL or MMIO BO on device seeking access
641 * Return: ZERO if successful, NON-ZERO otherwise
648 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmamap_sg_bo()
649 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmamap_sg_bo()
650 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmamap_sg_bo()
657 mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP); in kfd_mem_dmamap_sg_bo()
658 if (unlikely(ttm->sg)) { in kfd_mem_dmamap_sg_bo()
659 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio); in kfd_mem_dmamap_sg_bo()
660 return -EINVAL; in kfd_mem_dmamap_sg_bo()
663 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmamap_sg_bo()
665 dma_addr = mem->bo->tbo.sg->sgl->dma_address; in kfd_mem_dmamap_sg_bo()
666 pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
668 dma_addr = dma_map_resource(adev->dev, dma_addr, in kfd_mem_dmamap_sg_bo()
669 mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmamap_sg_bo()
670 ret = dma_mapping_error(adev->dev, dma_addr); in kfd_mem_dmamap_sg_bo()
675 ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length); in kfd_mem_dmamap_sg_bo()
676 if (unlikely(!ttm->sg)) { in kfd_mem_dmamap_sg_bo()
677 ret = -ENOMEM; in kfd_mem_dmamap_sg_bo()
682 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmamap_sg_bo()
689 sg_free_table(ttm->sg); in kfd_mem_dmamap_sg_bo()
690 kfree(ttm->sg); in kfd_mem_dmamap_sg_bo()
691 ttm->sg = NULL; in kfd_mem_dmamap_sg_bo()
693 dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length, in kfd_mem_dmamap_sg_bo()
702 switch (attachment->type) { in kfd_mem_dmamap_attachment()
714 return -EINVAL; in kfd_mem_dmamap_attachment()
722 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_userptr()
725 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmaunmap_userptr()
726 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmaunmap_userptr()
727 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmaunmap_userptr()
729 if (unlikely(!ttm->sg)) in kfd_mem_dmaunmap_userptr()
733 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmaunmap_userptr()
735 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmaunmap_userptr()
736 sg_free_table(ttm->sg); in kfd_mem_dmaunmap_userptr()
737 kfree(ttm->sg); in kfd_mem_dmaunmap_userptr()
738 ttm->sg = NULL; in kfd_mem_dmaunmap_userptr()
744 /* This is a no-op. We don't want to trigger eviction fences when in kfd_mem_dmaunmap_dmabuf()
751 * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
752 * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
756 * - Signal TTM to mark memory pointed to by BO as GPU inaccessible
757 * - Free SG Table that is used to encapsulate DMA mapped memory of
771 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_dmaunmap_sg_bo()
772 struct amdgpu_device *adev = attachment->adev; in kfd_mem_dmaunmap_sg_bo()
773 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmaunmap_sg_bo()
776 if (unlikely(!ttm->sg)) { in kfd_mem_dmaunmap_sg_bo()
782 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in kfd_mem_dmaunmap_sg_bo()
784 dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_dmaunmap_sg_bo()
786 dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address, in kfd_mem_dmaunmap_sg_bo()
787 ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC); in kfd_mem_dmaunmap_sg_bo()
788 sg_free_table(ttm->sg); in kfd_mem_dmaunmap_sg_bo()
789 kfree(ttm->sg); in kfd_mem_dmaunmap_sg_bo()
790 ttm->sg = NULL; in kfd_mem_dmaunmap_sg_bo()
791 bo->tbo.sg = NULL; in kfd_mem_dmaunmap_sg_bo()
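
A hedged kernel-style sketch, not taken from the file: the map and unmap fragments above pair dma_map_resource() with dma_unmap_resource() on the same device, size, direction and DMA_ATTR_SKIP_CPU_SYNC. The helper names below are invented for illustration only.

#include <linux/dma-mapping.h>

/* Hypothetical helpers showing the pairing used for DOORBELL/MMIO pages above. */
static dma_addr_t example_map_mmio(struct device *dev, phys_addr_t phys,
				   size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_resource(dev, phys, size, dir,
					   DMA_ATTR_SKIP_CPU_SYNC);

	return dma_mapping_error(dev, addr) ? DMA_MAPPING_ERROR : addr;
}

static void example_unmap_mmio(struct device *dev, dma_addr_t addr,
			       size_t size, enum dma_data_direction dir)
{
	dma_unmap_resource(dev, addr, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
}
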
798 switch (attachment->type) { in kfd_mem_dmaunmap_attachment()
817 if (!mem->dmabuf) { in kfd_mem_export_dmabuf()
821 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_export_dmabuf()
822 dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file, in kfd_mem_export_dmabuf()
823 mem->gem_handle, in kfd_mem_export_dmabuf()
824 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? in kfd_mem_export_dmabuf()
828 mem->dmabuf = dmabuf; in kfd_mem_export_dmabuf()
845 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); in kfd_mem_attach_dmabuf()
850 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; in kfd_mem_attach_dmabuf()
855 /* kfd_mem_attach - Add a BO to a VM
864 * 3. Determine ASIC-specific PTE flags
871 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); in kfd_mem_attach()
872 unsigned long bo_size = mem->bo->tbo.base.size; in kfd_mem_attach()
873 uint64_t va = mem->va; in kfd_mem_attach()
882 return -EINVAL; in kfd_mem_attach()
893 if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) && in kfd_mem_attach()
894 ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) || in kfd_mem_attach()
895 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) || in kfd_mem_attach()
896 (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) { in kfd_mem_attach()
897 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM) in kfd_mem_attach()
900 return -EINVAL; in kfd_mem_attach()
906 ret = -ENOMEM; in kfd_mem_attach()
910 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, in kfd_mem_attach()
913 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) || in kfd_mem_attach()
914 (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
915 (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) || in kfd_mem_attach()
921 attachment[i]->type = KFD_MEM_ATT_SHARED; in kfd_mem_attach()
922 bo[i] = mem->bo; in kfd_mem_attach()
923 drm_gem_object_get(&bo[i]->tbo.base); in kfd_mem_attach()
926 attachment[i]->type = KFD_MEM_ATT_SHARED; in kfd_mem_attach()
928 drm_gem_object_get(&bo[i]->tbo.base); in kfd_mem_attach()
929 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in kfd_mem_attach()
930 /* Create an SG BO to DMA-map userptrs on other GPUs */ in kfd_mem_attach()
931 attachment[i]->type = KFD_MEM_ATT_USERPTR; in kfd_mem_attach()
936 } else if (mem->bo->tbo.type == ttm_bo_type_sg) { in kfd_mem_attach()
937 WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL || in kfd_mem_attach()
938 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP), in kfd_mem_attach()
940 attachment[i]->type = KFD_MEM_ATT_SG; in kfd_mem_attach()
945 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT || in kfd_mem_attach()
946 mem->domain == AMDGPU_GEM_DOMAIN_VRAM) { in kfd_mem_attach()
947 attachment[i]->type = KFD_MEM_ATT_DMABUF; in kfd_mem_attach()
954 ret = -EINVAL; in kfd_mem_attach()
968 ++bo_va->ref_count; in kfd_mem_attach()
969 attachment[i]->bo_va = bo_va; in kfd_mem_attach()
971 if (unlikely(!attachment[i]->bo_va)) { in kfd_mem_attach()
972 ret = -ENOMEM; in kfd_mem_attach()
977 attachment[i]->va = va; in kfd_mem_attach()
978 attachment[i]->pte_flags = get_pte_flags(adev, mem); in kfd_mem_attach()
979 attachment[i]->adev = adev; in kfd_mem_attach()
980 list_add(&attachment[i]->list, &mem->attachments); in kfd_mem_attach()
988 for (; i >= 0; i--) { in kfd_mem_attach()
991 if (attachment[i]->bo_va) { in kfd_mem_attach()
993 if (--attachment[i]->bo_va->ref_count == 0) in kfd_mem_attach()
994 amdgpu_vm_bo_del(adev, attachment[i]->bo_va); in kfd_mem_attach()
996 list_del(&attachment[i]->list); in kfd_mem_attach()
999 drm_gem_object_put(&bo[i]->tbo.base); in kfd_mem_attach()
1007 struct amdgpu_bo *bo = attachment->bo_va->base.bo; in kfd_mem_detach()
1010 attachment->va, attachment); in kfd_mem_detach()
1011 if (--attachment->bo_va->ref_count == 0) in kfd_mem_detach()
1012 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va); in kfd_mem_detach()
1013 drm_gem_object_put(&bo->tbo.base); in kfd_mem_detach()
1014 list_del(&attachment->list); in kfd_mem_detach()
1022 mutex_lock(&process_info->lock); in add_kgd_mem_to_kfd_bo_list()
1024 list_add_tail(&mem->validate_list, in add_kgd_mem_to_kfd_bo_list()
1025 &process_info->userptr_valid_list); in add_kgd_mem_to_kfd_bo_list()
1027 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list); in add_kgd_mem_to_kfd_bo_list()
1028 mutex_unlock(&process_info->lock); in add_kgd_mem_to_kfd_bo_list()
1034 mutex_lock(&process_info->lock); in remove_kgd_mem_from_kfd_bo_list()
1035 list_del(&mem->validate_list); in remove_kgd_mem_from_kfd_bo_list()
1036 mutex_unlock(&process_info->lock); in remove_kgd_mem_from_kfd_bo_list()
1046 * Takes the process_info->lock to protect against concurrent restore
1054 struct amdkfd_process_info *process_info = mem->process_info; in init_user_pages()
1055 struct amdgpu_bo *bo = mem->bo; in init_user_pages()
1060 mutex_lock(&process_info->lock); in init_user_pages()
1062 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); in init_user_pages()
1082 mutex_lock(&process_info->notifier_lock); in init_user_pages()
1083 mem->invalid++; in init_user_pages()
1084 mutex_unlock(&process_info->notifier_lock); in init_user_pages()
1085 mutex_unlock(&process_info->lock); in init_user_pages()
1089 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); in init_user_pages()
1091 if (ret == -EAGAIN) in init_user_pages()
1103 amdgpu_bo_placement_from_domain(bo, mem->domain); in init_user_pages()
1104 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in init_user_pages()
1110 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); in init_user_pages()
1115 mutex_unlock(&process_info->lock); in init_user_pages()
1140 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1149 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_vm()
1154 ctx->n_vms = 1; in reserve_bo_and_vm()
1155 ctx->sync = &mem->sync; in reserve_bo_and_vm()
1156 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); in reserve_bo_and_vm()
1157 drm_exec_until_all_locked(&ctx->exec) { in reserve_bo_and_vm()
1158 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); in reserve_bo_and_vm()
1159 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_vm()
1163 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); in reserve_bo_and_vm()
1164 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_vm()
1172 drm_exec_fini(&ctx->exec); in reserve_bo_and_vm()
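
A hedged sketch of the drm_exec locking pattern visible in the fragment above (init, retry-on-contention loop, fini on error): the helper name is invented, the amdgpu types are assumed to come from the driver headers, and on success the locks intentionally stay held until a later drm_exec_fini(), as unreserve_bo_and_vms() does at line 1244.

#include <drm/drm_exec.h>
/* assumed driver headers providing struct amdgpu_vm and amdgpu_vm_lock_pd() */
#include "amdgpu.h"
#include "amdgpu_vm.h"

/* Hypothetical: lock one GEM object plus a VM's PD/PT BOs with drm_exec. */
static int example_lock_bo_and_vm(struct drm_exec *exec, struct amdgpu_vm *vm,
				  struct drm_gem_object *obj)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(exec) {
		ret = amdgpu_vm_lock_pd(vm, exec, 2);
		drm_exec_retry_on_contention(exec);
		if (unlikely(ret))
			goto error;

		ret = drm_exec_prepare_obj(exec, obj, 1);
		drm_exec_retry_on_contention(exec);
		if (unlikely(ret))
			goto error;
	}
	return 0;	/* locks stay held; caller releases them with drm_exec_fini() */

error:
	drm_exec_fini(exec);
	return ret;
}
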
1177 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1191 struct amdgpu_bo *bo = mem->bo; in reserve_bo_and_cond_vms()
1194 ctx->sync = &mem->sync; in reserve_bo_and_cond_vms()
1195 drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | in reserve_bo_and_cond_vms()
1197 drm_exec_until_all_locked(&ctx->exec) { in reserve_bo_and_cond_vms()
1198 ctx->n_vms = 0; in reserve_bo_and_cond_vms()
1199 list_for_each_entry(entry, &mem->attachments, list) { in reserve_bo_and_cond_vms()
1200 if ((vm && vm != entry->bo_va->base.vm) || in reserve_bo_and_cond_vms()
1201 (entry->is_mapped != map_type in reserve_bo_and_cond_vms()
1205 ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm, in reserve_bo_and_cond_vms()
1206 &ctx->exec, 2); in reserve_bo_and_cond_vms()
1207 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_cond_vms()
1210 ++ctx->n_vms; in reserve_bo_and_cond_vms()
1213 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); in reserve_bo_and_cond_vms()
1214 drm_exec_retry_on_contention(&ctx->exec); in reserve_bo_and_cond_vms()
1222 drm_exec_fini(&ctx->exec); in reserve_bo_and_cond_vms()
1227 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1242 ret = amdgpu_sync_wait(ctx->sync, intr); in unreserve_bo_and_vms()
1244 drm_exec_fini(&ctx->exec); in unreserve_bo_and_vms()
1245 ctx->sync = NULL; in unreserve_bo_and_vms()
1253 struct amdgpu_bo_va *bo_va = entry->bo_va; in unmap_bo_from_gpuvm()
1254 struct amdgpu_device *adev = entry->adev; in unmap_bo_from_gpuvm()
1255 struct amdgpu_vm *vm = bo_va->base.vm; in unmap_bo_from_gpuvm()
1257 if (bo_va->queue_refcount) { in unmap_bo_from_gpuvm()
1258 pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount); in unmap_bo_from_gpuvm()
1259 return -EBUSY; in unmap_bo_from_gpuvm()
1262 amdgpu_vm_bo_unmap(adev, bo_va, entry->va); in unmap_bo_from_gpuvm()
1264 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); in unmap_bo_from_gpuvm()
1266 amdgpu_sync_fence(sync, bo_va->last_pt_update); in unmap_bo_from_gpuvm()
1275 struct amdgpu_bo_va *bo_va = entry->bo_va; in update_gpuvm_pte()
1276 struct amdgpu_device *adev = entry->adev; in update_gpuvm_pte()
1290 return amdgpu_sync_fence(sync, bo_va->last_pt_update); in update_gpuvm_pte()
1301 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, in map_bo_to_gpuvm()
1302 amdgpu_bo_size(entry->bo_va->base.bo), in map_bo_to_gpuvm()
1303 entry->pte_flags); in map_bo_to_gpuvm()
1306 entry->va, ret); in map_bo_to_gpuvm()
1333 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_validate_vms()
1349 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_sync_pds_resv()
1351 struct amdgpu_bo *pd = peer_vm->root.bo; in process_sync_pds_resv()
1353 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, in process_sync_pds_resv()
1369 list_for_each_entry(peer_vm, &process_info->vm_list_head, in process_update_pds()
1388 return -ENOMEM; in init_kfd_vm()
1390 mutex_init(&info->lock); in init_kfd_vm()
1391 mutex_init(&info->notifier_lock); in init_kfd_vm()
1392 INIT_LIST_HEAD(&info->vm_list_head); in init_kfd_vm()
1393 INIT_LIST_HEAD(&info->kfd_bo_list); in init_kfd_vm()
1394 INIT_LIST_HEAD(&info->userptr_valid_list); in init_kfd_vm()
1395 INIT_LIST_HEAD(&info->userptr_inval_list); in init_kfd_vm()
1397 info->eviction_fence = in init_kfd_vm()
1399 current->mm, in init_kfd_vm()
1401 if (!info->eviction_fence) { in init_kfd_vm()
1403 ret = -ENOMEM; in init_kfd_vm()
1407 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); in init_kfd_vm()
1408 INIT_DELAYED_WORK(&info->restore_userptr_work, in init_kfd_vm()
1414 vm->process_info = *process_info; in init_kfd_vm()
1417 ret = amdgpu_bo_reserve(vm->root.bo, true); in init_kfd_vm()
1425 ret = amdgpu_bo_sync_wait(vm->root.bo, in init_kfd_vm()
1429 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); in init_kfd_vm()
1432 dma_resv_add_fence(vm->root.bo->tbo.base.resv, in init_kfd_vm()
1433 &vm->process_info->eviction_fence->base, in init_kfd_vm()
1435 amdgpu_bo_unreserve(vm->root.bo); in init_kfd_vm()
1438 mutex_lock(&vm->process_info->lock); in init_kfd_vm()
1439 list_add_tail(&vm->vm_list_node, in init_kfd_vm()
1440 &(vm->process_info->vm_list_head)); in init_kfd_vm()
1441 vm->process_info->n_vms++; in init_kfd_vm()
1443 *ef = dma_fence_get(&vm->process_info->eviction_fence->base); in init_kfd_vm()
1444 mutex_unlock(&vm->process_info->lock); in init_kfd_vm()
1451 amdgpu_bo_unreserve(vm->root.bo); in init_kfd_vm()
1453 vm->process_info = NULL; in init_kfd_vm()
1455 dma_fence_put(&info->eviction_fence->base); in init_kfd_vm()
1457 put_pid(info->pid); in init_kfd_vm()
1459 mutex_destroy(&info->lock); in init_kfd_vm()
1460 mutex_destroy(&info->notifier_lock); in init_kfd_vm()
1467  * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1471  *   - USERPTR BOs are UNPINNABLE and will return an error
1472 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1475 * Return: ZERO if successful in pinning, Non-Zero in case of error.
1485 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) { in amdgpu_amdkfd_gpuvm_pin_bo()
1490 if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) { in amdgpu_amdkfd_gpuvm_pin_bo()
1494 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_amdkfd_gpuvm_pin_bo()
1496 pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret); in amdgpu_amdkfd_gpuvm_pin_bo()
1513  * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1516  *   - Is an illegal request for USERPTR BOs and is ignored
1517 * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1541 if (avm->pasid) { in amdgpu_amdkfd_gpuvm_set_vm_pasid()
1542 amdgpu_pasid_free(avm->pasid); in amdgpu_amdkfd_gpuvm_set_vm_pasid()
1561 if (avm->process_info) in amdgpu_amdkfd_gpuvm_acquire_process_vm()
1562 return -EINVAL; in amdgpu_amdkfd_gpuvm_acquire_process_vm()
1582 struct amdkfd_process_info *process_info = vm->process_info; in amdgpu_amdkfd_gpuvm_destroy_cb()
1588 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1589 process_info->n_vms--; in amdgpu_amdkfd_gpuvm_destroy_cb()
1590 list_del(&vm->vm_list_node); in amdgpu_amdkfd_gpuvm_destroy_cb()
1591 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1593 vm->process_info = NULL; in amdgpu_amdkfd_gpuvm_destroy_cb()
1595 /* Release per-process resources when last compute VM is destroyed */ in amdgpu_amdkfd_gpuvm_destroy_cb()
1596 if (!process_info->n_vms) { in amdgpu_amdkfd_gpuvm_destroy_cb()
1597 WARN_ON(!list_empty(&process_info->kfd_bo_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1598 WARN_ON(!list_empty(&process_info->userptr_valid_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1599 WARN_ON(!list_empty(&process_info->userptr_inval_list)); in amdgpu_amdkfd_gpuvm_destroy_cb()
1601 dma_fence_put(&process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_destroy_cb()
1602 cancel_delayed_work_sync(&process_info->restore_userptr_work); in amdgpu_amdkfd_gpuvm_destroy_cb()
1603 put_pid(process_info->pid); in amdgpu_amdkfd_gpuvm_destroy_cb()
1604 mutex_destroy(&process_info->lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1605 mutex_destroy(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_destroy_cb()
1634 struct amdgpu_bo *pd = avm->root.bo; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1635 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1637 if (adev->asic_type < CHIP_VEGA10) in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1638 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1639 return avm->pd_phys_addr; in amdgpu_amdkfd_gpuvm_get_process_page_dir()
1646 mutex_lock(&pinfo->lock); in amdgpu_amdkfd_block_mmu_notifications()
1647 WRITE_ONCE(pinfo->block_mmu_notifications, true); in amdgpu_amdkfd_block_mmu_notifications()
1648 mutex_unlock(&pinfo->lock); in amdgpu_amdkfd_block_mmu_notifications()
1656 mutex_lock(&pinfo->lock); in amdgpu_amdkfd_criu_resume()
1658 mutex_lock(&pinfo->notifier_lock); in amdgpu_amdkfd_criu_resume()
1659 pinfo->evicted_bos++; in amdgpu_amdkfd_criu_resume()
1660 mutex_unlock(&pinfo->notifier_lock); in amdgpu_amdkfd_criu_resume()
1661 if (!READ_ONCE(pinfo->block_mmu_notifications)) { in amdgpu_amdkfd_criu_resume()
1662 ret = -EINVAL; in amdgpu_amdkfd_criu_resume()
1665 WRITE_ONCE(pinfo->block_mmu_notifications, false); in amdgpu_amdkfd_criu_resume()
1667 &pinfo->restore_userptr_work, 0); in amdgpu_amdkfd_criu_resume()
1670 mutex_unlock(&pinfo->lock); in amdgpu_amdkfd_criu_resume()
1680 uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0); in amdgpu_amdkfd_get_available_memory()
1686 - adev->kfd.vram_used_aligned[xcp_id] in amdgpu_amdkfd_get_available_memory()
1687 - atomic64_read(&adev->vram_pin_size) in amdgpu_amdkfd_get_available_memory()
1688 - reserved_for_pt in amdgpu_amdkfd_get_available_memory()
1689 - reserved_for_ras; in amdgpu_amdkfd_get_available_memory()
1691 if (adev->flags & AMD_IS_APU) { in amdgpu_amdkfd_get_available_memory()
1694 kfd_mem_limit.max_system_mem_limit - in amdgpu_amdkfd_get_available_memory()
1697 ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - in amdgpu_amdkfd_get_available_memory()
1729 int8_t xcp_id = -1; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1739 if (adev->flags & AMD_IS_APU) { in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1752 xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1753 0 : fpriv->xcp_id; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1764 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1770 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1773 return -ENOMEM; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1775 return -EINVAL; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1788 ret = -ENOMEM; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1791 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1792 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1793 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1799 if ((*mem)->aql_queue) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1803 (*mem)->alloc_flags = flags; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1805 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1815 va, (*mem)->aql_queue ? size << 1 : size, in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1825 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1830 ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1835 bo->tbo.sg = sg; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1836 bo->tbo.ttm->sg = sg; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1838 bo->kfd_bo = *mem; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1839 (*mem)->bo = bo; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1841 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1843 (*mem)->va = va; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1844 (*mem)->domain = domain; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1845 (*mem)->mapped_to_gpu_memory = 0; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1846 (*mem)->process_info = avm->process_info; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1848 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1862 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1863 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1865 mutex_lock(&avm->process_info->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1866 if (avm->process_info->eviction_fence && in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1867 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1869 &avm->process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1870 mutex_unlock(&avm->process_info->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1883 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1884 drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1886 drm_vma_node_revoke(&gobj->vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1893 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1894 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
1911 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1912 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1913 bool use_release_notifier = (mem->bo->kfd_bo == mem); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1920 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1923 if (mem->alloc_flags & in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1926 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1929 mapped_to_gpu_memory = mem->mapped_to_gpu_memory; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1930 is_imported = mem->is_imported; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1931 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1938 mem->va, bo_size); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1939 return -EBUSY; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1943 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1944 list_del(&mem->validate_list); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1945 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1948 if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1949 amdgpu_hmm_unregister(mem->bo); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1950 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1951 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1952 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1959 amdgpu_amdkfd_remove_eviction_fence(mem->bo, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1960 process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1961 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1962 mem->va + bo_size * (1 + mem->aql_queue)); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1965 list_for_each_entry_safe(entry, tmp, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1973 amdgpu_sync_free(&mem->sync); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1978 if (mem->bo->tbo.sg) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1979 sg_free_table(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1980 kfree(mem->bo->tbo.sg); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1989 (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1990 ((adev->flags & AMD_IS_APU) && in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1991 mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1998 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
1999 drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2000 if (mem->dmabuf) { in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2001 dma_buf_put(mem->dmabuf); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2002 mem->dmabuf = NULL; in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2004 mutex_destroy(&mem->lock); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2010 drm_gem_object_put(&mem->bo->tbo.base); in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
2035 bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2038 return -EINVAL; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2045 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2051 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2052 mutex_lock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2053 is_invalid_userptr = !!mem->invalid; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2054 mutex_unlock(&mem->process_info->notifier_lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2057 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2059 domain = mem->domain; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2060 bo_size = bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2062 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2063 mem->va, in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2064 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2068 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2082 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2083 bo->tbo.resource->mem_type == TTM_PL_SYSTEM) in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2090 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2091 if (entry->bo_va->base.vm != avm || entry->is_mapped) in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2094 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2095 entry->va, entry->va + bo_size, entry); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2110 entry->is_mapped = true; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2111 mem->mapped_to_gpu_memory++; in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2113 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2123 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2124 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
2136 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2138 ret = amdgpu_bo_reserve(mem->bo, true); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2142 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2143 if (entry->bo_va->base.vm != vm) in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2145 if (entry->bo_va->base.bo->tbo.ttm && in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2146 !entry->bo_va->base.bo->tbo.ttm->sg) in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2152 amdgpu_bo_unreserve(mem->bo); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2154 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_dmaunmap_mem()
2163 unsigned long bo_size = mem->bo->tbo.base.size; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2168 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2175 ret = -EINVAL; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2183 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2184 mem->va, in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2185 mem->va + bo_size * (1 + mem->aql_queue), in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2188 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2189 if (entry->bo_va->base.vm != avm || !entry->is_mapped) in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2192 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2193 entry->va, entry->va + bo_size, entry); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2199 entry->is_mapped = false; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2201 mem->mapped_to_gpu_memory--; in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2203 mem->mapped_to_gpu_memory); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2209 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
2221 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2222 amdgpu_sync_clone(&mem->sync, &sync); in amdgpu_amdkfd_gpuvm_sync_memory()
2223 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_sync_memory()
2231 * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2254 ret = amdgpu_ttm_alloc_gart(&bo->tbo); in amdgpu_amdkfd_map_gtt_bo_to_gart()
2261 bo, bo->vm_bo->vm->process_info->eviction_fence); in amdgpu_amdkfd_map_gtt_bo_to_gart()
2278 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2295 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2297 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2299 return -EINVAL; in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2302 mutex_lock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2323 bo, mem->process_info->eviction_fence); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2330 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2338 mutex_unlock(&mem->process_info->lock); in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
2343 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2353 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel()
2364 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2365 *mem = *adev->gmc.vm_fault_info; in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2367 atomic_set(&adev->gmc.vm_fault_info_updated, 0); in amdgpu_amdkfd_gpuvm_get_vm_fault_info()
2384 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | in import_obj_create()
2387 return -EINVAL; in import_obj_create()
2391 return -ENOMEM; in import_obj_create()
2393 ret = drm_vma_node_allow(&obj->vma_node, drm_priv); in import_obj_create()
2403 INIT_LIST_HEAD(&(*mem)->attachments); in import_obj_create()
2404 mutex_init(&(*mem)->lock); in import_obj_create()
2406 (*mem)->alloc_flags = in import_obj_create()
2407 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? in import_obj_create()
2413 (*mem)->dmabuf = dma_buf; in import_obj_create()
2414 (*mem)->bo = bo; in import_obj_create()
2415 (*mem)->va = va; in import_obj_create()
2416 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && in import_obj_create()
2417 !(adev->flags & AMD_IS_APU) ? in import_obj_create()
2420 (*mem)->mapped_to_gpu_memory = 0; in import_obj_create()
2421 (*mem)->process_info = avm->process_info; in import_obj_create()
2422 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); in import_obj_create()
2423 amdgpu_sync_create(&(*mem)->sync); in import_obj_create()
2424 (*mem)->is_imported = true; in import_obj_create()
2426 mutex_lock(&avm->process_info->lock); in import_obj_create()
2427 if (avm->process_info->eviction_fence && in import_obj_create()
2428 !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) in import_obj_create()
2429 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, in import_obj_create()
2430 &avm->process_info->eviction_fence->base); in import_obj_create()
2431 mutex_unlock(&avm->process_info->lock); in import_obj_create()
2438 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); in import_obj_create()
2439 drm_vma_node_revoke(&obj->vma_node, drm_priv); in import_obj_create()
2454 ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2458 obj = drm_gem_object_lookup(adev->kfd.client.file, handle); in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2460 ret = -EINVAL; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2464 ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2469 (*mem)->gem_handle = handle; in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2476 drm_gem_handle_delete(adev->kfd.client.file, handle); in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
2485 mutex_lock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2490 get_dma_buf(mem->dmabuf); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2491 *dma_buf = mem->dmabuf; in amdgpu_amdkfd_gpuvm_export_dmabuf()
2493 mutex_unlock(&mem->lock); in amdgpu_amdkfd_gpuvm_export_dmabuf()
2510 struct amdkfd_process_info *process_info = mem->process_info; in amdgpu_amdkfd_evict_userptr()
2516 if (READ_ONCE(process_info->block_mmu_notifications)) in amdgpu_amdkfd_evict_userptr()
2519 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_evict_userptr()
2522 mem->invalid++; in amdgpu_amdkfd_evict_userptr()
2523 if (++process_info->evicted_bos == 1) { in amdgpu_amdkfd_evict_userptr()
2525 r = kgd2kfd_quiesce_mm(mni->mm, in amdgpu_amdkfd_evict_userptr()
2530 &process_info->restore_userptr_work, in amdgpu_amdkfd_evict_userptr()
2533 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_evict_userptr()
2553 mutex_lock(&process_info->notifier_lock); in update_invalid_user_pages()
2557 &process_info->userptr_valid_list, in update_invalid_user_pages()
2559 if (mem->invalid) in update_invalid_user_pages()
2560 list_move_tail(&mem->validate_list, in update_invalid_user_pages()
2561 &process_info->userptr_inval_list); in update_invalid_user_pages()
2564 list_for_each_entry(mem, &process_info->userptr_inval_list, in update_invalid_user_pages()
2566 invalid = mem->invalid; in update_invalid_user_pages()
2573 bo = mem->bo; in update_invalid_user_pages()
2575 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); in update_invalid_user_pages()
2576 mem->range = NULL; in update_invalid_user_pages()
2581 mutex_unlock(&process_info->notifier_lock); in update_invalid_user_pages()
2586 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) { in update_invalid_user_pages()
2588 return -EAGAIN; in update_invalid_user_pages()
2590 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in update_invalid_user_pages()
2595 return -EAGAIN; in update_invalid_user_pages()
2600 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, in update_invalid_user_pages()
2601 &mem->range); in update_invalid_user_pages()
2605 /* Return -EFAULT bad address error as success. It will in update_invalid_user_pages()
2610 * Return other error -EBUSY or -ENOMEM to retry restore in update_invalid_user_pages()
2612 if (ret != -EFAULT) in update_invalid_user_pages()
2618 mutex_lock(&process_info->notifier_lock); in update_invalid_user_pages()
2623 if (mem->invalid != invalid) { in update_invalid_user_pages()
2624 ret = -EAGAIN; in update_invalid_user_pages()
2628 if (mem->range) in update_invalid_user_pages()
2629 mem->invalid = 0; in update_invalid_user_pages()
2633 mutex_unlock(&process_info->notifier_lock); in update_invalid_user_pages()
2660 list_for_each_entry(peer_vm, &process_info->vm_list_head, in validate_invalid_user_pages()
2669 list_for_each_entry(mem, &process_info->userptr_inval_list, in validate_invalid_user_pages()
2673 gobj = &mem->bo->tbo.base; in validate_invalid_user_pages()
2687 &process_info->userptr_inval_list, in validate_invalid_user_pages()
2691 bo = mem->bo; in validate_invalid_user_pages()
2694 if (bo->tbo.ttm->pages[0]) { in validate_invalid_user_pages()
2695 amdgpu_bo_placement_from_domain(bo, mem->domain); in validate_invalid_user_pages()
2696 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in validate_invalid_user_pages()
2709 list_for_each_entry(attachment, &mem->attachments, list) { in validate_invalid_user_pages()
2710 if (!attachment->is_mapped) in validate_invalid_user_pages()
2718 mutex_lock(&process_info->notifier_lock); in validate_invalid_user_pages()
2719 mem->invalid++; in validate_invalid_user_pages()
2720 mutex_unlock(&process_info->notifier_lock); in validate_invalid_user_pages()
2747 &process_info->userptr_inval_list, in confirm_valid_user_pages_locked()
2752 if (!mem->range) in confirm_valid_user_pages_locked()
2757 mem->bo->tbo.ttm, mem->range); in confirm_valid_user_pages_locked()
2759 mem->range = NULL; in confirm_valid_user_pages_locked()
2761 WARN(!mem->invalid, "Invalid BO not marked invalid"); in confirm_valid_user_pages_locked()
2762 ret = -EAGAIN; in confirm_valid_user_pages_locked()
2766 if (mem->invalid) { in confirm_valid_user_pages_locked()
2768 ret = -EAGAIN; in confirm_valid_user_pages_locked()
2772 list_move_tail(&mem->validate_list, in confirm_valid_user_pages_locked()
2773 &process_info->userptr_valid_list); in confirm_valid_user_pages_locked()
2795 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2796 evicted_bos = process_info->evicted_bos; in amdgpu_amdkfd_restore_userptr_worker()
2797 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2802 usertask = get_pid_task(process_info->pid, PIDTYPE_PID); in amdgpu_amdkfd_restore_userptr_worker()
2811 mutex_lock(&process_info->lock); in amdgpu_amdkfd_restore_userptr_worker()
2819 if (!list_empty(&process_info->userptr_inval_list)) { in amdgpu_amdkfd_restore_userptr_worker()
2828 mutex_lock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2829 if (process_info->evicted_bos != evicted_bos) in amdgpu_amdkfd_restore_userptr_worker()
2837 process_info->evicted_bos = evicted_bos = 0; in amdgpu_amdkfd_restore_userptr_worker()
2847 mutex_unlock(&process_info->notifier_lock); in amdgpu_amdkfd_restore_userptr_worker()
2849 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_restore_userptr_worker()
2854 &process_info->restore_userptr_work, in amdgpu_amdkfd_restore_userptr_worker()
2867 /* protected by process_info->lock */); in replace_eviction_fence()
2880 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2886 * should be called when the Process is still valid. BO restore involves -
2912 mutex_lock(&process_info->lock); in amdgpu_amdkfd_gpuvm_restore_process_bos()
2916 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2929 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2933 gobj = &mem->bo->tbo.base; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2946 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2949 struct amdgpu_bo *bo = mem->bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2950 uint32_t domain = mem->domain; in amdgpu_amdkfd_gpuvm_restore_process_bos()
2967 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2990 list_for_each_entry(mem, &process_info->kfd_bo_list, in amdgpu_amdkfd_gpuvm_restore_process_bos()
2994 list_for_each_entry(attachment, &mem->attachments, list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
2995 if (!attachment->is_mapped) in amdgpu_amdkfd_gpuvm_restore_process_bos()
3008 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3011 peer_vm->root.bo->tbo.bdev); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3049 if (dma_fence_is_signaled(&process_info->eviction_fence->base)) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3052 process_info->eviction_fence->base.context, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3053 process_info->eviction_fence->mm, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3058 ret = -ENOMEM; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3061 dma_fence_put(&process_info->eviction_fence->base); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3062 process_info->eviction_fence = new_fence; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3063 replace_eviction_fence(ef, dma_fence_get(&new_fence->base)); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3065 WARN_ONCE(*ef != &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3070 list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { in amdgpu_amdkfd_gpuvm_restore_process_bos()
3071 if (mem->bo->tbo.pin_count) in amdgpu_amdkfd_gpuvm_restore_process_bos()
3074 dma_resv_add_fence(mem->bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3075 &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3079 list_for_each_entry(peer_vm, &process_info->vm_list_head, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3081 struct amdgpu_bo *bo = peer_vm->root.bo; in amdgpu_amdkfd_gpuvm_restore_process_bos()
3083 dma_resv_add_fence(bo->tbo.base.resv, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3084 &process_info->eviction_fence->base, in amdgpu_amdkfd_gpuvm_restore_process_bos()
3092 mutex_unlock(&process_info->lock); in amdgpu_amdkfd_gpuvm_restore_process_bos()
3103 return -EINVAL; in amdgpu_amdkfd_add_gws_to_process()
3107 return -ENOMEM; in amdgpu_amdkfd_add_gws_to_process()
3109 mutex_init(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3110 INIT_LIST_HEAD(&(*mem)->attachments); in amdgpu_amdkfd_add_gws_to_process()
3111 (*mem)->bo = amdgpu_bo_ref(gws_bo); in amdgpu_amdkfd_add_gws_to_process()
3112 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; in amdgpu_amdkfd_add_gws_to_process()
3113 (*mem)->process_info = process_info; in amdgpu_amdkfd_add_gws_to_process()
3115 amdgpu_sync_create(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3119 mutex_lock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3131 	/* GWS resource is shared between amdgpu and amdkfd in amdgpu_amdkfd_add_gws_to_process()
3135 ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); in amdgpu_amdkfd_add_gws_to_process()
3138 dma_resv_add_fence(gws_bo->tbo.base.resv, in amdgpu_amdkfd_add_gws_to_process()
3139 &process_info->eviction_fence->base, in amdgpu_amdkfd_add_gws_to_process()
3142 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3150 mutex_unlock(&(*mem)->process_info->lock); in amdgpu_amdkfd_add_gws_to_process()
3151 amdgpu_sync_free(&(*mem)->sync); in amdgpu_amdkfd_add_gws_to_process()
3154 mutex_destroy(&(*mem)->lock); in amdgpu_amdkfd_add_gws_to_process()
3165 struct amdgpu_bo *gws_bo = kgd_mem->bo; in amdgpu_amdkfd_remove_gws_from_process()
3179 process_info->eviction_fence); in amdgpu_amdkfd_remove_gws_from_process()
3181 amdgpu_sync_free(&kgd_mem->sync); in amdgpu_amdkfd_remove_gws_from_process()
3183 mutex_destroy(&kgd_mem->lock); in amdgpu_amdkfd_remove_gws_from_process()
3188 /* Returns GPU-specific tiling mode information */
3192 config->gb_addr_config = adev->gfx.config.gb_addr_config; in amdgpu_amdkfd_get_tile_config()
3193 config->tile_config_ptr = adev->gfx.config.tile_mode_array; in amdgpu_amdkfd_get_tile_config()
3194 config->num_tile_configs = in amdgpu_amdkfd_get_tile_config()
3195 ARRAY_SIZE(adev->gfx.config.tile_mode_array); in amdgpu_amdkfd_get_tile_config()
3196 config->macro_tile_config_ptr = in amdgpu_amdkfd_get_tile_config()
3197 adev->gfx.config.macrotile_mode_array; in amdgpu_amdkfd_get_tile_config()
3198 config->num_macro_tile_configs = in amdgpu_amdkfd_get_tile_config()
3199 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); in amdgpu_amdkfd_get_tile_config()
3202 config->num_banks = adev->gfx.config.num_banks; in amdgpu_amdkfd_get_tile_config()
3203 config->num_ranks = adev->gfx.config.num_ranks; in amdgpu_amdkfd_get_tile_config()
3213 list_for_each_entry(entry, &mem->attachments, list) { in amdgpu_amdkfd_bo_mapped_to_dev()
3214 if (entry->is_mapped && entry->bo_va->base.vm == vm) in amdgpu_amdkfd_bo_mapped_to_dev()