Lines matching "long", "ram", and "code" in drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
52 struct amdgpu_device *adev = ring->adev; in svm_migrate_gart_map()
62 *gart_addr = adev->gmc.gart_start; in svm_migrate_gart_map()
64 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); in svm_migrate_gart_map()
67 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, in svm_migrate_gart_map()
76 src_addr += job->ibs[0].gpu_addr; in svm_migrate_gart_map()
78 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); in svm_migrate_gart_map()
79 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, in svm_migrate_gart_map()
82 amdgpu_ring_pad_ib(ring, &job->ibs[0]); in svm_migrate_gart_map()
83 WARN_ON(job->ibs[0].length_dw > num_dw); in svm_migrate_gart_map()
89 pte_flags |= adev->gart.gart_pte_flags; in svm_migrate_gart_map()
91 cpu_addr = &job->ibs[0].ptr[num_dw]; in svm_migrate_gart_map()
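The tail of svm_migrate_gart_map() is elided by the matches: it assembles the GART PTE flags and writes npages entries at the IB tail so the SDMA copy emitted above lands them in the GART table. A minimal sketch of that shape, assuming the amdgpu helpers named below (the exact job-submit API varies across kernel versions):

	/* elided above line 89: base flags for valid, snooped system pages */
	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;

	/* elided below line 91: write npages PTEs for the DMA addresses
	 * into the IB tail, then submit the job directly on the ring */
	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit_direct(job, ring, &fence);	/* assumed submit API */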
101 * svm_migrate_copy_memory_gart - SDMA copy of data between RAM and VRAM
110 * RAM addresses use contiguous GART table entries that map to the RAM pages,
120 * 0 - OK, otherwise error code
130 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; in svm_migrate_copy_memory_gart()
136 mutex_lock(&adev->mman.gtt_window_lock); in svm_migrate_copy_memory_gart()
151 dev_err(adev->dev, "fail %d create gart mapping\n", r); in svm_migrate_copy_memory_gart()
158 dev_err(adev->dev, "fail %d to copy memory\n", r); in svm_migrate_copy_memory_gart()
164 npages -= size; in svm_migrate_copy_memory_gart()
172 mutex_unlock(&adev->mman.gtt_window_lock); in svm_migrate_copy_memory_gart()
178 * svm_migrate_copy_done - wait for the SDMA memory copy to complete
189 * 0 - success
190 * otherwise - error code from dma fence signal
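The body of svm_migrate_copy_done() produced no matches; a minimal sketch, assuming the fence returned by the last SDMA copy is passed in as mfence:

static int svm_migrate_copy_done(struct amdgpu_device *adev,
				 struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);	/* non-interruptible */
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}
	return r;
}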
206 unsigned long
207 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr) in svm_migrate_addr_to_pfn()
209 return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT; in svm_migrate_addr_to_pfn()
213 svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) in svm_migrate_get_vram_page()
218 svm_range_bo_ref(prange->svm_bo); in svm_migrate_get_vram_page()
219 page->zone_device_data = prange->svm_bo; in svm_migrate_get_vram_page()
224 svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr) in svm_migrate_put_vram_page()
233 static unsigned long
236 unsigned long addr; in svm_migrate_addr()
239 return (addr - adev->kfd.pgmap.range.start); in svm_migrate_addr()
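svm_migrate_addr_to_pfn() and svm_migrate_addr() are inverses over the device pgmap range. A worked illustration with hypothetical numbers (range start 0x80000000, 4 KiB pages):

	/* device-relative addr 0x200000:
	 *   pfn  = (0x200000 + 0x80000000) >> 12 = 0x80200   (addr_to_pfn)
	 *   back = (0x80200 << 12) - 0x80000000  = 0x200000   (svm_migrate_addr)
	 */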
243 svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr) in svm_migrate_get_sys_page()
254 static void svm_migrate_put_sys_page(unsigned long addr) in svm_migrate_put_sys_page()
263 static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) in svm_migrate_unsuccessful_pages()
265 unsigned long upages = 0; in svm_migrate_unsuccessful_pages()
266 unsigned long i; in svm_migrate_unsuccessful_pages()
268 for (i = 0; i < migrate->npages; i++) { in svm_migrate_unsuccessful_pages()
269 if (migrate->src[i] & MIGRATE_PFN_VALID && in svm_migrate_unsuccessful_pages()
270 !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in svm_migrate_unsuccessful_pages()
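The loop body and return are elided; the complete helper simply counts source entries that were valid going in but did not get MIGRATE_PFN_MIGRATE back from the core mm. A sketch of the full function under that reading:

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		/* valid source page that the core mm refused to migrate */
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}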
281 uint64_t npages = migrate->cpages; in svm_migrate_copy_to_vram()
282 struct amdgpu_device *adev = node->adev; in svm_migrate_copy_to_vram()
283 struct device *dev = adev->dev; in svm_migrate_copy_to_vram()
290 pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start, in svm_migrate_copy_to_vram()
291 prange->last, ttm_res_offset); in svm_migrate_copy_to_vram()
296 amdgpu_res_first(prange->ttm_res, ttm_res_offset, in svm_migrate_copy_to_vram()
302 migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); in svm_migrate_copy_to_vram()
303 svm_migrate_get_vram_page(prange, migrate->dst[i]); in svm_migrate_copy_to_vram()
304 migrate->dst[i] = migrate_pfn(migrate->dst[i]); in svm_migrate_copy_to_vram()
306 spage = migrate_pfn_to_page(migrate->src[i]); in svm_migrate_copy_to_vram()
319 adev, src + i - j, in svm_migrate_copy_to_vram()
320 dst + i - j, j, in svm_migrate_copy_to_vram()
336 if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) { in svm_migrate_copy_to_vram()
337 r = svm_migrate_copy_memory_gart(adev, src + i - j, in svm_migrate_copy_to_vram()
338 dst + i - j, j + 1, in svm_migrate_copy_to_vram()
350 r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j, in svm_migrate_copy_to_vram()
356 while (i--) { in svm_migrate_copy_to_vram()
358 migrate->dst[i] = 0; in svm_migrate_copy_to_vram()
367 migrate->dst[i] = 0; in svm_migrate_copy_to_vram()
369 migrate->dst[i + 1] = 0; in svm_migrate_copy_to_vram()
371 migrate->dst[i + 2] = 0; in svm_migrate_copy_to_vram()
373 migrate->dst[i + 3] = 0; in svm_migrate_copy_to_vram()
380 static long
385 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); in svm_migrate_vma_to_vram()
386 uint64_t npages = (end - start) >> PAGE_SHIFT; in svm_migrate_vma_to_vram()
387 struct amdgpu_device *adev = node->adev; in svm_migrate_vma_to_vram()
391 unsigned long cpages = 0; in svm_migrate_vma_to_vram()
392 unsigned long mpages = 0; in svm_migrate_vma_to_vram()
395 int r = -ENOMEM; in svm_migrate_vma_to_vram()
414 kfd_smi_event_migration_start(node, p->lead_thread->pid, in svm_migrate_vma_to_vram()
416 0, node->id, prange->prefetch_loc, in svm_migrate_vma_to_vram()
417 prange->preferred_loc, trigger); in svm_migrate_vma_to_vram()
421 dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n", in svm_migrate_vma_to_vram()
422 __func__, r, prange->start, prange->last); in svm_migrate_vma_to_vram()
429 prange->start, prange->last); in svm_migrate_vma_to_vram()
444 mpages = cpages - svm_migrate_unsuccessful_pages(&migrate); in svm_migrate_vma_to_vram()
448 kfd_smi_event_migration_end(node, p->lead_thread->pid, in svm_migrate_vma_to_vram()
450 0, node->id, trigger); in svm_migrate_vma_to_vram()
452 svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); in svm_migrate_vma_to_vram()
460 WRITE_ONCE(pdd->page_in, pdd->page_in + mpages); in svm_migrate_vma_to_vram()
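svm_migrate_vma_to_vram() is built around the core-mm migrate_vma API; only fragments of it match above. A condensed sketch of the lifecycle, with SMI events and error paths elided, labels assumed, and the pfn arrays (src_pfn/dst_pfn, hypothetical names here) supplied by the caller:

	struct migrate_vma migrate = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,	/* RAM -> VRAM leg */
		.pgmap_owner	= SVM_ADEV_PGMAP_OWNER(adev),
		.src		= src_pfn,	/* hypothetical caller-allocated arrays */
		.dst		= dst_pfn,
	};

	r = migrate_vma_setup(&migrate);	/* collect and isolate source pages */
	if (r || !migrate.cpages)
		goto out_free;			/* assumed label */

	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence,
				     scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);		/* install the VRAM pages */
	svm_migrate_copy_done(adev, mfence);	/* wait for the SDMA fence */
	migrate_vma_finalize(&migrate);		/* release or restore source pages */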
468 * svm_migrate_ram_to_vram - migrate svm range from system to device
479 * 0 - OK, otherwise error code
483 unsigned long start_mgr, unsigned long last_mgr, in svm_migrate_ram_to_vram()
486 unsigned long addr, start, end; in svm_migrate_ram_to_vram()
490 unsigned long mpages = 0; in svm_migrate_ram_to_vram()
491 long r = 0; in svm_migrate_ram_to_vram()
493 if (start_mgr < prange->start || last_mgr > prange->last) { in svm_migrate_ram_to_vram()
495 start_mgr, last_mgr, prange->start, prange->last); in svm_migrate_ram_to_vram()
496 return -EFAULT; in svm_migrate_ram_to_vram()
502 return -ENODEV; in svm_migrate_ram_to_vram()
506 prange->svms, start_mgr, last_mgr, prange->start, prange->last, in svm_migrate_ram_to_vram()
512 r = amdgpu_amdkfd_reserve_mem_limit(node->adev, in svm_migrate_ram_to_vram()
513 prange->npages * PAGE_SIZE, in svm_migrate_ram_to_vram()
515 node->xcp ? node->xcp->id : 0); in svm_migrate_ram_to_vram()
517 dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r); in svm_migrate_ram_to_vram()
518 return -ENOSPC; in svm_migrate_ram_to_vram()
523 dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); in svm_migrate_ram_to_vram()
526 ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT; in svm_migrate_ram_to_vram()
529 unsigned long next; in svm_migrate_ram_to_vram()
535 next = min(vma->vm_end, end); in svm_migrate_ram_to_vram()
543 ttm_res_offset += next - addr; in svm_migrate_ram_to_vram()
548 prange->actual_loc = best_loc; in svm_migrate_ram_to_vram()
549 prange->vram_pages += mpages; in svm_migrate_ram_to_vram()
550 } else if (!prange->actual_loc) { in svm_migrate_ram_to_vram()
552 * sys ram; drop the svm_bo reference obtained from svm_range_vram_node_new in svm_migrate_ram_to_vram()
558 amdgpu_amdkfd_unreserve_mem_limit(node->adev, in svm_migrate_ram_to_vram()
559 prange->npages * PAGE_SIZE, in svm_migrate_ram_to_vram()
561 node->xcp ? node->xcp->id : 0); in svm_migrate_ram_to_vram()
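The per-VMA walk in svm_migrate_ram_to_vram() (lines 529-543) only partially matches; a sketch of the full loop, assuming vma_lookup() as in current kernels:

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);	/* VMA covering addr, or NULL */
		if (!vma)
			break;

		next = min(vma->vm_end, end);	/* clamp to the requested range */
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next,
					    trigger, ttm_res_offset);
		if (r < 0)
			break;
		mpages += r;			/* assumed: r = pages migrated in this VMA */

		ttm_res_offset += next - addr;	/* advance the VRAM cursor */
		addr = next;
	}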
567 struct svm_range_bo *svm_bo = page->zone_device_data; in svm_migrate_page_free()
570 pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref)); in svm_migrate_page_free()
580 struct device *dev = adev->dev; in svm_migrate_copy_to_ram()
588 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, in svm_migrate_copy_to_ram()
589 prange->last); in svm_migrate_copy_to_ram()
591 addr = migrate->start; in svm_migrate_copy_to_ram()
599 spage = migrate_pfn_to_page(migrate->src[i]); in svm_migrate_copy_to_ram()
602 prange->svms, prange->start, prange->last); in svm_migrate_copy_to_ram()
604 r = svm_migrate_copy_memory_gart(adev, dst + i - j, in svm_migrate_copy_to_ram()
605 src + i - j, j, in svm_migrate_copy_to_ram()
615 if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) { in svm_migrate_copy_to_ram()
616 r = svm_migrate_copy_memory_gart(adev, dst + i - j, in svm_migrate_copy_to_ram()
617 src + i - j, j, in svm_migrate_copy_to_ram()
625 dpage = svm_migrate_get_sys_page(migrate->vma, addr); in svm_migrate_copy_to_ram()
628 prange->svms, prange->start, prange->last); in svm_migrate_copy_to_ram()
629 r = -ENOMEM; in svm_migrate_copy_to_ram()
636 dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r); in svm_migrate_copy_to_ram()
643 migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); in svm_migrate_copy_to_ram()
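Between allocating dpage (line 625) and encoding it (line 643), the elided lines DMA-map the new system page so the SDMA engine can write into it; the dev_err at line 636 is the failure branch of that mapping. The shape of the elided step, assuming the standard streaming-DMA calls:

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);	/* reported at line 636 */
		if (r)
			goto out_oom;			/* assumed label */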
647 r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j, in svm_migrate_copy_to_ram()
652 pr_debug("failed %d copy to ram\n", r); in svm_migrate_copy_to_ram()
653 while (i--) { in svm_migrate_copy_to_ram()
655 migrate->dst[i] = 0; in svm_migrate_copy_to_ram()
663 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
671 * @fault_page: from vmf->page when called via svm_migrate_to_ram(), the CPU page fault callback
673 * Context: Process context, caller holds mmap read lock and prange->migrate_mutex
676 * negative values - error code
677 * zero or positive values - number of pages migrated
679 static long
684 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); in svm_migrate_vma_to_ram()
685 uint64_t npages = (end - start) >> PAGE_SHIFT; in svm_migrate_vma_to_ram()
686 unsigned long upages = npages; in svm_migrate_vma_to_ram()
687 unsigned long cpages = 0; in svm_migrate_vma_to_ram()
688 unsigned long mpages = 0; in svm_migrate_vma_to_ram()
689 struct amdgpu_device *adev = node->adev; in svm_migrate_vma_to_ram()
695 int r = -ENOMEM; in svm_migrate_vma_to_ram()
702 if (adev->gmc.xgmi.connected_to_cpu) in svm_migrate_vma_to_ram()
718 kfd_smi_event_migration_start(node, p->lead_thread->pid, in svm_migrate_vma_to_ram()
720 node->id, 0, prange->prefetch_loc, in svm_migrate_vma_to_ram()
721 prange->preferred_loc, trigger); in svm_migrate_vma_to_ram()
725 dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n", in svm_migrate_vma_to_ram()
726 __func__, r, prange->start, prange->last); in svm_migrate_vma_to_ram()
733 prange->start, prange->last); in svm_migrate_vma_to_ram()
754 kfd_smi_event_migration_end(node, p->lead_thread->pid, in svm_migrate_vma_to_ram()
756 node->id, 0, trigger); in svm_migrate_vma_to_ram()
758 svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); in svm_migrate_vma_to_ram()
764 mpages = cpages - upages; in svm_migrate_vma_to_ram()
767 WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); in svm_migrate_vma_to_ram()
774 * svm_migrate_vram_to_ram - migrate svm range from device to system
776 * @mm: process mm, use current->mm if NULL
777 * @start_mgr: first page that needs to be migrated to sys ram
778 * @last_mgr: last page that needs to be migrated to sys ram
780 * @fault_page: from vmf->page when called via svm_migrate_to_ram(), the CPU page fault callback
782 * Context: Process context, caller holds mmap read lock and prange->migrate_mutex
785 * 0 - OK, otherwise error code
788 unsigned long start_mgr, unsigned long last_mgr, in svm_migrate_vram_to_ram()
793 unsigned long addr; in svm_migrate_vram_to_ram()
794 unsigned long start; in svm_migrate_vram_to_ram()
795 unsigned long end; in svm_migrate_vram_to_ram()
796 unsigned long mpages = 0; in svm_migrate_vram_to_ram()
797 long r = 0; in svm_migrate_vram_to_ram()
799 /* this prange has no vram pages to migrate to sys ram */ in svm_migrate_vram_to_ram()
800 if (!prange->actual_loc) { in svm_migrate_vram_to_ram()
801 pr_debug("[0x%lx 0x%lx] already migrated to ram\n", in svm_migrate_vram_to_ram()
802 prange->start, prange->last); in svm_migrate_vram_to_ram()
806 if (start_mgr < prange->start || last_mgr > prange->last) { in svm_migrate_vram_to_ram()
808 start_mgr, last_mgr, prange->start, prange->last); in svm_migrate_vram_to_ram()
809 return -EFAULT; in svm_migrate_vram_to_ram()
812 node = svm_range_get_node_by_id(prange, prange->actual_loc); in svm_migrate_vram_to_ram()
814 pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); in svm_migrate_vram_to_ram()
815 return -ENODEV; in svm_migrate_vram_to_ram()
817 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", in svm_migrate_vram_to_ram()
818 prange->svms, prange, start_mgr, last_mgr, in svm_migrate_vram_to_ram()
819 prange->actual_loc); in svm_migrate_vram_to_ram()
825 unsigned long next; in svm_migrate_vram_to_ram()
830 r = -EFAULT; in svm_migrate_vram_to_ram()
834 next = min(vma->vm_end, end); in svm_migrate_vram_to_ram()
847 prange->vram_pages -= mpages; in svm_migrate_vram_to_ram()
852 if (prange->vram_pages == 0 && prange->ttm_res) { in svm_migrate_vram_to_ram()
853 prange->actual_loc = 0; in svm_migrate_vram_to_ram()
862 * svm_migrate_vram_to_vram - migrate svm range from device to device
865 * @start: first page that needs to be migrated to sys ram
866 * @last: last page that needs to be migrated to sys ram
867 * @mm: process mm, use current->mm if NULL
872 * migrate all vram pages in prange to sys ram, then migrate
873 * [start, last] pages from sys ram to gpu node best_loc.
876 * 0 - OK, otherwise error code
880 unsigned long start, unsigned long last, in svm_migrate_vram_to_vram()
890 pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); in svm_migrate_vram_to_vram()
893 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, in svm_migrate_vram_to_vram()
897 } while (prange->actual_loc && --retries); in svm_migrate_vram_to_vram()
899 if (prange->actual_loc) in svm_migrate_vram_to_vram()
900 return -EDEADLK; in svm_migrate_vram_to_vram()
907 unsigned long start, unsigned long last, in svm_migrate_to_vram()
910 if (!prange->actual_loc || prange->actual_loc == best_loc) in svm_migrate_to_vram()
921 * svm_migrate_to_ram - CPU page fault handler
927 * 0 - OK
928 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
932 unsigned long start, last, size; in svm_migrate_to_ram()
933 unsigned long addr = vmf->address; in svm_migrate_to_ram()
940 svm_bo = vmf->page->zone_device_data; in svm_migrate_to_ram()
945 if (!mmget_not_zero(svm_bo->eviction_fence->mm)) { in svm_migrate_to_ram()
950 mm = svm_bo->eviction_fence->mm; in svm_migrate_to_ram()
951 if (mm != vmf->vma->vm_mm) in svm_migrate_to_ram()
960 if (READ_ONCE(p->svms.faulting_task) == current) { in svm_migrate_to_ram()
961 pr_debug("skipping ram migration\n"); in svm_migrate_to_ram()
966 pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); in svm_migrate_to_ram()
969 mutex_lock(&p->svms.lock); in svm_migrate_to_ram()
971 prange = svm_range_from_addr(&p->svms, addr, NULL); in svm_migrate_to_ram()
973 pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); in svm_migrate_to_ram()
974 r = -EFAULT; in svm_migrate_to_ram()
978 mutex_lock(&prange->migrate_mutex); in svm_migrate_to_ram()
980 if (!prange->actual_loc) in svm_migrate_to_ram()
984 size = 1UL << prange->granularity; in svm_migrate_to_ram()
985 start = max(ALIGN_DOWN(addr, size), prange->start); in svm_migrate_to_ram()
986 last = min(ALIGN(addr + 1, size) - 1, prange->last); in svm_migrate_to_ram()
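The three lines above clamp the fault to one migration granule. A worked example with hypothetical page numbers, using the default granularity of 9 (512 pages, i.e. 2 MiB of 4 KiB pages):

	/* granularity = 9  =>  size = 1UL << 9 = 512 (0x200) pages
	 * faulting page addr = 0x1234, prange = [0x1000, 0x17ff]:
	 *   start = max(ALIGN_DOWN(0x1234, 0x200), 0x1000) = 0x1200
	 *   last  = min(ALIGN(0x1235, 0x200) - 1,  0x17ff) = 0x13ff
	 * so the fault migrates pages [0x1200, 0x13ff] back to system RAM.
	 */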
988 r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last, in svm_migrate_to_ram()
989 KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page); in svm_migrate_to_ram()
992 r, prange->svms, prange, start, last); in svm_migrate_to_ram()
995 mutex_unlock(&prange->migrate_mutex); in svm_migrate_to_ram()
997 mutex_unlock(&p->svms.lock); in svm_migrate_to_ram()
999 pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); in svm_migrate_to_ram()
1016 struct amdgpu_kfd_dev *kfddev = &adev->kfd; in kgd2kfd_init_zone_device()
1019 unsigned long size; in kgd2kfd_init_zone_device()
1024 return -EINVAL; in kgd2kfd_init_zone_device()
1026 if (adev->flags & AMD_IS_APU) in kgd2kfd_init_zone_device()
1029 pgmap = &kfddev->pgmap; in kgd2kfd_init_zone_device()
1035 size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20); in kgd2kfd_init_zone_device()
1036 if (adev->gmc.xgmi.connected_to_cpu) { in kgd2kfd_init_zone_device()
1037 pgmap->range.start = adev->gmc.aper_base; in kgd2kfd_init_zone_device()
1038 pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1; in kgd2kfd_init_zone_device()
1039 pgmap->type = MEMORY_DEVICE_COHERENT; in kgd2kfd_init_zone_device()
1041 res = devm_request_free_mem_region(adev->dev, &iomem_resource, size); in kgd2kfd_init_zone_device()
1044 pgmap->range.start = res->start; in kgd2kfd_init_zone_device()
1045 pgmap->range.end = res->end; in kgd2kfd_init_zone_device()
1046 pgmap->type = MEMORY_DEVICE_PRIVATE; in kgd2kfd_init_zone_device()
1049 pgmap->nr_range = 1; in kgd2kfd_init_zone_device()
1050 pgmap->ops = &svm_migrate_pgmap_ops; in kgd2kfd_init_zone_device()
1051 pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev); in kgd2kfd_init_zone_device()
1052 pgmap->flags = 0; in kgd2kfd_init_zone_device()
1053 /* Device manager releases device-specific resources, memory region and in kgd2kfd_init_zone_device()
1056 r = devm_memremap_pages(adev->dev, pgmap); in kgd2kfd_init_zone_device()
1059 if (pgmap->type == MEMORY_DEVICE_PRIVATE) in kgd2kfd_init_zone_device()
1060 devm_release_mem_region(adev->dev, res->start, resource_size(res)); in kgd2kfd_init_zone_device()
1062 pgmap->type = 0; in kgd2kfd_init_zone_device()
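devm_memremap_pages() returns the remapped range or an ERR_PTR; lines 1059-1062 sit inside its failure branch. A sketch of the surrounding check, assuming the usual ERR_PTR convention:

	void *r = devm_memremap_pages(adev->dev, pgmap);

	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		/* only the MEMORY_DEVICE_PRIVATE path requested a region */
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						resource_size(res));
		pgmap->type = 0;
		return PTR_ERR(r);
	}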