Lines matching refs: svmm
67 struct nouveau_svmm *svmm; member
88 struct nouveau_svmm *svmm; member
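
Lines 67 and 88 are the two struct members named svmm; the listing alone does not say which structs hold them. From the accesses elsewhere in the listing (lines 228, 232, 266, 277-282, 307-310, 331-334, 360), the types involved look roughly as below. This is a reconstruction; any field not implied by those accesses is an assumption.

/* Needs <linux/mmu_notifier.h>, <linux/mutex.h>, <linux/list.h>. */
struct nouveau_svmm {
	struct mmu_notifier notifier;	/* registered against the process mm (line 360) */
	struct nouveau_vmm *vmm;	/* cleared in nouveau_svmm_fini() (line 308) */
	struct {			/* VA window left outside SVM management (lines 332-333) */
		unsigned long start;
		unsigned long limit;
	} unmanaged;
	struct mutex mutex;		/* serialises GPU page-table updates (line 334) */
};

/* Per-channel-instance link kept on drm->svm->inst; see join/part below. */
struct nouveau_ivmm {
	struct nouveau_svmm *svmm;	/* assigned at line 228 */
	u64 inst;
	struct list_head head;		/* added to the list at line 232 */
};
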
169 if (!cli->svm.svmm) { in nouveau_svmm_bind()
186 nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr, in nouveau_svmm_bind()
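
Lines 169 and 186 are from nouveau_svmm_bind(), which walks the requested VA range one VMA at a time and migrates each intersecting piece to device memory. A sketch of that loop, assuming the usual ioctl prologue has produced cli, args and a pinned mm; the locking around the loop is an assumption, though find_vma_intersection() does require the mmap lock to be held.

	/* Fragment of nouveau_svmm_bind(); cli, args and mm come from the
	 * ioctl prologue (not shown), mm assumed read-locked here.
	 */
	unsigned long addr, end;

	if (!cli->svm.svmm)		/* SVM was never initialised for this client */
		return -EINVAL;

	for (addr = args->va_start, end = args->va_end; addr < end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, end);
		/* Migration is best effort; errors are ignored and the walk continues. */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr, next);
		addr = next;
	}
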
206 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) in nouveau_svmm_part() argument
209 if (svmm) { in nouveau_svmm_part()
210 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
211 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst); in nouveau_svmm_part()
216 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
222 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst) in nouveau_svmm_join() argument
225 if (svmm) { in nouveau_svmm_join()
228 ivmm->svmm = svmm; in nouveau_svmm_join()
231 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
232 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst); in nouveau_svmm_join()
233 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
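
Lines 206-233 cover nouveau_svmm_part() and nouveau_svmm_join(), which unlink/link a channel instance on the per-device SVM instance list under drm->svm->mutex. A reconstruction consistent with those lines; the nouveau_ivmm_find() lookup comes from line 211, while the allocation flags and error handling in join are assumptions.

/* Unlink a channel instance from its SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}

/* Link a channel instance to its SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}
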
240 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) in nouveau_svmm_invalidate() argument
243 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, in nouveau_svmm_invalidate()
255 struct nouveau_svmm *svmm = in nouveau_svmm_invalidate_range_start() local
263 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); in nouveau_svmm_invalidate_range_start()
265 mutex_lock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
266 if (unlikely(!svmm->vmm)) in nouveau_svmm_invalidate_range_start()
274 update->owner == svmm->vmm->cli->drm->dev) in nouveau_svmm_invalidate_range_start()
277 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { in nouveau_svmm_invalidate_range_start()
278 if (start < svmm->unmanaged.start) { in nouveau_svmm_invalidate_range_start()
279 nouveau_svmm_invalidate(svmm, start, in nouveau_svmm_invalidate_range_start()
280 svmm->unmanaged.limit); in nouveau_svmm_invalidate_range_start()
282 start = svmm->unmanaged.limit; in nouveau_svmm_invalidate_range_start()
285 nouveau_svmm_invalidate(svmm, start, limit); in nouveau_svmm_invalidate_range_start()
288 mutex_unlock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
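
Lines 240-288 are the mmu-notifier path: nouveau_svmm_invalidate() asks the GPU VMM to clear PFN mappings for a range, and the invalidate_range_start() callback clips the CPU range against the unmanaged window before doing so. A reconstruction; the PFNCLR argument struct and the early-outs (dead vmm, migration events owned by the device, non-blockable ranges) are inferred from the fragments plus standard mmu_notifier semantics.

void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
	}
}

static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	/* Migration invalidations for our own device pages are handled as
	 * part of the migration itself, so skip them here (line 274).
	 */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->owner == svmm->vmm->cli->drm->dev)
		goto out;

	/* Clip against the unmanaged VA window: only managed parts of the
	 * range are torn down on the GPU (lines 277-285).
	 */
	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);
out:
	mutex_unlock(&svmm->mutex);
	return 0;
}
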
305 struct nouveau_svmm *svmm = *psvmm; in nouveau_svmm_fini() local
306 if (svmm) { in nouveau_svmm_fini()
307 mutex_lock(&svmm->mutex); in nouveau_svmm_fini()
308 svmm->vmm = NULL; in nouveau_svmm_fini()
309 mutex_unlock(&svmm->mutex); in nouveau_svmm_fini()
310 mmu_notifier_put(&svmm->notifier); in nouveau_svmm_fini()
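
Lines 305-310 are nouveau_svmm_fini(): teardown only detaches the vmm under the svmm mutex and drops the notifier reference; freeing the svmm itself is left to the mmu_notifier core via the ops' free_notifier hook (that hook is an assumption based on how mmu_notifier_put() works). A sketch:

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;

	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;	/* later callbacks see a dead svmm and bail out */
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);	/* final free via ops->free_notifier */
		*psvmm = NULL;
	}
}
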
320 struct nouveau_svmm *svmm; in nouveau_svmm_init() local
329 if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) in nouveau_svmm_init()
331 svmm->vmm = &cli->svm; in nouveau_svmm_init()
332 svmm->unmanaged.start = args->unmanaged_addr; in nouveau_svmm_init()
333 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size; in nouveau_svmm_init()
334 mutex_init(&svmm->mutex); in nouveau_svmm_init()
359 svmm->notifier.ops = &nouveau_mn_ops; in nouveau_svmm_init()
360 ret = __mmu_notifier_register(&svmm->notifier, current->mm); in nouveau_svmm_init()
365 cli->svm.svmm = svmm; in nouveau_svmm_init()
375 kfree(svmm); in nouveau_svmm_init()
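
Lines 320-375 are nouveau_svmm_init(). The fragments show the allocation, the unmanaged window taken from the ioctl args, and registration of the mmu notifier; everything between line 334 and line 359 (allocating the SVM-capable GPU VMM) is omitted here, and the mm locking and error labels are assumptions. A trimmed sketch:

	/* Fragment of the SVM_INIT ioctl handler; cli and args come from the
	 * ioctl plumbing (not shown).
	 */
	struct nouveau_svmm *svmm;
	int ret;

	/* Allocate tracking for the SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* ... allocate the SVM-capable GPU VMM here (lines 335-358, omitted) ... */

	/* __mmu_notifier_register() must be called with the mmap lock held
	 * for writing; on success, ownership of svmm passes to the notifier.
	 */
	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_unlock;

	cli->svm.svmm = svmm;
	mmap_write_unlock(current->mm);
	return 0;

out_unlock:
	mmap_write_unlock(current->mm);
	kfree(svmm);	/* registration failed, so we still own svmm (line 375) */
	return ret;
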
503 struct nouveau_svmm *svmm; member
514 range->owner == sn->svmm->vmm->cli->drm->dev) in nouveau_svm_range_invalidate()
525 mutex_lock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
526 else if (!mutex_trylock(&sn->svmm->mutex)) in nouveau_svm_range_invalidate()
529 mutex_unlock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
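
Lines 503-529 belong to the per-fault interval notifier used while servicing a fault: struct svm_notifier embeds an mmu_interval_notifier plus the owning svmm, and the invalidate callback bumps the notifier sequence under svmm->mutex so a concurrent invalidation cannot race the GPU page-table update. A reconstruction; the EXCLUSIVE-event early-out and the exact callback signature follow the mmu_interval_notifier_ops convention and are partly assumptions.

struct svm_notifier {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};

static bool
nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
			     const struct mmu_notifier_range *range,
			     unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	/* Exclusive (atomic access) invalidations issued by this device are skipped. */
	if (range->event == MMU_NOTIFY_EXCLUSIVE &&
	    range->owner == sn->svmm->vmm->cli->drm->dev)
		return true;

	/* Take svmm->mutex so the sequence bump is ordered against any fault
	 * that is currently programming the GPU under the same mutex.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}
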
585 static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, in nouveau_atomic_range_fault() argument
592 struct mm_struct *mm = svmm->notifier.mm; in nouveau_atomic_range_fault()
620 mutex_lock(&svmm->mutex); in nouveau_atomic_range_fault()
624 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
637 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_atomic_range_fault()
638 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
648 static int nouveau_range_fault(struct nouveau_svmm *svmm, in nouveau_range_fault() argument
664 struct mm_struct *mm = svmm->notifier.mm; in nouveau_range_fault()
692 mutex_lock(&svmm->mutex); in nouveau_range_fault()
695 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
703 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_range_fault()
704 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
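
Lines 585-638 and 648-704 are the two fault-servicing helpers. Both follow the same pattern: resolve the pages on the CPU side, retake svmm->mutex, check the interval-notifier sequence, and only then push the PFNs to the GPU with nvif_object_ioctl(). A sketch of nouveau_range_fault() built around hmm_range_fault(); the args type, the nouveau_svm_mni_ops name and the PFN-conversion step are assumptions not visible in the listing.

static int
nouveau_range_fault(struct nouveau_svmm *svmm, struct nouveau_drm *drm,
		    struct nouveau_pfnmap_args *args, u32 size,
		    unsigned long hmm_flags, struct svm_notifier *notifier)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long hmm_pfns[1];
	struct hmm_range range = {
		.notifier = &notifier->notifier,
		.default_flags = hmm_flags,
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = svmm->notifier.mm;
	int ret;

	ret = mmu_interval_notifier_insert(&notifier->notifier, mm,
					   args->p.addr, args->p.size,
					   &nouveau_svm_mni_ops);
	if (ret)
		return ret;

	range.start = notifier->notifier.interval_tree.start;
	range.end = notifier->notifier.interval_tree.last + 1;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* notifier fired mid-walk, retry */
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;		/* invalidated after the walk, retry */
		}
		break;
	}

	/* ... translate hmm_pfns[] into the nvif PFN-map format here ... */

	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);
out:
	mmu_interval_notifier_remove(&notifier->notifier);
	return ret;
}
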
718 struct nouveau_svmm *svmm; in nouveau_svm_fault() local
758 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) { in nouveau_svm_fault()
759 if (!svmm || buffer->fault[fi]->inst != inst) { in nouveau_svm_fault()
762 svmm = ivmm ? ivmm->svmm : NULL; in nouveau_svm_fault()
764 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm); in nouveau_svm_fault()
766 buffer->fault[fi]->svmm = svmm; in nouveau_svm_fault()
782 if (!(svmm = buffer->fault[fi]->svmm)) { in nouveau_svm_fault()
786 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
793 if (start < svmm->unmanaged.limit) in nouveau_svm_fault()
794 limit = min_t(u64, limit, svmm->unmanaged.start); in nouveau_svm_fault()
823 mm = svmm->notifier.mm; in nouveau_svm_fault()
829 notifier.svmm = svmm; in nouveau_svm_fault()
831 ret = nouveau_atomic_range_fault(svmm, svm->drm, in nouveau_svm_fault()
835 ret = nouveau_range_fault(svmm, svm->drm, &args.i, in nouveau_svm_fault()
851 if (buffer->fault[fn]->svmm != svmm || in nouveau_svm_fault()
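
Lines 718-851 are the fault-buffer handler nouveau_svm_fault(). Per fault entry it resolves the owning svmm from the channel instance (lines 758-766), clamps the fault window against the unmanaged region (lines 793-794), services it atomically or via nouveau_range_fault() (lines 829-835), and then skips duplicate faults already satisfied by that update (line 851). Heavily trimmed fragments of those loops; variable names outside the listed lines are assumptions.

	/* First pass: resolve which svmm owns each faulting channel instance. */
	if (!svmm || buffer->fault[fi]->inst != inst) {
		struct nouveau_ivmm *ivmm =
			nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
		svmm = ivmm ? ivmm->svmm : NULL;
		inst = buffer->fault[fi]->inst;
		SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
	}
	buffer->fault[fi]->svmm = svmm;

	/* ... second pass over buffer->fault[] (lines 782 onwards) ... */

	/* Keep the fault window out of the unmanaged VA region. */
	start = buffer->fault[fi]->addr;
	limit = start + PAGE_SIZE;
	if (start < svmm->unmanaged.limit)
		limit = min_t(u64, limit, svmm->unmanaged.start);

	notifier.svmm = svmm;
	if (atomic)
		ret = nouveau_atomic_range_fault(svmm, svm->drm,
						 &args.i, size, &notifier);
	else
		ret = nouveau_range_fault(svmm, svm->drm, &args.i,
					  size, hmm_flags, &notifier);
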
922 nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, in nouveau_pfns_map() argument
930 mutex_lock(&svmm->mutex); in nouveau_pfns_map()
932 nvif_object_ioctl(&svmm->vmm->vmm.object, args, in nouveau_pfns_map()
935 mutex_unlock(&svmm->mutex); in nouveau_pfns_map()
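
Lines 922-935 are nouveau_pfns_map(), the path used after migration to push an array of PFNs to the GPU VMM under svmm->mutex. A sketch; the pfns-to-args conversion helper and the use of struct_size() for the ioctl length are assumptions.

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);
	/* The return value is not checked (line 932); a page left unmapped
	 * is expected to simply fault again on the next GPU access.
	 */
	nvif_object_ioctl(&svmm->vmm->vmm.object, args,
			  struct_size(args, p.phys, npages), NULL);
	mutex_unlock(&svmm->mutex);
}
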