Lines Matching +full:no +full:- +full:cs +full:- +full:readback

1 // SPDX-License-Identifier: MIT
13 #include <drm/intel/intel-gtt.h>
51 if (node->color != color) in i915_ggtt_color_adjust()
52 *end -= I915_GTT_PAGE_SIZE; in i915_ggtt_color_adjust()
57 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
59 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
61 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
63 /* Only VLV supports read-only GGTT mappings */ in ggtt_init_hw()
64 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
67 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
69 if (ggtt->mappable_end) { in ggtt_init_hw()
70 if (!io_mapping_init_wc(&ggtt->iomap, in ggtt_init_hw()
71 ggtt->gmadr.start, in ggtt_init_hw()
72 ggtt->mappable_end)) { in ggtt_init_hw()
73 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
74 return -EIO; in ggtt_init_hw()
77 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, in ggtt_init_hw()
78 ggtt->mappable_end); in ggtt_init_hw()
87 * i915_ggtt_init_hw - Initialize GGTT hardware
96 * end of the address space. This is required as the CS may prefetch in i915_ggtt_init_hw()
100 ret = ggtt_init_hw(to_gt(i915)->ggtt); in i915_ggtt_init_hw()
108 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
119 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_suspend_vm()
122 i915_gem_drain_freed_objects(vm->i915); in i915_ggtt_suspend_vm()
124 mutex_lock(&vm->mutex); in i915_ggtt_suspend_vm()
130 save_skip_rewrite = vm->skip_pte_rewrite; in i915_ggtt_suspend_vm()
131 vm->skip_pte_rewrite = true; in i915_ggtt_suspend_vm()
133 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in i915_ggtt_suspend_vm()
134 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_suspend_vm()
136 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_ggtt_suspend_vm()
141 /* unlikely to race when GPU is idle, so no worry about slowpath.. */ in i915_ggtt_suspend_vm()
144 * No dead objects should appear here, GPU should be in i915_ggtt_suspend_vm()
149 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
156 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
164 drm_mm_remove_node(&vma->node); in i915_ggtt_suspend_vm()
170 vm->clear_range(vm, 0, vm->total); in i915_ggtt_suspend_vm()
172 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
174 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
181 i915_ggtt_suspend_vm(&ggtt->vm); in i915_ggtt_suspend()
182 ggtt->invalidate(ggtt); in i915_ggtt_suspend()
184 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_suspend()
190 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
192 spin_lock_irq(&uncore->lock); in gen6_ggtt_invalidate()
195 spin_unlock_irq(&uncore->lock); in gen6_ggtt_invalidate()
205 * readback check when writing GTT PTE entries. in needs_wc_ggtt_mapping()
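The needs_wc_ggtt_mapping() hit above is where "readback" appears: the comment notes that a PTE write dropped by a write-combining mapping is easy to catch by reading the entry back. A minimal sketch of such a write-and-verify helper on a 64-bit build, assuming the driver's 64-bit gen8 PTE and the generic writeq()/readq() accessors; the helper name and local typedef are illustrative, not from this file:

#include <linux/io.h>
#include <linux/types.h>

typedef u64 example_gen8_pte_t;         /* stand-in for the driver's gen8_pte_t */

/*
 * Illustrative only: write one GGTT PTE through the GSM mapping and read it
 * back to confirm the store landed -- the failure mode the comment above
 * associates with WC mappings on the affected platforms.
 */
static bool example_pte_write_verified(example_gen8_pte_t __iomem *slot,
                                       example_gen8_pte_t pte)
{
        writeq(pte, slot);              /* same primitive gen8_set_pte() uses */
        return readq(slot) == pte;      /* readback check */
}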
215 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
223 if (needs_wc_ggtt_mapping(ggtt->vm.i915)) in gen8_ggtt_invalidate()
230 struct intel_uncore *uncore = gt->uncore; in guc_ggtt_ct_invalidate()
233 with_intel_runtime_pm_if_active(uncore->rpm, wakeref) in guc_ggtt_ct_invalidate()
239 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
244 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { in guc_ggtt_invalidate()
248 intel_uncore_write_fw(gt->uncore, in guc_ggtt_invalidate()
252 intel_uncore_write_fw(gt->uncore, in guc_ggtt_invalidate()
291 struct intel_gt *gt = ggtt->vm.gt; in should_update_ggtt_with_bind()
299 struct intel_gt *gt = ggtt->vm.gt; in gen8_ggtt_bind_get_ce()
304 ce = gt->engine[BCS0]->bind_context; in gen8_ggtt_bind_get_ce()
317 intel_engine_pm_get(ce->engine); in gen8_ggtt_bind_get_ce()
324 intel_engine_pm_put(ce->engine); in gen8_ggtt_bind_put_ce()
325 intel_gt_pm_put(ce->engine->gt, wakeref); in gen8_ggtt_bind_put_ce()
333 struct intel_gt *gt = ggtt->vm.gt; in gen8_ggtt_bind_ptes()
334 const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode; in gen8_ggtt_bind_ptes()
339 u32 *cs; in gen8_ggtt_bind_ptes() local
349 iter = __sgt_iter(pages->sgl, true); in gen8_ggtt_bind_ptes()
360 if (mutex_lock_interruptible(&ce->timeline->mutex)) in gen8_ggtt_bind_ptes()
368 mutex_unlock(&ce->timeline->mutex); in gen8_ggtt_bind_ptes()
372 cs = intel_ring_begin(rq, 2 * n_ptes + 2); in gen8_ggtt_bind_ptes()
373 if (IS_ERR(cs)) { in gen8_ggtt_bind_ptes()
375 i915_request_set_error_once(rq, PTR_ERR(cs)); in gen8_ggtt_bind_ptes()
380 *cs++ = MI_UPDATE_GTT | (2 * n_ptes); in gen8_ggtt_bind_ptes()
381 *cs++ = offset << 12; in gen8_ggtt_bind_ptes()
387 *cs++ = lower_32_bits(pte | addr); in gen8_ggtt_bind_ptes()
388 *cs++ = upper_32_bits(pte | addr); in gen8_ggtt_bind_ptes()
393 memset64((u64 *)cs, scratch_pte, in gen8_ggtt_bind_ptes()
394 n_ptes - count); in gen8_ggtt_bind_ptes()
395 cs += (n_ptes - count) * 2; in gen8_ggtt_bind_ptes()
398 memset64((u64 *)cs, pte, n_ptes); in gen8_ggtt_bind_ptes()
399 cs += n_ptes * 2; in gen8_ggtt_bind_ptes()
402 intel_ring_advance(rq, cs); in gen8_ggtt_bind_ptes()
408 mutex_unlock(&ce->timeline->mutex); in gen8_ggtt_bind_ptes()
411 if (rq->fence.error) in gen8_ggtt_bind_ptes()
416 num_entries -= n_ptes; in gen8_ggtt_bind_ptes()
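The cs hits come from gen8_ggtt_bind_ptes(), which writes PTEs with an MI_UPDATE_GTT batch on a bind context rather than through the GSM. The dword budget it requests from intel_ring_begin() follows directly from the layout visible above: one header dword, one offset dword, then two dwords per PTE, with slots not covered by object pages padded out with the scratch PTE via memset64(). A worked sketch of that arithmetic (standalone helper, name illustrative):

#include <linux/types.h>

/*
 * Illustrative only, mirroring the emission above:
 *   *cs++ = MI_UPDATE_GTT | (2 * n_ptes);   -> 1 dword header
 *   *cs++ = offset << 12;                   -> 1 dword byte offset
 *                                              (offset is in 4 KiB pages)
 *   per PTE: lower_32_bits / upper_32_bits  -> 2 dwords each
 * hence intel_ring_begin(rq, 2 * n_ptes + 2) in the listing.
 */
static u32 example_mi_update_gtt_dwords(u32 n_ptes)
{
        return 2 * n_ptes + 2;
}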
443 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_page()
445 gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags)); in gen8_ggtt_insert_page()
447 ggtt->invalidate(ggtt); in gen8_ggtt_insert_page()
457 pte = ggtt->vm.pte_encode(addr, pat_index, flags); in gen8_ggtt_insert_page_bind()
460 return ggtt->invalidate(ggtt); in gen8_ggtt_insert_page_bind()
471 const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); in gen8_ggtt_insert_entries()
482 gte = (gen8_pte_t __iomem *)ggtt->gsm; in gen8_ggtt_insert_entries()
483 gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
484 end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
486 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
487 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
489 for_each_sgt_daddr(addr, iter, vma_res->bi.pages) in gen8_ggtt_insert_entries()
495 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
501 ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries()
509 gen8_pte_t scratch_pte = vm->scratch[0]->encode; in __gen8_ggtt_insert_entries_bind()
513 pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); in __gen8_ggtt_insert_entries_bind()
514 start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
515 end = start + vma_res->guard / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
516 if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) in __gen8_ggtt_insert_entries_bind()
520 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
521 if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages, in __gen8_ggtt_insert_entries_bind()
522 vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode)) in __gen8_ggtt_insert_entries_bind()
525 start += vma_res->node_size / I915_GTT_PAGE_SIZE; in __gen8_ggtt_insert_entries_bind()
526 if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) in __gen8_ggtt_insert_entries_bind()
543 return ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries_bind()
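Both the CPU path (gen8_ggtt_insert_entries()) and the bind path above lay a VMA out the same way: scratch PTEs for the leading guard, the object's pages for the body, then scratch PTEs again for the trailing guard. A small sketch of that split, in I915_GTT_PAGE_SIZE units, assuming start, node_size and guard are all page aligned; struct and function names are illustrative:

#include <linux/types.h>

#define EXAMPLE_GTT_PAGE_SHIFT 12      /* I915_GTT_PAGE_SIZE == SZ_4K */

/* Illustrative only: the three PTE ranges written for one bound VMA. */
struct example_ggtt_ranges {
        u64 guard_lo;   /* first leading scratch slot: start - guard */
        u64 body;       /* first object slot: start */
        u64 guard_hi;   /* first trailing scratch slot: start + node_size */
        u64 guard_len;  /* scratch slots on each side */
        u64 body_len;   /* object slots */
};

static struct example_ggtt_ranges
example_ggtt_split(u64 start, u64 node_size, u64 guard)
{
        return (struct example_ggtt_ranges) {
                .guard_lo  = (start - guard) >> EXAMPLE_GTT_PAGE_SHIFT,
                .body      = start >> EXAMPLE_GTT_PAGE_SHIFT,
                .guard_hi  = (start + node_size) >> EXAMPLE_GTT_PAGE_SHIFT,
                .guard_len = guard >> EXAMPLE_GTT_PAGE_SHIFT,
                .body_len  = node_size >> EXAMPLE_GTT_PAGE_SHIFT,
        };
}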
554 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
556 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; in gen8_ggtt_clear_range()
557 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_clear_range()
575 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_scratch_range_bind()
576 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_scratch_range_bind()
585 return ggtt->invalidate(ggtt); in gen8_ggtt_scratch_range_bind()
598 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_page()
600 iowrite32(vm->pte_encode(addr, pat_index, flags), pte); in gen6_ggtt_insert_page()
602 ggtt->invalidate(ggtt); in gen6_ggtt_insert_page()
609 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
622 gte = (gen6_pte_t __iomem *)ggtt->gsm; in gen6_ggtt_insert_entries()
623 gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
625 end = gte + vma_res->guard / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
627 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
628 end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
629 for_each_sgt_daddr(addr, iter, vma_res->bi.pages) in gen6_ggtt_insert_entries()
630 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++); in gen6_ggtt_insert_entries()
635 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
641 ggtt->invalidate(ggtt); in gen6_ggtt_insert_entries()
658 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
672 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, in bxt_vtd_ggtt_insert_page__cb()
673 arg->pat_index, 0); in bxt_vtd_ggtt_insert_page__cb()
674 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
701 gen8_ggtt_insert_entries(arg->vm, arg->vma_res, in bxt_vtd_ggtt_insert_entries__cb()
702 arg->pat_index, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
703 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
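The __cb helpers above are the work functions for the BXT VT-d workaround noted further down ("Serialize GTT updates with aperture access on BXT if VT-d is on"): in the full source the corresponding __BKL entry points drive them through stop_machine(), so the PTE update plus the bxt_vtd_ggtt_wa() flush run with every other CPU held off. A sketch of that pattern, with a hypothetical argument struct and callback standing in for the driver's own:

#include <linux/stop_machine.h>
#include <linux/types.h>

/* Illustrative only: argument bundle handed to the stop_machine() callback. */
struct example_insert_page {
        struct i915_address_space *vm;
        dma_addr_t addr;
        u64 offset;
        unsigned int pat_index;
};

static int example_insert_page__cb(void *_arg)
{
        struct example_insert_page *arg = _arg;

        /* same body as bxt_vtd_ggtt_insert_page__cb() in the listing */
        gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
                              arg->pat_index, 0);
        bxt_vtd_ggtt_wa(arg->vm);
        return 0;
}

static void example_insert_page__BKL(struct i915_address_space *vm,
                                     dma_addr_t addr, u64 offset,
                                     unsigned int pat_index, u32 unused)
{
        struct example_insert_page arg = { vm, addr, offset, pat_index };

        /* all other CPUs are quiesced while the callback runs */
        stop_machine(example_insert_page__cb, &arg, NULL);
}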
725 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; in gen6_ggtt_clear_range()
726 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen6_ggtt_clear_range()
734 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
747 if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK)) in intel_ggtt_bind_vma()
750 vma_res->bound_flags |= flags; in intel_ggtt_bind_vma()
754 if (vma_res->bi.readonly) in intel_ggtt_bind_vma()
756 if (vma_res->bi.lmem) in intel_ggtt_bind_vma()
759 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in intel_ggtt_bind_vma()
760 vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; in intel_ggtt_bind_vma()
766 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in intel_ggtt_unbind_vma()
773 * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
778 #define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
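GUC_TOP_RESERVE_SIZE is simply the span between GUC_GGTT_TOP and the 4 GiB mark, and ggtt_reserve_guc_top() below pins a node of that size at the very top of the GGTT so nothing shared with the GuC lands at an address it cannot reach. A worked sketch of the arithmetic; the numeric GUC_GGTT_TOP value is assumed from the GuC interface definitions, not taken from this listing:

#include <linux/sizes.h>
#include <linux/types.h>

#define EXAMPLE_GUC_GGTT_TOP 0xFEE00000u       /* assumed value of GUC_GGTT_TOP */

/*
 * Illustrative only: with a full 4 GiB GGTT the reserved node covers exactly
 * [GUC_GGTT_TOP, 4 GiB), i.e. SZ_4G - 0xFEE00000 = 18 MiB at the top.
 */
static u64 example_guc_top_reserve_offset(u64 ggtt_total)
{
        const u64 reserve = SZ_4G - EXAMPLE_GUC_GGTT_TOP; /* GUC_TOP_RESERVE_SIZE */

        return ggtt_total - reserve;    /* where ggtt_reserve_guc_top() places the node */
}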
785 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
788 GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE); in ggtt_reserve_guc_top()
789 offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE; in ggtt_reserve_guc_top()
791 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, in ggtt_reserve_guc_top()
795 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
803 if (drm_mm_node_allocated(&ggtt->uc_fw)) in ggtt_release_guc_top()
804 drm_mm_remove_node(&ggtt->uc_fw); in ggtt_release_guc_top()
810 if (drm_mm_node_allocated(&ggtt->error_capture)) in cleanup_init_ggtt()
811 drm_mm_remove_node(&ggtt->error_capture); in cleanup_init_ggtt()
812 mutex_destroy(&ggtt->error_mutex); in cleanup_init_ggtt()
833 * non-WOPCM memory. If GuC is not present or not in use we still need a in init_ggtt()
834 * small bias as ring wraparound at offset 0 sometimes hangs. No idea in init_ggtt()
837 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, in init_ggtt()
838 intel_wopcm_guc_size(&ggtt->vm.gt->wopcm)); in init_ggtt()
844 mutex_init(&ggtt->error_mutex); in init_ggtt()
845 if (ggtt->mappable_end) { in init_ggtt()
858 * for an error-capture, remain silent. We can afford not in init_ggtt()
865 * (write-combining allows it) add scratch page after error in init_ggtt()
868 ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE; in init_ggtt()
869 ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; in init_ggtt()
870 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
871 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
872 &ggtt->error_capture, in init_ggtt()
873 ggtt->error_capture.size, 0, in init_ggtt()
874 ggtt->error_capture.color, in init_ggtt()
875 0, ggtt->mappable_end, in init_ggtt()
878 if (drm_mm_node_allocated(&ggtt->error_capture)) { in init_ggtt()
879 u64 start = ggtt->error_capture.start; in init_ggtt()
880 u64 size = ggtt->error_capture.size; in init_ggtt()
882 ggtt->vm.scratch_range(&ggtt->vm, start, size); in init_ggtt()
883 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
897 /* Clear any non-preallocated blocks */ in init_ggtt()
898 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
899 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
902 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
903 hole_end - hole_start); in init_ggtt()
907 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
926 if (vma_res->bi.readonly) in aliasing_gtt_bind_vma()
930 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
934 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in aliasing_gtt_bind_vma()
936 vma_res->bound_flags |= flags; in aliasing_gtt_bind_vma()
942 if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND) in aliasing_gtt_unbind_vma()
943 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in aliasing_gtt_unbind_vma()
945 if (vma_res->bound_flags & I915_VMA_LOCAL_BIND) in aliasing_gtt_unbind_vma()
946 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res); in aliasing_gtt_unbind_vma()
955 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); in init_aliasing_ppgtt()
959 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
960 err = -ENODEV; in init_aliasing_ppgtt()
964 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
968 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
969 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
970 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
975 * Note we only pre-allocate as far as the end of the global in init_aliasing_ppgtt()
976 * GTT. On 48b / 4-level page-tables, the difference is very, in init_aliasing_ppgtt()
980 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
982 ggtt->alias = ppgtt; in init_aliasing_ppgtt()
983 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
985 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma); in init_aliasing_ppgtt()
986 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
988 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma); in init_aliasing_ppgtt()
989 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
991 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
995 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
997 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
1005 ppgtt = fetch_and_zero(&ggtt->alias); in fini_aliasing_ppgtt()
1009 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
1011 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in fini_aliasing_ppgtt()
1012 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in fini_aliasing_ppgtt()
1019 ret = init_ggtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1024 ret = init_aliasing_ppgtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1026 cleanup_init_ggtt(to_gt(i915)->ggtt); in i915_init_ggtt()
1036 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
1037 i915_gem_drain_freed_objects(ggtt->vm.i915); in ggtt_cleanup_hw()
1039 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
1041 ggtt->vm.skip_pte_rewrite = true; in ggtt_cleanup_hw()
1043 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in ggtt_cleanup_hw()
1044 struct drm_i915_gem_object *obj = vma->obj; in ggtt_cleanup_hw()
1055 if (drm_mm_node_allocated(&ggtt->error_capture)) in ggtt_cleanup_hw()
1056 drm_mm_remove_node(&ggtt->error_capture); in ggtt_cleanup_hw()
1057 mutex_destroy(&ggtt->error_mutex); in ggtt_cleanup_hw()
1062 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
1064 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
1065 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
1067 arch_phys_wc_del(ggtt->mtrr); in ggtt_cleanup_hw()
1069 if (ggtt->iomap.size) in ggtt_cleanup_hw()
1070 io_mapping_fini(&ggtt->iomap); in ggtt_cleanup_hw()
1074 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
1079 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; in i915_ggtt_driver_release()
1088 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
1094 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; in i915_ggtt_driver_late_release()
1096 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
1097 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
1151 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
1152 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in ggtt_probe_common()
1153 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in ggtt_probe_common()
1161 drm_dbg(&i915->drm, "Using direct GSM access\n"); in ggtt_probe_common()
1168 ggtt->gsm = ioremap_wc(phys_addr, size); in ggtt_probe_common()
1170 ggtt->gsm = ioremap(phys_addr, size); in ggtt_probe_common()
1172 if (!ggtt->gsm) { in ggtt_probe_common()
1173 drm_err(&i915->drm, "Failed to map the ggtt page table\n"); in ggtt_probe_common()
1174 return -ENOMEM; in ggtt_probe_common()
1177 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
1178 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
1180 drm_err(&i915->drm, "Scratch setup failed\n"); in ggtt_probe_common()
1182 iounmap(ggtt->gsm); in ggtt_probe_common()
1187 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
1190 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
1191 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
1203 iounmap(ggtt->gsm); in gen6_gmch_remove()
1215 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
1216 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen8_gmch_probe()
1222 return -ENXIO; in gen8_gmch_probe()
1224 ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); in gen8_gmch_probe()
1225 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen8_gmch_probe()
1234 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
1235 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen8_gmch_probe()
1236 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; in gen8_gmch_probe()
1238 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
1239 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
1240 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1241 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
1242 ggtt->vm.scratch_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
1244 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1247 * Serialize GTT updates with aperture access on BXT if VT-d is on, in gen8_gmch_probe()
1251 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
1252 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
1260 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1261 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1263 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
1268 ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind; in gen8_gmch_probe()
1269 ggtt->vm.insert_page = gen8_ggtt_insert_page_bind; in gen8_gmch_probe()
1270 ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind; in gen8_gmch_probe()
1275 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1278 if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc)) in gen8_gmch_probe()
1279 ggtt->invalidate = guc_ggtt_invalidate; in gen8_gmch_probe()
1281 ggtt->invalidate = gen8_ggtt_invalidate; in gen8_gmch_probe()
1283 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen8_gmch_probe()
1284 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen8_gmch_probe()
1287 ggtt->vm.pte_encode = mtl_ggtt_pte_encode; in gen8_gmch_probe()
1289 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
1295 * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
1296 * so the switch-case statements in these PTE encode functions are still valid.
1392 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1393 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen6_gmch_probe()
1398 return -ENXIO; in gen6_gmch_probe()
1400 ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR); in gen6_gmch_probe()
1401 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen6_gmch_probe()
1407 if (ggtt->mappable_end < (64 << 20) || in gen6_gmch_probe()
1408 ggtt->mappable_end > (512 << 20)) { in gen6_gmch_probe()
1409 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", in gen6_gmch_probe()
1410 &ggtt->mappable_end); in gen6_gmch_probe()
1411 return -ENXIO; in gen6_gmch_probe()
1417 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1419 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1420 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen6_gmch_probe()
1422 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1424 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1425 ggtt->vm.scratch_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1426 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1427 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1428 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1430 ggtt->invalidate = gen6_ggtt_invalidate; in gen6_gmch_probe()
1433 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1435 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1437 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1439 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1441 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1443 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen6_gmch_probe()
1444 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen6_gmch_probe()
1451 struct drm_i915_private *i915 = gt->i915; in ggtt_probe_hw()
1454 ggtt->vm.gt = gt; in ggtt_probe_hw()
1455 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1456 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1457 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1467 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1471 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1472 drm_err(&i915->drm, in ggtt_probe_hw()
1475 ggtt->vm.total >> 20); in ggtt_probe_hw()
1476 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1477 ggtt->mappable_end = in ggtt_probe_hw()
1478 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1481 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1482 drm_err(&i915->drm, in ggtt_probe_hw()
1485 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1486 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1490 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
1491 drm_dbg(&i915->drm, "GMADR size = %lluM\n", in ggtt_probe_hw()
1492 (u64)ggtt->mappable_end >> 20); in ggtt_probe_hw()
1493 drm_dbg(&i915->drm, "DSM size = %lluM\n", in ggtt_probe_hw()
1500 * i915_ggtt_probe_hw - Probe GGTT hardware location
1514 ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915)); in i915_ggtt_probe_hw()
1519 drm_info(&i915->drm, "VT-d active for gfx access\n"); in i915_ggtt_probe_hw()
1528 ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL); in i915_ggtt_create()
1530 return ERR_PTR(-ENOMEM); in i915_ggtt_create()
1532 INIT_LIST_HEAD(&ggtt->gt_list); in i915_ggtt_create()
1546 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1560 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_resume_vm()
1563 vm->clear_range(vm, 0, vm->total); in i915_ggtt_resume_vm()
1566 list_for_each_entry(vma, &vm->bound_list, vm_link) { in i915_ggtt_resume_vm()
1567 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_resume_vm()
1569 atomic_read(&vma->flags) & I915_VMA_BIND_MASK; in i915_ggtt_resume_vm()
1577 vma->resource->bound_flags = 0; in i915_ggtt_resume_vm()
1578 vma->ops->bind_vma(vm, NULL, vma->resource, in i915_ggtt_resume_vm()
1579 obj ? obj->pat_index : in i915_ggtt_resume_vm()
1580 i915_gem_get_pat_index(vm->i915, in i915_ggtt_resume_vm()
1585 write_domain_objs |= fetch_and_zero(&obj->write_domain); in i915_ggtt_resume_vm()
1586 obj->read_domains |= I915_GEM_DOMAIN_GTT; in i915_ggtt_resume_vm()
1598 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_resume()
1601 flush = i915_ggtt_resume_vm(&ggtt->vm); in i915_ggtt_resume()
1603 if (drm_mm_node_allocated(&ggtt->error_capture)) in i915_ggtt_resume()
1604 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start, in i915_ggtt_resume()
1605 ggtt->error_capture.size); in i915_ggtt_resume()
1607 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) in i915_ggtt_resume()
1608 intel_uc_resume_mappings(&gt->uc); in i915_ggtt_resume()
1610 ggtt->invalidate(ggtt); in i915_ggtt_resume()