/*
 * Excerpted lines (cross-referencer hits for "non", "-", "interleaved")
 * from the i915 GGTT fence-register and tiling-swizzle code; "..." marks
 * elided lines.
 */
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2015 Intel Corporation
 */

/*
 * From the fence-register overview comment:
 *
 * ... [fences] can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * [commands] ...
 */
static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
{
        return fence->ggtt->vm.i915;
}

static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
{
        return fence->ggtt->vm.gt->uncore;
}
/* i965_write_fence_reg() */
        if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
                fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
                fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
                ...
        } else {
                fence_reg_lo = FENCE_REG_965_LO(fence->id);
                fence_reg_hi = FENCE_REG_965_HI(fence->id);
                ...
        }

        val = 0;
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                ...
                /* Both ends of the fenced range are encoded in one u64. */
                val = fence->start + fence->size - I965_FENCE_PAGE;
                val <<= 32;
                val |= fence->start;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
                if (fence->tiling == I915_TILING_Y)
                        val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                ...
        }

        /*
         * To w/a incoherency with non-atomic 64-bit register updates,
         * we split the 64-bit update into two 32-bit writes. In order
         * for a partially written fence never to be sampled as valid,
         * the register is first disabled and only re-enabled by the
         * final write ...
         */
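/*
 * A condensed sketch of the split-write sequence that comment describes
 * (the helper shape is illustrative; the real code performs these writes
 * inline): disable first, enable last, with posting reads so each step
 * lands before the next.
 */
static void write_fence_reg_64(struct intel_uncore *uncore,
                               i915_reg_t lo, i915_reg_t hi, u64 val)
{
        /* Turn the fence off so a torn value can never be sampled. */
        intel_uncore_write_fw(uncore, lo, 0);
        intel_uncore_posting_read_fw(uncore, lo);

        /* The high half lands while the fence is still disabled. */
        intel_uncore_write_fw(uncore, hi, upper_32_bits(val));

        /* The low word carries the valid bit, so it is written last. */
        intel_uncore_write_fw(uncore, lo, lower_32_bits(val));
        intel_uncore_posting_read_fw(uncore, lo);
}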
/* i915_write_fence_reg() */
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                unsigned int tiling = fence->tiling;
                ...
                val = fence->start;
                ...
                val |= I915_FENCE_SIZE_BITS(fence->size);
                ...
        }
        ...
        i915_reg_t reg = FENCE_REG(fence->id);
        ... /* write val to reg, with a posting read */
/* i830_write_fence_reg() */
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                ...
                val = fence->start;
                if (fence->tiling == I915_TILING_Y)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I830_FENCE_SIZE_BITS(fence->size);
                ...
        }
        ...
        i915_reg_t reg = FENCE_REG(fence->id);
        ... /* write val to reg, with a posting read */
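/*
 * fence_write() (called from intel_ggtt_restore_fences() further down) is
 * not itself among the excerpts. A hedged sketch of its likely dispatch:
 * gen2 uses the i830 layout, gen3 the i915 layout, gen4+ the 64-bit i965
 * layout.
 */
static void fence_write_sketch(struct i915_fence_reg *fence)
{
        struct drm_i915_private *i915 = fence_to_i915(fence);

        if (GRAPHICS_VER(i915) == 2)
                i830_write_fence_reg(fence);
        else if (GRAPHICS_VER(i915) == 3)
                i915_write_fence_reg(fence);
        else
                i965_write_fence_reg(fence);

        fence->dirty = false;
}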
/* fence_update() */
        struct i915_ggtt *ggtt = fence->ggtt;
        ...
        fence->tiling = 0;
        if (vma) {
                GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
                           !i915_gem_object_get_tiling(vma->obj));

                if (!i915_vma_is_map_and_fenceable(vma))
                        return -EINVAL;
                ...
                GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
                fence->start = i915_ggtt_offset(vma);
                fence->size = vma->fence_size;
                fence->stride = i915_gem_object_get_stride(vma->obj);
                fence->tiling = i915_gem_object_get_tiling(vma->obj);
        }
        WRITE_ONCE(fence->dirty, false);

        /* Steal the register from its previous owner, if any. */
        old = xchg(&fence->vma, NULL);
        if (old) {
                ret = i915_active_wait(&fence->active);
                if (ret) {
                        fence->vma = old;
                        return ret;
                }
                ...
                if (old != vma) {
                        GEM_BUG_ON(old->fence != fence);
                        ...
                        old->fence = NULL;
                }

                list_move(&fence->link, &ggtt->fence_list);
        }
        ...
        /* Only touch the hardware if the device is currently awake. */
        wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
        ...
        WRITE_ONCE(fence->vma, vma);
        ...
        if (vma) {
                vma->fence = fence;
                list_move_tail(&fence->link, &ggtt->fence_list);
        }
        ...
        intel_runtime_pm_put(uncore->rpm, wakeref);
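/*
 * The control flow above, reduced to its shape (all toy_* names are
 * stand-ins for the elided driver calls, not real i915 symbols):
 */
struct toy_vma;
struct toy_reg { struct toy_vma *vma; };

static int toy_wait_idle(struct toy_reg *reg) { return 0; }     /* stub */
static void toy_write_hw(struct toy_reg *reg) { }               /* stub */

static int toy_fence_update(struct toy_reg *reg, struct toy_vma *vma)
{
        /* 1. Detach the register from its previous owner. */
        struct toy_vma *old = reg->vma;

        reg->vma = NULL;

        /* 2. Drain outstanding access through the old mapping. */
        if (old && toy_wait_idle(reg) < 0) {
                reg->vma = old;         /* restore on failure, as above */
                return -EINTR;
        }

        /* 3. Repoint the register and rewrite the hardware. */
        reg->vma = vma;
        toy_write_hw(reg);
        return 0;
}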
/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 */
void i915_vma_revoke_fence(struct i915_vma *vma)
{
        struct i915_fence_reg *fence = vma->fence;
        ...
        lockdep_assert_held(&vma->vm->mutex);
        if (!fence)
                return;

        GEM_BUG_ON(fence->vma != vma);
        i915_active_wait(&fence->active);
        GEM_BUG_ON(!i915_active_is_idle(&fence->active));
        GEM_BUG_ON(atomic_read(&fence->pin_count));

        fence->tiling = 0;
        WRITE_ONCE(fence->vma, NULL);
        vma->fence = NULL;
        ...
        /* Skip the register write if the device is runtime suspended. */
        with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
                fence_write(fence);
}
static bool fence_is_active(const struct i915_fence_reg *fence)
{
        return fence->vma && i915_vma_is_active(fence->vma);
}
/* fence_find() - pick a fence register to reuse, preferring idle ones */
        list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
                GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

                if (fence == active) /* now seen this fence twice */
                        active = ERR_PTR(-EAGAIN);

                /* Prefer idle fences so we do not have to wait on the GPU. */
                if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
                        ...
                        list_move_tail(&fence->link, &ggtt->fence_list);
                        continue;
                }

                if (atomic_read(&fence->pin_count))
                        continue;

                return fence;
        }

        /* Wait for completion of pending flips which consume fences. */
        if (intel_has_pending_fb_unpin(ggtt->vm.i915))
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-ENOBUFS);
/* __i915_vma_pin_fence() */
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
        struct i915_fence_reg *fence;
        struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
        ...
        lockdep_assert_held(&vma->vm->mutex);

        /* Just update our place in the LRU if our fence is getting reused. */
        if (vma->fence) {
                fence = vma->fence;
                GEM_BUG_ON(fence->vma != vma);
                atomic_inc(&fence->pin_count);
                if (!fence->dirty) {
                        list_move_tail(&fence->link, &ggtt->fence_list);
                        return 0;
                }
        } else if (set) {
                ...
                GEM_BUG_ON(atomic_read(&fence->pin_count));
                atomic_inc(&fence->pin_count);
        } else {
                return 0;
        }
        ...
        GEM_BUG_ON(fence->vma != set);
        GEM_BUG_ON(vma->fence != (set ? fence : NULL));
        ...
        /* on error: */
        atomic_dec(&fence->pin_count);
/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * ...
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
        int err;

        if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
                return 0;

        /* Fences are revoked on runtime suspend, so keep a wakeref held. */
        assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
        ...
        err = mutex_lock_interruptible(&vma->vm->mutex);
        if (err)
                return err;

        err = __i915_vma_pin_fence(vma);
        mutex_unlock(&vma->vm->mutex);

        return err;
}
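/*
 * Typical caller pattern implied by the kernel-doc above (sketch only;
 * error handling trimmed, the GTT access itself is elided):
 */
static int use_fenced_mapping(struct i915_vma *vma)
{
        int err;

        /* Caller already holds a runtime-PM wakeref, per the assert above. */
        err = i915_vma_pin_fence(vma);
        if (err)
                return err;

        /* ... access vma through the (possibly fenced) GTT mapping ... */

        i915_vma_unpin_fence(vma);
        return 0;
}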
/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @ggtt: Global GTT
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU use.
 */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
        struct i915_fence_reg *fence;
        int count;
        ...
        lockdep_assert_held(&ggtt->vm.mutex);

        /* Keep at least one fence available for the display engine. */
        count = 0;
        list_for_each_entry(fence, &ggtt->fence_list, link)
                count += !atomic_read(&fence->pin_count);
        if (count <= 1)
                return ERR_PTR(-ENOSPC);
        ...
        if (fence->vma) {
                /* Force-remove fence from VMA */
                ...
        }

        list_del(&fence->link);

        return fence;
}
/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds the fence back to the fence_list so it can be reused.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
        struct i915_ggtt *ggtt = fence->ggtt;

        lockdep_assert_held(&ggtt->vm.mutex);

        list_add(&fence->link, &ggtt->fence_list);
}
/**
 * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be
 * called after a gpu reset and on resume.
 */
void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
        int i;

        for (i = 0; i < ggtt->num_fences; i++)
                fence_write(&ggtt->fence_regs[i]);
}
/*
 * From the tiling/swizzling overview comment (excerpts):
 *
 * ... [swizzling stems from the] adjustments made to addressing of data when
 * the memory is in interleaved (dual-channel) mode ...
 *
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in) ...
 */
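/*
 * Worked example of the bit-6 swizzle described above, in the common
 * "bit 9 ^ bit 10" mode: software walking a tiled buffer through the CPU
 * must XOR bits 9 and 10 of a linear offset into bit 6 to land on the
 * byte the GPU sees. Standalone sketch, not driver code:
 */
static u32 swizzle_offset_9_10(u32 offset)
{
        u32 bit6 = (offset >> 6) ^ (offset >> 9) ^ (offset >> 10);

        return (offset & ~BIT(6)) | ((bit6 & 1) << 6);
}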
/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @ggtt: Global GGTT
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        struct drm_i915_private *i915 = ggtt->vm.i915;
        ...
        if (i915->preserve_bios_swizzle) {
                /* Keep whatever swizzle state the BIOS left behind. */
                ...
        }
        ...
        /*
         * ... [the 965-class parts have a very flexible memory]
         * configuration. It will enable dual-channel mode
         * (interleaving) on as much memory as it can:
         *
         *      0A 0B 1A 1B 1-ch 2-ch
         *      ...
         *
         * We could probably detect this based on either the DRB
         * matching, which was the case for the swizzling required in
         * the table above, or from the 1-ch value being less than
         * the minimum size of a rank.
         */
        ...
        /*
         * On 9xx chipsets, channel interleave by the CPU is
         * determined by DCC. For single-channel, neither the CPU
         * nor the GPU do swizzling. For dual channel interleaved,
         * the GPU's interleave is bit 9 and 10 for X tiled ...
         */
        ...
        /* check for L-shaped memory aka modified enhanced addressing */
        ...
        drm_err(&i915->drm, "Couldn't read from MCHBAR. ...");
        ...
        /*
         * Userspace likes to explode if it sees unknown swizzling,
         * so lie: we finish the lie when reporting through
         * the get-tiling-ioctl by reporting the physical swizzle
         * mode as unknown instead, and pin the pages so bit 17 stays put.
         */
        i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
        ...
        to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
        to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
}
/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since the state was saved with
 * i915_gem_object_save_bit_17_swizzle().
 */
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                       struct sg_table *pages)
{
        ...
        if (obj->bit_17 == NULL)
                return;
        ...
        for_each_sgt_page(page, sgt_iter, pages) {
                char new_bit_17 = page_to_phys(page) >> 17;

                if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
                        swizzle_page(page);
                        set_page_dirty(page);
                }
                i++;
        }
}
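/*
 * The per-page fixup amounts to exchanging the two 64-byte halves of
 * every 128-byte chunk of the page, undoing the channel swap caused by a
 * flipped bit 17. Standalone sketch over an already-mapped page:
 */
static void swizzle_page_sketch(void *page_vaddr)
{
        char temp[64];
        char *vaddr = page_vaddr;
        int i;

        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, vaddr + i, 64);
                memcpy(vaddr + i, vaddr + i + 64, 64);
                memcpy(vaddr + i + 64, temp, 64);
        }
}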
/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves bit 17 of each page frame number, so the swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This
 * must be called before the backing storage can be unpinned.
 */
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                         struct sg_table *pages)
{
        const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
        ...
        if (obj->bit_17 == NULL) {
                obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
                if (obj->bit_17 == NULL) {
                        drm_err(obj->base.dev,
                                "Failed to allocate memory for bit 17 record\n");
                        return;
                }
        }
        ...
        for_each_sgt_page(page, sgt_iter, pages) {
                if (page_to_phys(page) & BIT(17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
                i++;
        }
}
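/*
 * Intended pairing of the two bit-17 helpers above, per their kernel-doc;
 * a hedged sketch of the call flow, not an actual call site:
 */
static void bit17_round_trip(struct drm_i915_gem_object *obj,
                             struct sg_table *pages)
{
        /* Record bit 17 of each PFN before the pages can move. */
        i915_gem_object_save_bit_17_swizzle(obj, pages);

        /* ... pages may be swapped out and return at new addresses ... */

        /* Swap 64-byte halves on any page whose bit 17 changed. */
        i915_gem_object_do_bit_17_swizzle(obj, pages);
}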
void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        int num_fences;
        int i;

        INIT_LIST_HEAD(&ggtt->fence_list);
        INIT_LIST_HEAD(&ggtt->userfault_list);
        ...
        ggtt->fence_regs = kcalloc(num_fences,
                                   sizeof(*ggtt->fence_regs),
                                   GFP_KERNEL);
        if (!ggtt->fence_regs)
                num_fences = 0;

        /* Initialize fence registers to zero. */
        for (i = 0; i < num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];

                i915_active_init(&fence->active, NULL, NULL, 0);
                fence->ggtt = ggtt;
                fence->id = i;
                list_add_tail(&fence->link, &ggtt->fence_list);
        }
        ggtt->num_fences = num_fences;
        ...
}
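/*
 * The elided middle of intel_ggtt_init_fences() sizes the pool by
 * platform; a rough model (the generation thresholds are an assumption
 * recalled from the driver, not shown in the excerpts):
 */
static int num_fences_sketch(int graphics_ver, bool has_aperture)
{
        if (!has_aperture)
                return 0;       /* no mappable aperture: fences unused */
        if (graphics_ver >= 7)
                return 32;
        if (graphics_ver >= 4)
                return 16;
        return 8;
}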
void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
{
        int i;

        for (i = 0; i < ggtt->num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];

                i915_active_fini(&fence->active);
        }

        kfree(ggtt->fence_regs);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;

        if (GRAPHICS_VER(i915) < 5 ||
            to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
                return;
        ...
}