Lines Matching +full:cs +full:- +full:x

1 // SPDX-License-Identifier: MIT
42 err = -EIO; in request_add_sync()
55 err = -ETIMEDOUT; in request_add_spin()
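
Lines 42 and 55 are the only matches inside the two synchronous-add helpers. A minimal sketch of request_add_sync() reconstructed around line 42, assuming the standard i915 request get/add/wait/put pattern and the same HZ / 5 timeout used by check_whitelist() below; request_add_spin() follows the same shape but also waits for a spinner to start, returning -ETIMEDOUT (line 55) when it does not:

    static int request_add_sync(struct i915_request *rq, int err)
    {
            i915_request_get(rq);
            i915_request_add(rq);
            /* Line 42: a stuck request is reported as an I/O error */
            if (i915_request_wait(rq, 0, HZ / 5) < 0)
                    err = -EIO;
            i915_request_put(rq);

            return err;
    }
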
69 wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global"); in reference_lists_init()
70 gt_init_workarounds(gt, &lists->gt_wa_list); in reference_lists_init()
71 wa_init_finish(&lists->gt_wa_list); in reference_lists_init()
74 struct i915_wa_list *wal = &lists->engine[id].wa_list; in reference_lists_init()
76 wa_init_start(wal, gt, "REF", engine->name); in reference_lists_init()
81 &lists->engine[id].ctx_wa_list, in reference_lists_init()
93 intel_wa_list_free(&lists->engine[id].wa_list); in reference_lists_fini()
95 intel_wa_list_free(&lists->gt_wa_list); in reference_lists_fini()
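
Lines 69-95 together outline how the reference workaround lists are built and torn down. A sketch of reference_lists_init() filled in around the matches; the for_each_engine() iteration, the engine_init_workarounds() / __intel_engine_init_ctx_wa() calls, and the "CTX_REF" label are assumptions:

    static void
    reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
    {
            struct intel_engine_cs *engine;
            enum intel_engine_id id;

            memset(lists, 0, sizeof(*lists));

            /* Lines 69-71: snapshot the global GT workarounds */
            wa_init_start(&lists->gt_wa_list, gt, "GT_REF", "global");
            gt_init_workarounds(gt, &lists->gt_wa_list);
            wa_init_finish(&lists->gt_wa_list);

            for_each_engine(engine, gt, id) {
                    /* Line 74 */
                    struct i915_wa_list *wal = &lists->engine[id].wa_list;

                    wa_init_start(wal, gt, "REF", engine->name);
                    engine_init_workarounds(engine, wal);   /* assumed */
                    wa_init_finish(wal);

                    __intel_engine_init_ctx_wa(engine,      /* assumed */
                                               &lists->engine[id].ctx_wa_list,
                                               "CTX_REF");  /* assumed label */
            }
    }
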
101 struct intel_engine_cs *engine = ce->engine; in read_nonprivs()
102 const u32 base = engine->mmio_base; in read_nonprivs()
106 u32 srm, *cs; in read_nonprivs() local
110 result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); in read_nonprivs()
116 cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB); in read_nonprivs()
117 if (IS_ERR(cs)) { in read_nonprivs()
118 err = PTR_ERR(cs); in read_nonprivs()
121 memset(cs, 0xc5, PAGE_SIZE); in read_nonprivs()
125 vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); in read_nonprivs()
146 if (GRAPHICS_VER(engine->i915) >= 8) in read_nonprivs()
149 cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); in read_nonprivs()
150 if (IS_ERR(cs)) { in read_nonprivs()
151 err = PTR_ERR(cs); in read_nonprivs()
156 *cs++ = srm; in read_nonprivs()
157 *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i)); in read_nonprivs()
158 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; in read_nonprivs()
159 *cs++ = 0; in read_nonprivs()
161 intel_ring_advance(rq, cs); in read_nonprivs()
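
Lines 149-161 reconstruct cleanly into the per-slot capture loop of read_nonprivs(): the ring is sized for 4 dwords per RING_FORCE_TO_NONPRIV slot (line 149), and each slot is stored into consecutive u32s of the scratch vma. The loop header and the SRM opcode setup implied by line 146 are the only pieces not in the matches:

    srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
    if (GRAPHICS_VER(engine->i915) >= 8)
            srm++;  /* gen8+ SRM carries a 64-bit address, one extra dword */

    for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
            *cs++ = srm;
            *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
            *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
            *cs++ = 0;
    }
    intel_ring_advance(rq, cs);
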
180 i915_reg_t reg = i < engine->whitelist.count ? in get_whitelist_reg()
181 engine->whitelist.list[i].reg : in get_whitelist_reg()
182 RING_NOPID(engine->mmio_base); in get_whitelist_reg()
196 pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", in print_results()
203 struct intel_engine_cs *engine = ce->engine; in check_whitelist()
216 intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */ in check_whitelist()
219 if (intel_gt_is_wedged(engine->gt)) in check_whitelist()
220 err = -EIO; in check_whitelist()
236 pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", in check_whitelist()
239 err = -EINVAL; in check_whitelist()
253 intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); in do_device_reset()
264 /* Currently a no-op as the reset is handled by GuC */ in do_guc_reset()
308 engine->whitelist.count, engine->name, name); in check_whitelist_across_reset()
314 err = igt_spinner_init(&spin, engine->gt); in check_whitelist_across_reset()
331 err = -ETIMEDOUT; in check_whitelist_across_reset()
335 with_intel_runtime_pm(engine->uncore->rpm, wakeref) in check_whitelist_across_reset()
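
The matches from check_whitelist_across_reset() (lines 308-335) are sparse; a condensed, reconstructed outline of the flow they imply, with the ordering of the re-checks an assumption:

    /*
     * 1. Read and verify the whitelist in a fresh context (check_whitelist()).
     * 2. Start a spinner so the reset lands on a busy engine; if it never
     *    starts, fail with -ETIMEDOUT (line 331).
     * 3. Perform the device/engine reset under a runtime-pm wakeref
     *    (with_intel_runtime_pm(), line 335).
     * 4. Re-verify the whitelist, both in the context that survived the
     *    reset and in a freshly created one.
     */
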
384 obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE); in create_batch()
420 enum intel_platform platform = INTEL_INFO(engine->i915)->platform; in wo_register()
438 reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; in timestamp()
461 int count = engine->whitelist.count; in whitelist_writable_count()
464 for (i = 0; i < engine->whitelist.count; i++) { in whitelist_writable_count()
465 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in whitelist_writable_count()
468 count--; in whitelist_writable_count()
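
whitelist_writable_count() (lines 461-468) reduces to counting whitelist slots and discounting the read-only ones; the ro_register() guard is assumed from the decrement at line 468:

    static int whitelist_writable_count(struct intel_engine_cs *engine)
    {
            int count = engine->whitelist.count;
            int i;

            for (i = 0; i < engine->whitelist.count; i++) {
                    u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                    if (ro_register(reg))  /* assumed read-only check */
                            count--;
            }

            return count;
    }
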
502 struct intel_engine_cs *engine = ce->engine; in check_dirty_whitelist()
506 u32 *cs, *results; in check_dirty_whitelist() local
509 scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz); in check_dirty_whitelist()
513 batch = create_batch(ce->vm); in check_dirty_whitelist()
519 for (i = 0; i < engine->whitelist.count; i++) { in check_dirty_whitelist()
520 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in check_dirty_whitelist()
539 cs = NULL; in check_dirty_whitelist()
540 err = i915_gem_object_lock(scratch->obj, &ww); in check_dirty_whitelist()
542 err = i915_gem_object_lock(batch->obj, &ww); in check_dirty_whitelist()
548 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); in check_dirty_whitelist()
549 if (IS_ERR(cs)) { in check_dirty_whitelist()
550 err = PTR_ERR(cs); in check_dirty_whitelist()
554 results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in check_dirty_whitelist()
565 if (GRAPHICS_VER(engine->i915) >= 8) in check_dirty_whitelist()
568 pr_debug("%s: Writing garbage to %x\n", in check_dirty_whitelist()
569 engine->name, reg); in check_dirty_whitelist()
572 *cs++ = srm; in check_dirty_whitelist()
573 *cs++ = reg; in check_dirty_whitelist()
574 *cs++ = lower_32_bits(addr); in check_dirty_whitelist()
575 *cs++ = upper_32_bits(addr); in check_dirty_whitelist()
580 *cs++ = MI_LOAD_REGISTER_IMM(1); in check_dirty_whitelist()
581 *cs++ = reg; in check_dirty_whitelist()
582 *cs++ = values[v]; in check_dirty_whitelist()
585 *cs++ = srm; in check_dirty_whitelist()
586 *cs++ = reg; in check_dirty_whitelist()
587 *cs++ = lower_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
588 *cs++ = upper_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
593 *cs++ = MI_LOAD_REGISTER_IMM(1); in check_dirty_whitelist()
594 *cs++ = reg; in check_dirty_whitelist()
595 *cs++ = ~values[v]; in check_dirty_whitelist()
598 *cs++ = srm; in check_dirty_whitelist()
599 *cs++ = reg; in check_dirty_whitelist()
600 *cs++ = lower_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
601 *cs++ = upper_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
604 GEM_BUG_ON(idx * sizeof(u32) > scratch->size); in check_dirty_whitelist()
606 /* LRM original -- don't leave garbage in the context! */ in check_dirty_whitelist()
607 *cs++ = lrm; in check_dirty_whitelist()
608 *cs++ = reg; in check_dirty_whitelist()
609 *cs++ = lower_32_bits(addr); in check_dirty_whitelist()
610 *cs++ = upper_32_bits(addr); in check_dirty_whitelist()
612 *cs++ = MI_BATCH_BUFFER_END; in check_dirty_whitelist()
614 i915_gem_object_flush_map(batch->obj); in check_dirty_whitelist()
615 i915_gem_object_unpin_map(batch->obj); in check_dirty_whitelist()
616 intel_gt_chipset_flush(engine->gt); in check_dirty_whitelist()
617 cs = NULL; in check_dirty_whitelist()
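
Read in order, lines 572-612 give the complete layout of the check_dirty_whitelist() batch and of the scratch page it writes. A commented summary (idx counts u32 result slots, v walks the values[] table):

    /*
     * results[0]            <- SRM: original register value (lines 572-575)
     * for each v:
     *     LRI reg, values[v];   results[idx++] <- SRM reg (lines 580-588)
     *     LRI reg, ~values[v];  results[idx++] <- SRM reg (lines 593-601)
     * (line 604: GEM_BUG_ON guards idx against overrunning the scratch)
     * LRM reg <- results[0]  -- restore, don't leave garbage (lines 606-610)
     * MI_BATCH_BUFFER_END (line 612)
     */
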
625 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in check_dirty_whitelist()
626 err = engine->emit_init_breadcrumb(rq); in check_dirty_whitelist()
640 err = engine->emit_bb_start(rq, in check_dirty_whitelist()
649 pr_err("%s: Futzing %x timedout; cancelling test\n", in check_dirty_whitelist()
650 engine->name, reg); in check_dirty_whitelist()
651 intel_gt_set_wedged(engine->gt); in check_dirty_whitelist()
655 GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); in check_dirty_whitelist()
660 pr_err("%s: Unable to write to whitelisted register %x\n", in check_dirty_whitelist()
661 engine->name, reg); in check_dirty_whitelist()
662 err = -EINVAL; in check_dirty_whitelist()
692 pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", in check_dirty_whitelist()
693 engine->name, err, reg); in check_dirty_whitelist()
696 pr_info("%s: Whitelisted read-only register: %x, original value %08x\n", in check_dirty_whitelist()
697 engine->name, reg, results[0]); in check_dirty_whitelist()
699 pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", in check_dirty_whitelist()
700 engine->name, reg, results[0], rsvd); in check_dirty_whitelist()
711 pr_info("Wrote %08x, read %08x, expect %08x\n", in check_dirty_whitelist()
722 pr_info("Wrote %08x, read %08x, expect %08x\n", in check_dirty_whitelist()
727 err = -EINVAL; in check_dirty_whitelist()
730 i915_gem_object_unpin_map(scratch->obj); in check_dirty_whitelist()
732 if (cs) in check_dirty_whitelist()
733 i915_gem_object_unpin_map(batch->obj); in check_dirty_whitelist()
737 if (err == -EDEADLK) { in check_dirty_whitelist()
747 if (igt_flush_test(engine->i915)) in check_dirty_whitelist()
748 err = -EIO; in check_dirty_whitelist()
764 if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ in live_dirty_whitelist()
771 if (engine->whitelist.count == 0) in live_dirty_whitelist()
798 if (engine->whitelist.count == 0) in live_reset_whitelist()
845 struct intel_engine_cs *engine = ce->engine; in read_whitelisted_registers()
848 u32 srm, *cs; in read_whitelisted_registers() local
859 if (GRAPHICS_VER(engine->i915) >= 8) in read_whitelisted_registers()
862 cs = intel_ring_begin(rq, 4 * engine->whitelist.count); in read_whitelisted_registers()
863 if (IS_ERR(cs)) { in read_whitelisted_registers()
864 err = PTR_ERR(cs); in read_whitelisted_registers()
868 for (i = 0; i < engine->whitelist.count; i++) { in read_whitelisted_registers()
870 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in read_whitelisted_registers()
875 *cs++ = srm; in read_whitelisted_registers()
876 *cs++ = reg; in read_whitelisted_registers()
877 *cs++ = lower_32_bits(offset); in read_whitelisted_registers()
878 *cs++ = upper_32_bits(offset); in read_whitelisted_registers()
880 intel_ring_advance(rq, cs); in read_whitelisted_registers()
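
Lines 868-880 reassemble into the capture loop of read_whitelisted_registers(); the per-slot offset computation, the vma parameter name (results), and the masking of the non-priv access bits are filled-in assumptions:

    for (i = 0; i < engine->whitelist.count; i++) {
            u64 offset = i915_vma_offset(results) + sizeof(u32) * i; /* assumed */
            u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

            /* assumed: strip the RD/WR access bits before emitting the SRM */
            reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

            *cs++ = srm;
            *cs++ = reg;
            *cs++ = lower_32_bits(offset);
            *cs++ = upper_32_bits(offset);
    }
    intel_ring_advance(rq, cs);
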
888 struct intel_engine_cs *engine = ce->engine; in scrub_whitelisted_registers()
892 u32 *cs; in scrub_whitelisted_registers() local
894 batch = create_batch(ce->vm); in scrub_whitelisted_registers()
898 cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC); in scrub_whitelisted_registers()
899 if (IS_ERR(cs)) { in scrub_whitelisted_registers()
900 err = PTR_ERR(cs); in scrub_whitelisted_registers()
904 *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine)); in scrub_whitelisted_registers()
905 for (i = 0; i < engine->whitelist.count; i++) { in scrub_whitelisted_registers()
906 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in scrub_whitelisted_registers()
914 *cs++ = reg; in scrub_whitelisted_registers()
915 *cs++ = 0xffffffff; in scrub_whitelisted_registers()
917 *cs++ = MI_BATCH_BUFFER_END; in scrub_whitelisted_registers()
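
The scrub batch (lines 904-917) is one MI_LOAD_REGISTER_IMM covering every writable slot, writing 0xffffffff into each; the read-only skip and the address masking between lines 906 and 914 are assumptions consistent with whitelist_writable_count() above:

    *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
    for (i = 0; i < engine->whitelist.count; i++) {
            u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

            if (ro_register(reg))  /* assumed: read-only slots are skipped */
                    continue;

            reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;  /* assumed */

            *cs++ = reg;
            *cs++ = 0xffffffff;
    }
    *cs++ = MI_BATCH_BUFFER_END;
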
919 i915_gem_object_flush_map(batch->obj); in scrub_whitelisted_registers()
920 intel_gt_chipset_flush(engine->gt); in scrub_whitelisted_registers()
928 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in scrub_whitelisted_registers()
929 err = engine->emit_init_breadcrumb(rq); in scrub_whitelisted_registers()
939 err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0); in scrub_whitelisted_registers()
945 i915_gem_object_unpin_map(batch->obj); in scrub_whitelisted_registers()
963 while (count--) { in find_reg()
964 if (GRAPHICS_VER(i915) == tbl->graphics_ver && in find_reg()
965 i915_mmio_reg_offset(tbl->reg) == offset) in find_reg()
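
find_reg() (lines 963-965) is a linear walk over a small per-generation register table. A sketch; the regmask struct name, its exact fields beyond graphics_ver/reg, and the signature are assumptions:

    static const struct regmask *
    find_reg(struct drm_i915_private *i915, i915_reg_t reg,
             const struct regmask *tbl, unsigned long count)
    {
            u32 offset = i915_mmio_reg_offset(reg);

            while (count--) {
                    if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
                        i915_mmio_reg_offset(tbl->reg) == offset)
                            return tbl;
                    tbl++;
            }

            return NULL;
    }
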
987 if (a != b && !pardon_reg(engine->i915, reg)) { in result_eq()
988 pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n", in result_eq()
1009 if (a == b && !writeonly_reg(engine->i915, reg)) { in result_neq()
1010 pr_err("Whitelist register 0x%4x:%08x was unwritable\n", in result_neq()
1029 a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB); in check_whitelisted_registers()
1033 b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB); in check_whitelisted_registers()
1040 for (i = 0; i < engine->whitelist.count; i++) { in check_whitelisted_registers()
1041 const struct i915_wa *wa = &engine->whitelist.list[i]; in check_whitelisted_registers()
1043 if (i915_mmio_reg_offset(wa->reg) & in check_whitelisted_registers()
1047 if (!fn(engine, a[i], b[i], wa->reg)) in check_whitelisted_registers()
1048 err = -EINVAL; in check_whitelisted_registers()
1051 i915_gem_object_unpin_map(B->obj); in check_whitelisted_registers()
1053 i915_gem_object_unpin_map(A->obj); in check_whitelisted_registers()
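
check_whitelisted_registers() pins both capture buffers (lines 1029 and 1033) and compares them slot by slot through a caller-supplied predicate fn, which is result_eq() across a device reset and result_neq() after a scrub. The core loop, with the skip condition completed from the truncated match at line 1043:

    for (i = 0; i < engine->whitelist.count; i++) {
            const struct i915_wa *wa = &engine->whitelist.list[i];

            /* assumed: read-access-only slots are not compared */
            if (i915_mmio_reg_offset(wa->reg) &
                RING_FORCE_TO_NONPRIV_ACCESS_RD)
                    continue;

            if (!fn(engine, a[i], b[i], wa->reg))
                    err = -EINVAL;
    }
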
1072 if (!intel_engines_has_context_isolation(gt->i915)) in live_isolated_whitelist()
1077 __vm_create_scratch_for_read_pinned(gt->vm, 4096); in live_isolated_whitelist()
1084 __vm_create_scratch_for_read_pinned(gt->vm, 4096); in live_isolated_whitelist()
1095 if (!engine->kernel_context->vm) in live_isolated_whitelist()
1159 if (igt_flush_test(gt->i915)) in live_isolated_whitelist()
1160 err = -EIO; in live_isolated_whitelist()
1173 ok &= wa_list_verify(gt, &lists->gt_wa_list, str); in verify_wa_lists()
1183 &lists->engine[id].wa_list, in verify_wa_lists()
1187 &lists->engine[id].ctx_wa_list, in verify_wa_lists()
1209 return -ENOMEM; in live_gpu_reset_workarounds()
1214 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in live_gpu_reset_workarounds()
1228 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in live_gpu_reset_workarounds()
1232 return ok ? 0 : -ESRCH; in live_gpu_reset_workarounds()
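
The live_gpu_reset_workarounds() matches (lines 1209-1232) fit the shape of the other live tests: allocate the reference lists, take a runtime-pm wakeref, verify before and after a full GT reset. A condensed sketch; the verify labels and any reset-locking around this sequence are assumptions:

    lists = kzalloc(sizeof(*lists), GFP_KERNEL);
    if (!lists)
            return -ENOMEM;

    wakeref = intel_runtime_pm_get(gt->uncore->rpm);

    reference_lists_init(gt, lists);
    ok = verify_wa_lists(gt, lists, "before reset");        /* assumed label */
    if (ok) {
            intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
            ok = verify_wa_lists(gt, lists, "after reset"); /* assumed label */
    }
    reference_lists_fini(gt, lists);

    intel_runtime_pm_put(gt->uncore->rpm, wakeref);
    kfree(lists);

    return ok ? 0 : -ESRCH;
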
1253 return -ENOMEM; in live_engine_reset_workarounds()
1256 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in live_engine_reset_workarounds()
1266 pr_info("Verifying after %s reset...\n", engine->name); in live_engine_reset_workarounds()
1281 ret = -ESRCH; in live_engine_reset_workarounds()
1287 pr_err("%s: Reset failed while idle\n", engine->name); in live_engine_reset_workarounds()
1293 ret = -ESRCH; in live_engine_reset_workarounds()
1298 ret = igt_spinner_init(&spin, engine->gt); in live_engine_reset_workarounds()
1311 pr_err("%s: Spinner failed to start\n", engine->name); in live_engine_reset_workarounds()
1318 ret = -ETIMEDOUT; in live_engine_reset_workarounds()
1326 engine->name); in live_engine_reset_workarounds()
1342 ret = -ESRCH; in live_engine_reset_workarounds()
1356 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in live_engine_reset_workarounds()
1360 igt_flush_test(gt->i915); in live_engine_reset_workarounds()