// SPDX-License-Identifier: MIT

/*
 * The per-platform tables are u8-encoded in @data. Decode @data and set the
 * decoded register offsets and commands in @regs. Command bytes include:
 * [7]: create NOPs - the number of NOPs is encoded in the lower bits
 */
/* set_offsets() */
#define NOP(x) (BIT(7) | (x))
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
	(((x) >> 2) & 0x7f)
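/*
 * Worked example, derived from the macros above rather than quoted from
 * the source. Register offsets are stored as dword indices (>> 2), seven
 * payload bits per byte, with BIT(7) flagging that another byte follows:
 *
 *   REG(0x84)    -> 0x21        single byte; 0x21 << 2 == 0x84
 *   REG16(0x3a8) -> 0x81, 0x6a  ((0x81 & 0x7f) << 7 | 0x6a) == 0xea,
 *                               and 0xea << 2 == 0x3a8
 *
 * In the separate command-decoding phase, NOP(3) encodes as
 * BIT(7) | 3 == 0x83, i.e. "emit three MI_NOOPs".
 */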
	const u32 base = engine->mmio_base;

	if (GRAPHICS_VER(engine->i915) >= 11)

	} while (--count);

	if (GRAPHICS_VER(engine->i915) >= 11)
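/*
 * A minimal, self-contained sketch of the address-decode step implied by
 * the encoding above; an illustration under the assumptions stated in the
 * macros, not the verbatim set_offsets() loop.
 */
static u32 decode_reg_offset(const u8 **pdata, u32 base)
{
	u32 offset = 0;
	u8 v;

	do {
		v = *(*pdata)++;		     /* consume one encoded byte */
		offset = (offset << 7) | (v & 0x7f); /* 7 payload bits per byte */
	} while (v & BIT(7));			     /* BIT(7): more bytes follow */

	return base + (offset << 2);		     /* dword index back to bytes */
}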
/* reg_offsets() */
	GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&

	if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {
		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
		else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		else if (GRAPHICS_VER(engine->i915) >= 12)
		else if (GRAPHICS_VER(engine->i915) >= 11)
		else if (GRAPHICS_VER(engine->i915) >= 9)

		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		else if (GRAPHICS_VER(engine->i915) >= 12)
		else if (GRAPHICS_VER(engine->i915) >= 9)
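/*
 * For orientation: GRAPHICS_VER_FULL() compares packed major.minor IP
 * versions, where IP_VER(12, 55) corresponds to the Xe_HPG (DG2-class)
 * graphics IP and IP_VER(12, 70) to Xe_LPG (Meteor Lake-class). Each
 * ladder above selects the newest matching per-platform offsets table.
 */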
/* lrc_ring_mi_mode() */
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
	else if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 9)
	else if (engine->class == RENDER_CLASS)

	return -1;

/* lrc_ring_bb_offset() */
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
	else if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 9)
	else if (GRAPHICS_VER(engine->i915) >= 8 &&
		 engine->class == RENDER_CLASS)

	return -1;

/* lrc_ring_gpr0() */
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
	else if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 9)
	else if (engine->class == RENDER_CLASS)

	return -1;

/* lrc_ring_wa_bb_per_ctx() */
	if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)

	return -1;

/* lrc_ring_indirect_ptr() */
	int x;

	x = lrc_ring_wa_bb_per_ctx(engine);
	if (x < 0)
		return x;

	return x + 2;

/* lrc_ring_indirect_offset() */
	int x;

	x = lrc_ring_indirect_ptr(engine);
	if (x < 0)
		return x;

	return x + 2;
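/*
 * The "+ 2" chaining works because register state in the context image is
 * laid out as MI_LOAD_REGISTER_IMM (offset, value) pairs: stepping two
 * dwords moves from one register slot to the next, so INDIRECT_CTX_PTR
 * sits right after WA_BB_PER_CTX, and INDIRECT_CTX_OFFSET right after
 * that. This is also why code below reads and writes regs[x + 1]:
 * index x is the register-offset dword and x + 1 is its value dword.
 */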
/* lrc_ring_cmd_buf_cctl() */
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))

	else if (engine->class != RENDER_CLASS)
		return -1;
	else if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 11)

	return -1;

/* lrc_ring_indirect_offset_default() */
	if (GRAPHICS_VER(engine->i915) >= 12)
	else if (GRAPHICS_VER(engine->i915) >= 11)
	else if (GRAPHICS_VER(engine->i915) >= 9)
	else if (GRAPHICS_VER(engine->i915) >= 8)

	GEM_BUG_ON(GRAPHICS_VER(engine->i915) < 8);

/* lrc_setup_bb_per_ctx() */
	GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);

/* lrc_setup_indirect_ctx() */
	GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);

	GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
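/*
 * A sketch of what lrc_setup_indirect_ctx() fills in around those
 * assertions, assuming the regs[x + 1] value-slot convention noted above
 * and that the low bits of the pointer dword carry the batch size in
 * cachelines (illustrative, not the verbatim implementation):
 */
static void sketch_setup_indirect_ctx(u32 *regs,
				      const struct intel_engine_cs *engine,
				      u32 ctx_bb_ggtt_addr, u32 size)
{
	/* pointer dword: cacheline-aligned GGTT address | size in cachelines */
	regs[lrc_ring_indirect_ptr(engine) + 1] =
		ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);

	/* offset dword: the platform's default indirect-ctx offset field */
	regs[lrc_ring_indirect_offset(engine) + 1] =
		lrc_ring_indirect_offset_default(engine) << 6;
}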
/* ctx_needs_runalone() */
	/*
	 * Protected contexts require setting the LRC run-alone bit, or else
	 * the encryption will not happen.
	 */
	if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) &&
	    (ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) {

		gem_ctx = rcu_dereference(ce->gem_context);

			ctx_is_protected = gem_ctx->uses_protected_content;

/* init_common_regs() */
	if (GRAPHICS_VER(engine->i915) < 11)

	regs[CTX_TIMESTAMP] = ce->stats.runtime.last;

	if (loc != -1)

/* init_wa_bb_regs() */
	const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;

	if (wa_ctx->per_ctx.size) {
		const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);

		GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
			(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;

	if (wa_ctx->indirect_ctx.size) {
			i915_ggtt_offset(wa_ctx->vma) +
			wa_ctx->indirect_ctx.offset,
			wa_ctx->indirect_ctx.size);
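/*
 * The "| 0x01" on the per-context batch pointer above sets its low bit,
 * which (assuming the usual PER_CTX_BB layout, where bit 0 is the valid
 * flag) tells the hardware the per-context workaround batch address is
 * populated and should be executed on context restore.
 */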
/* init_ppgtt_regs() */
	if (i915_vm_is_4lvl(&ppgtt->vm)) {

/* vm_alias() */
	return i915_vm_to_ggtt(vm)->alias;

/* __reset_stop_ring() */
	int x;

	x = lrc_ring_mi_mode(engine);
	if (x != -1) {
		regs[x + 1] &= ~STOP_RING;
		regs[x + 1] |= STOP_RING << 16;
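/*
 * RING_MI_MODE is a masked register: the high 16 bits of a write select
 * which of the low 16 bits actually take effect. Clearing STOP_RING in
 * the value half while setting STOP_RING << 16 in the mask half is the
 * in-image equivalent of _MASKED_BIT_DISABLE(STOP_RING), i.e. "un-stop
 * the ring when this context image is loaded".
 */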
/* __lrc_init_regs() */
	init_ppgtt_regs(regs, vm_alias(ce->vm));

/* lrc_init_regs() */
	__lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);

/* lrc_reset_regs() */
	__reset_stop_ring(ce->lrc_reg_state, engine);

/* set_redzone() */
	vaddr += engine->context_size;

/* check_redzone() */
	vaddr += engine->context_size;

		drm_err_once(&engine->i915->drm,
			     engine->name);

/* context_wa_bb_offset() */
	return PAGE_SIZE * ce->wa_bb_page;

/* context_wabb() */
	GEM_BUG_ON(!ce->wa_bb_page);

	ptr = ce->lrc_reg_state;
	ptr -= LRC_STATE_OFFSET; /* back to start of context image */

/* lrc_init_state() */
	if (ce->default_state) {
		shmem_read(ce->default_state, 0, state, engine->context_size);
		__set_bit(CONTEXT_VALID_BIT, &ce->flags);

	/* Clear the ppHWSP (inc. per-context counters) */

	if (ce->wa_bb_page)

/* lrc_indirect_bb() */
	return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);

static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
	*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;
	*cs++ = 0; /* No predication */

	*cs++ = MI_BATCH_BUFFER_END | BIT(15);
	*cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
	*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;
	*cs++ = 1; /* enable predication before the next BB */

	*cs++ = MI_BATCH_BUFFER_END;
	GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA);

	return cs;
}
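/*
 * Structure of the batch above: the first MI_STORE_DWORD_IMM zeroes the
 * DG2_PREDICATE_RESULT_WA scratch dword so the following work runs
 * unpredicated, and MI_BATCH_BUFFER_END | BIT(15) is the flagged end
 * variant this workaround relies on. After MI_SET_PREDICATE_DISABLE,
 * the second store re-arms predication (value 1) for the next batch
 * before the final, ordinary MI_BATCH_BUFFER_END.
 */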
/* __lrc_alloc_state() */
	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);

	if (GRAPHICS_VER(engine->i915) >= 12) {
		ce->wa_bb_page = context_size / PAGE_SIZE;

		ce->parallel.guc.parent_page = context_size / PAGE_SIZE;

		obj = i915_gem_object_create_lmem(engine->i915, context_size,

		obj = i915_gem_object_create_shmem(engine->i915, context_size);

	if (intel_gt_needs_wa_22016122933(engine->gt))

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
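/*
 * Resulting object layout on GRAPHICS_VER >= 12, as implied by the
 * running context_size arithmetic above (a sketch, not from the source):
 *
 *   [0, engine->context_size)   HW context image (ppHWSP + register state)
 *   [ce->wa_bb_page]            extra page for the per-ctx/indirect WA batches
 *   [parallel.guc.parent_page]  extra pages for GuC parallel-submit state
 *
 * Each feature records the current context_size in pages as its page
 * index and then grows context_size, so the offsets fall out naturally.
 */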
/* pinned_timeline() */
	struct intel_timeline *tl = fetch_and_zero(&ce->timeline);

/* lrc_alloc() */
	GEM_BUG_ON(ce->state);

	ce->default_state = engine->default_state;

	ring = intel_engine_create_ring(engine, ce->ring_size);

	if (!page_mask_bits(ce->timeline)) {

	if (unlikely(ce->timeline))

	tl = intel_timeline_create(engine->gt);

	ce->timeline = tl;

	ce->ring = ring;
	ce->state = vma;

/* lrc_reset() */
	intel_ring_reset(ce->ring, ce->ring->emit);

	lrc_init_regs(ce, ce->engine, true);
	ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);

/* lrc_pre_pin() */
	GEM_BUG_ON(!ce->state);
	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));

	*vaddr = i915_gem_object_pin_map(ce->state->obj,
					 intel_gt_coherent_map_type(ce->engine->gt,
								    ce->state->obj,

/* lrc_pin() */
	ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;

	if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))

	ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);

/* lrc_unpin() */
	if (unlikely(ce->parallel.last_rq)) {
		i915_request_put(ce->parallel.last_rq);
		ce->parallel.last_rq = NULL;

	check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
		      ce->engine);

/* lrc_post_unpin() */
	i915_gem_object_unpin_map(ce->state->obj);

/* lrc_fini() */
	if (!ce->state)

	intel_ring_put(fetch_and_zero(&ce->ring));
	i915_vma_put(fetch_and_zero(&ce->state));

/* lrc_destroy() */
	GEM_BUG_ON(!i915_active_is_idle(&ce->active));

static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
	*cs++ = 0;

	*cs++ = MI_LOAD_REGISTER_REG |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));

	*cs++ = MI_LOAD_REGISTER_REG |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));

	return cs;
}
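/*
 * What the commands above accomplish, read from the visible arguments:
 * an MI_LOAD_REGISTER_MEM loads the CTX_TIMESTAMP value saved in this
 * context's register state image into CS GPR0, then two
 * MI_LOAD_REGISTER_REG copies move GPR0 back into RING_CTX_TIMESTAMP,
 * issued twice as part of the workaround this helper is named for.
 */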
static u32 *
gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
{
	GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
		(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
	*cs++ = 0;

	return cs;
}

static u32 *
gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
{
	GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
		(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
	*cs++ = 0;

	*cs++ = MI_LOAD_REGISTER_REG |
	*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
	*cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));

	return cs;
}

static u32 *
dg2_emit_draw_watermark_setting(u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
	*cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);

	return cs;
}

static u32 *
gen12_invalidate_state_cache(u32 *cs)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN12_CS_DEBUG_MODE2);
	*cs++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
	return cs;
}
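/*
 * MI_LOAD_REGISTER_IMM(n) emits a header dword followed by n
 * (register offset, value) pairs, so each of the two helpers above is a
 * complete 3-dword LRI. Note the masked-register write again:
 * _MASKED_BIT_ENABLE(x) expands to ((x) << 16 | (x)), setting both the
 * bit and its write-enable mask, the counterpart of the STOP_RING
 * handling in __reset_stop_ring().
 */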
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
	cs = gen12_emit_timestamp_wa(ce, cs);
	cs = gen12_emit_cmd_buf_wa(ce, cs);
	cs = gen12_emit_restore_scratch(ce, cs);

	if (IS_DG2_G11(ce->engine->i915))
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);

	cs = gen12_emit_aux_table_inv(ce->engine, cs);

	if (IS_GFX_GT_IP_RANGE(ce->engine->gt, IP_VER(12, 0), IP_VER(12, 10)))
		cs = gen12_invalidate_state_cache(cs);

	if (IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(ce->engine->i915))
		cs = dg2_emit_draw_watermark_setting(cs);

	return cs;
}

static u32 *
gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
{
	cs = gen12_emit_timestamp_wa(ce, cs);
	cs = gen12_emit_restore_scratch(ce, cs);

	if (IS_DG2_G11(ce->engine->i915))
		if (ce->engine->class == COMPUTE_CLASS)
			cs = gen8_emit_pipe_control(cs,

	return gen12_emit_aux_table_inv(ce->engine, cs);
}

static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
{
	struct intel_gt *gt = ce->engine->gt;
	int mocs = gt->mocs.uc_index << 1;

	/*
	 * BG0    -> 5100000E
	 * BG1    -> 0000003F (Dest pitch)
	 * BG2    -> 00000000 (X1, Y1) = (0, 0)
	 * BG3    -> 00040001 (X2, Y2) = (1, 4)
	 * BG4    -> scratch
	 * BG5    -> scratch
	 * BG6-12 -> 00000000
	 * BG13   -> 20004004 (Surf. Width = 2, Surf. Height = 5)
	 * BG14   -> 00000010 (Qpitch = 4)
	 * BG15   -> 00000000
	 */
	*cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
	*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
	*cs++ = 0;
	*cs++ = 4 << 16 | 1;
	*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
	*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0x20004004;
	*cs++ = 0x10;
	*cs++ = 0;

	return cs;
}
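/*
 * Dword accounting for the blit above: the length field of a GPU command
 * header excludes the first two dwords, so XY_FAST_COLOR_BLT_CMD | (16 - 2)
 * declares a 16-dword instruction, and exactly sixteen *cs++ stores
 * follow, matching the BG0-BG15 layout in the comment.
 */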
static u32 *
xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
{
	if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
		cs = xehp_emit_fastcolor_blt_wabb(ce, cs);

	return cs;
}

/* setup_per_ctx_bb() */
	u32 *cs;

	cs = emit(ce, start);

	*cs++ = MI_BATCH_BUFFER_END;

	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
	lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,

/* setup_indirect_ctx_bb() */
	u32 *cs;

	cs = emit(ce, start);
	GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
	while ((unsigned long)cs % CACHELINE_BYTES)
		*cs++ = MI_NOOP;

	GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start));

	lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
			       (cs - start) * sizeof(*cs));
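/*
 * The MI_NOOP padding loop exists because the INDIRECT_CTX pointer dword
 * encodes the batch size in whole cachelines (see the setup sketch after
 * lrc_setup_indirect_ctx() above), so the emitted batch must end on a
 * CACHELINE_BYTES boundary before (cs - start) * sizeof(*cs) is passed in.
 */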
/*
 * The context descriptor, from LSB to MSB:
 *
 *      bits  0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
 *      bits 53-54: mbz, reserved for use by hardware
 *      bits 55-63: group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *      bits 32-36: reserved
 *      bits 37-47: SW context ID
 *      bits 55-60: SW counter
 *      bits 61-63: engine class
 *
 * On Xe_HP and later, the upper dword changes again:
 *
 *      bits 32-37: virtual function number
 *      bits 39-54: SW context ID
 *      bits 55-57: reserved
 *      bits 58-63: SW counter
 */

/* lrc_descriptor() */
	if (i915_vm_is_4lvl(ce->vm))

	if (GRAPHICS_VER(ce->vm->i915) == 8)

	return i915_ggtt_offset(ce->state) | desc;
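/*
 * The final OR is safe because i915_ggtt_offset() returns a page-aligned
 * address: its low 12 bits are zero, exactly the bits 0-11 flag field in
 * the layout above, so the GEN8_CTX_* flags in desc and the LRCA never
 * overlap.
 */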
/* lrc_update_regs() */
	struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;

	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));

	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);

	regs[CTX_RING_TAIL] = ring->tail;
	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;

	if (engine->class == RENDER_CLASS) {
			intel_sseu_make_rpcs(engine->gt, &ce->sseu);

	if (ce->wa_bb_page) {
		u32 *(*fn)(const struct intel_context *ce, u32 *cs);

		if (ce->engine->class == RENDER_CLASS)

		GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);

/* lrc_update_offsets() */
	set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);

/* lrc_check_regs() */
	const struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;
	int x;

	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
		       engine->name,
		       i915_ggtt_offset(ring->vma));
		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);

	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
		       engine->name,
		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;

	x = lrc_ring_mi_mode(engine);
	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
		       engine->name, regs[x + 1]);
		regs[x + 1] &= ~STOP_RING;
		regs[x + 1] |= STOP_RING << 16;
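/*
 * The STOP_RING test reads the masked-register pair in one expression:
 * regs[x + 1] holds both the value (low 16 bits) and the write mask
 * (high 16 bits), so value & (value >> 16) & STOP_RING is true only when
 * STOP_RING is both set and armed to be written. The fix-up then applies
 * the same _MASKED_BIT_DISABLE pattern as __reset_stop_ring().
 */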
 * it for a short period and this batch is non-preemptible. We can of course

/* gen8_emit_flush_coherentl3_wa() */
	*batch++ = intel_gt_scratch_offset(engine->gt,

	*batch++ = intel_gt_scratch_offset(engine->gt,

/* gen8_init_indirectctx_bb() */
	if (IS_BROADWELL(engine->i915))

/* emit_lri() */
		*batch++ = i915_mmio_reg_offset(lri->reg);
		*batch++ = lri->value;
	} while (lri++, --count);

/* gen9_init_indirectctx_bb() */
	if (HAS_POOLED_EU(engine->i915)) {

/* lrc_create_wa_ctx() */
	obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);

	engine->wa_ctx.vma = vma;

/* lrc_fini_wa_ctx() */
	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);

/* lrc_init_wa_ctx() */
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
		&wa_ctx->indirect_ctx, &wa_ctx->per_ctx

	if (GRAPHICS_VER(engine->i915) >= 11 ||
	    !(engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))

	if (GRAPHICS_VER(engine->i915) == 9) {
	} else if (GRAPHICS_VER(engine->i915) == 8) {

		drm_err(&engine->i915->drm,

	if (!engine->wa_ctx.vma)

	err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);

		err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);

	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);

		wa_bb[i]->offset = batch_ptr - batch;
		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
			err = -EINVAL;

		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);

	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);

	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
	__i915_gem_object_release_map(wa_ctx->vma->obj);

	err = i915_inject_probe_error(engine->i915, -ENODEV);

	i915_vma_unpin(wa_ctx->vma);

	if (err == -EDEADLK) {

	i915_vma_put(engine->wa_ctx.vma);
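/*
 * Two idioms worth noting in lrc_init_wa_ctx(): the offset/size pair for
 * each wa_bb[i] is derived purely from pointer arithmetic (batch_ptr -
 * batch marks where a batch starts, and the delta after emission is its
 * size), and the -EDEADLK branch is the standard i915 ww-mutex pattern:
 * back off and retry the object lock rather than failing outright.
 */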
/* st_runtime_underflow() */
	stats->runtime.num_underflow++;
	stats->runtime.max_underflow =
		max_t(u32, stats->runtime.max_underflow, -dt);

/* lrc_get_runtime() */
	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);

/* lrc_update_runtime() */
	struct intel_context_stats *stats = &ce->stats;

	old = stats->runtime.last;
	stats->runtime.last = lrc_get_runtime(ce);
	dt = stats->runtime.last - old;

		old, stats->runtime.last, dt);

	ewma_runtime_add(&stats->runtime.avg, dt);
	stats->runtime.total += dt;
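/*
 * CTX_TIMESTAMP is a 32-bit counter read straight out of the context
 * image, so dt = last - old is computed modulo 2^32 and stays correct
 * across a single wrap. A dt that comes out negative when viewed as
 * signed means the timestamp went backwards; that is the underflow case
 * st_runtime_underflow() records (note the -dt it feeds to max_t).
 */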