// SPDX-License-Identifier: MIT

/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
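	u32 *cs;

	/*
	 * A minimal sketch of the remainder of this function under the
	 * rules above, assuming i915's intel_ring_begin()/
	 * intel_ring_advance() helpers and the PIPE_CONTROL flags from
	 * intel_gpu_commands.h (dword counts not verified here).  The
	 * first packet stalls at the scoreboard, the only companion bit
	 * left available; the second is the post-sync != 0 write, aimed
	 * at scratch space because the value is never read back.
	 */
	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0; /* address: unused */
	*cs++ = 0; /* data: unused */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE; /* post-sync op != 0 */
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}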

/* gen6_emit_flush_rcs() */
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	/* ... */
	/* TLB invalidate requires a post-sync write. */
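	/*
	 * Sketch of the invalidate branch that comment belongs to,
	 * assuming the PIPE_CONTROL_* flags from intel_gpu_commands.h
	 * and this function's mode/flags locals: whenever invalidate
	 * bits are requested, a post-sync QW write (plus CS stall) is
	 * added so the invalidation is guaranteed to have completed.
	 */
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		/* TLB invalidate requires a post-sync write. */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}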

/* gen6_emit_breadcrumb_rcs() */
	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT) |
		PIPE_CONTROL_GLOBAL_GTT;
	/* ... */
	*cs++ = rq->fence.seqno;
	/* ... */
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
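/*
 * The dword written by that breadcrumb is what request-completion
 * checks poll.  A hypothetical illustration of such a check, assuming
 * rq->hwsp_seqno points at the status-page copy of the seqno; i915
 * has its own helpers for this, and seqno_passed() is not one of them:
 */
static bool seqno_passed(const struct i915_request *rq)
{
	u32 hwsp = READ_ONCE(*rq->hwsp_seqno);

	/* Signed dword delta so the check survives seqno wraparound. */
	return (s32)(hwsp - (u32)rq->fence.seqno) >= 0;
}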

/* mi_flush_dw() */
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
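	/*
	 * A sketch of how a flush helper might honour that rule, assuming
	 * the MI_FLUSH_DW_* flags from intel_gpu_commands.h and cmd/mode
	 * locals like this function's: the TLB-invalidate bit is only set
	 * together with a STOREDW post-sync operation, which is exactly
	 * what makes it "valid" per the Bspec quote.
	 */
	cmd = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB; /* valid only with post-sync != 0 */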

/* gen7_emit_flush_rcs() */
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	/*
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 *
	 * CS_STALL suggests at least a post-sync write.
	 */
	/* ... */
	/*
	 * Workaround: we must issue a pipe_control with CS-stall bit
	 * set before a pipe_control command that has the state cache
	 * invalidate bit set.
	 */
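/*
 * A sketch of that CS-stall workaround as a helper, assuming the
 * 4-dword PIPE_CONTROL form used elsewhere in this file; i915
 * implements this as gen7_render_ring_cs_stall_wa(), whose exact body
 * is not verified here.
 */
static int cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}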

/* gen7_emit_breadcrumb_rcs() */
	/* ... */
	*cs++ = rq->fence.seqno;
	/* ... */
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
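	/*
	 * A sketch of the packet that seqno dword concludes, assuming the
	 * gen7 PIPE_CONTROL flags from intel_gpu_commands.h;
	 * seqno_ggtt_addr is a hypothetical stand-in for the GGTT address
	 * of the seqno slot.  The post-sync QW write can only land after
	 * the flushes in the same packet complete, which is what makes
	 * the breadcrumb trustworthy.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL |
		 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
	*cs++ = seqno_ggtt_addr; /* hypothetical: address of seqno slot */
	*cs++ = rq->fence.seqno;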

/* gen6_emit_breadcrumb_xcs() */
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
	/* ... */
	*cs++ = rq->fence.seqno;
	/* ... */
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
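	/*
	 * A sketch of the store that lands that seqno, assuming the
	 * MI_FLUSH_DW encoding from intel_gpu_commands.h: STORE_INDEX
	 * writes relative to the per-engine status page, which is why the
	 * GEM_BUG_ONs above insist the seqno slot sits at
	 * I915_GEM_HWS_SEQNO_ADDR within it.
	 */
	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;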

/* gen7_emit_breadcrumb_xcs() */
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
	/* ... */
	*cs++ = rq->fence.seqno;
	/* ... */
	*cs++ = rq->fence.seqno;
	/* ... */
	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
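	/*
	 * The seqno appears twice above because the gen7 non-render rings
	 * repeat the status-page store as a hardware workaround.  A sketch
	 * of that loop, assuming a repeat count along the lines of i915's
	 * GEN7_XCS_WA (exact count not verified here):
	 */
	for (int i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}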

/* gen6_irq_enable() */
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);
	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
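/*
 * The "flush/delay" above is the standard MMIO write-posting idiom: a
 * read back of any register on the same device forces earlier posted
 * writes to reach the hardware before execution continues.  A
 * self-contained sketch using the generic accessors from <linux/io.h>
 * (regs/reg/val are hypothetical names, not i915's ENGINE_* macros):
 */
static inline void write_then_flush(void __iomem *regs, u32 reg, u32 val)
{
	writel(val, regs + reg);  /* MMIO write may be posted/buffered */
	(void)readl(regs + reg);  /* read back to flush the posted write */
}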

/* gen6_irq_disable() */
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);

/* hsw_irq_enable_vecs() */
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);
	gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);

/* hsw_irq_disable_vecs() */
	/* ... */
	gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);