// SPDX-License-Identifier: MIT
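/*
 * gen2_emit_flush(): judging from the dwords emitted below, this writes an
 * MI_FLUSH, then pads the ring with a run of scratch-page stores interleaved
 * with further flushes (presumably to give the flush time to land), and
 * closes with a second flush command.
 */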
static int gen2_emit_flush(struct i915_request *rq, u32 mode)
{
        u32 cmd, *cs;
        /* ... */

        cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = cmd;
        while (num_store_dw--) {
                *cs++ = MI_STORE_DWORD_INDEX;
                *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
                *cs++ = 0;
                *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
        }
        *cs++ = cmd;

        intel_ring_advance(rq, cs);

        return 0;
}
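/*
 * gen4_emit_flush_rcs(): render-ring flush. The visible invalidate path
 * brackets a dozen MI_FLUSHes between two PIPE_CONTROL qword writes to a
 * scratch page; as the in-line comment notes, this appears to act as a delay
 * so the CS invalidate actually takes effect.
 */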
static int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
        u32 cmd, *cs;
        int i;

        /*
         * ...
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * ...
         */
        if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
                /* ... */;

        cs = intel_ring_begin(rq, i);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = cmd;

        /*
         * A random delay to let the CS invalidate take effect? Without this
         * delay, the GPU relocation path fails as the CS does not see
         * ...
         */
        *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
        *cs++ = intel_gt_scratch_offset(rq->engine->gt, /* ... */);
        *cs++ = 0;
        *cs++ = 0;

        for (i = 0; i < 12; i++)
                *cs++ = MI_FLUSH;

        *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
        *cs++ = intel_gt_scratch_offset(rq->engine->gt, /* ... */);
        *cs++ = 0;
        *cs++ = 0;
        /* ... */

        *cs++ = cmd;

        intel_ring_advance(rq, cs);

        return 0;
}
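/*
 * gen4_emit_flush_vcs(): the video ring needs only a single MI_FLUSH,
 * padded with an MI_NOOP to fill the two-dword allocation.
 */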
static int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_FLUSH;
        *cs++ = MI_NOOP;
        intel_ring_advance(rq, cs);

        return 0;
}
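/*
 * Breadcrumb emission: after an MI_FLUSH, store the request's seqno "flush"
 * times into the scratch slot of the HWSP, then "post" times into the real
 * seqno slot, and finally raise MI_USER_INTERRUPT to wake any waiters.
 */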
static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
                                   int flush, int post)
{
        GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
        GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);

        *cs++ = MI_FLUSH;

        while (flush--) {
                *cs++ = MI_STORE_DWORD_INDEX;
                *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
                *cs++ = rq->fence.seqno;
        }

        while (post--) {
                *cs++ = MI_STORE_DWORD_INDEX;
                *cs++ = I915_GEM_HWS_SEQNO_ADDR;
                *cs++ = rq->fence.seqno;
        }

        *cs++ = MI_USER_INTERRUPT;

        rq->tail = intel_ring_offset(rq, cs);
        assert_ring_tail_valid(rq->ring, rq->tail);

        return cs;
}
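/*
 * gen3 pads the breadcrumb with 16 scratch-slot writes before the 8 seqno
 * writes; gen5 gets away with 8.
 */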
u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
        return __gen2_emit_breadcrumb(rq, cs, 16, 8);
}

u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
        return __gen2_emit_breadcrumb(rq, cs, 8, 8);
}
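/*
 * i830_emit_bb_start(): workaround for the i830 CS TLB bug. A COLOR_BLT
 * first touches I830_TLB_ENTRIES pages of the per-gt scratch area; then,
 * unless the batch is pinned, the batch (at most I830_WA_SIZE bytes) is
 * blitted into that stable scratch area, presumably so it can be executed
 * from there without the CS tripping over stale TLB entries.
 */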
static int i830_emit_bb_start(struct i915_request *rq,
                              u64 offset, u32 len,
                              unsigned int dispatch_flags)
{
        u32 *cs, cs_offset =
                intel_gt_scratch_offset(rq->engine->gt, /* ... */);

        GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);

        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        /* ... */
        *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
        *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
        *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
        *cs++ = cs_offset;
        *cs++ = 0xdeadbeef;
        *cs++ = MI_NOOP;
        intel_ring_advance(rq, cs);

        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_WA_SIZE)
                        return -ENOSPC;

                cs = intel_ring_begin(rq, 6 + 2);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);

                /*
                 * ...
                 * stable batch scratch bo area (so that the CS never
                 * ...
                 */
                *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
                *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
                *cs++ = cs_offset;
                *cs++ = 4096;
                *cs++ = offset;

                *cs++ = MI_FLUSH;
                *cs++ = MI_NOOP;
                intel_ring_advance(rq, cs);

                /* ... */
        }

        cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *cs++ = offset;
        intel_ring_advance(rq, cs);

        return 0;
}
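/*
 * gen3_emit_bb_start(): a plain MI_BATCH_BUFFER_START into the GTT; the
 * elided lines presumably apply the dispatch flags to the batch offset
 * before the jump is emitted.
 */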
static int gen3_emit_bb_start(struct i915_request *rq, u64 offset, u32 len,
                              unsigned int dispatch_flags)
{
        u32 *cs;
        /* ... */

        cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *cs++ = offset;
        intel_ring_advance(rq, cs);

        return 0;
}
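/*
 * gen4_emit_bb_start(): as gen3, but a security field is OR'd into the
 * MI_BATCH_BUFFER_START command; the elided setup presumably chooses between
 * the secure and non-secure encodings based on dispatch_flags.
 */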
static int gen4_emit_bb_start(struct i915_request *rq, u64 offset, u32 len,
                              unsigned int dispatch_flags)
{
        u32 *cs;
        /* ... */
                security = 0;

        cs = intel_ring_begin(rq, 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | security;
        *cs++ = offset;
        intel_ring_advance(rq, cs);

        return 0;
}
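/*
 * Interrupt plumbing: gen2 masks/unmasks the engine's bit in IMR with 16-bit
 * MMIO writes, gen3 with 32-bit writes (posting the read on enable), and
 * gen5 defers to the gen5 GT interrupt helpers.
 */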
/* gen2_irq_enable() */
        struct drm_i915_private *i915 = engine->i915;

        i915->irq_mask &= ~engine->irq_enable_mask;
        intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);

/* gen2_irq_disable() */
        struct drm_i915_private *i915 = engine->i915;

        i915->irq_mask |= engine->irq_enable_mask;
        intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);

/* gen3_irq_enable() */
        engine->i915->irq_mask &= ~engine->irq_enable_mask;
        intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
        intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);

/* gen3_irq_disable() */
        engine->i915->irq_mask |= engine->irq_enable_mask;
        intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);

/* gen5_irq_enable() */
        gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);

/* gen5_irq_disable() */
        gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);