Lines matching "cs" in drivers/gpu/drm/i915/gt/gen7_renderclear.c

// SPDX-License-Identifier: MIT
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
/* in num_primitives() */
	/* ... a shader on every HW thread, and clear the thread-local registers. */
	return bv->max_threads;
/* in batch_get_defaults() */
	switch (INTEL_INFO(i915)->gt) {
		bv->max_threads = 70;
		bv->max_threads = 140;
		bv->max_threads = 280;
	bv->surface_height = 16 * 16;
	bv->surface_width = 32 * 2 * 16;

	switch (INTEL_INFO(i915)->gt) {
		bv->max_threads = 36;
		bv->max_threads = 128;
	bv->surface_height = 16 * 8;
	bv->surface_width = 32 * 16;

	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
	bv->surface_start = bv->state_start + SZ_4K;
	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
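Since num_primitives() above simply returns bv->max_threads, the layout math is easy to check by hand. A worked example for the max_threads = 36 configuration (illustrative arithmetic only, not from the source):

/*
 * state_start   = round_up(SZ_1K + 36 * 64, SZ_4K)
 *               = round_up(1024 + 2304, 4096)   = 4096
 * surface_start = 4096 + SZ_4K                  = 8192
 * size          = 8192 + (16 * 8) * (32 * 16)
 *               = 8192 + 128 * 512              = 73728 bytes (18 pages)
 */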
/* in batch_init() */
	bc->vma = vma;
	bc->offset = offset;
	bc->start = start + bc->offset / sizeof(*bc->start);
	bc->end = bc->start;
	bc->max_items = max_bytes / sizeof(*bc->start);
static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
/* in batch_addr() */
	return i915_vma_offset(bc->vma);
/* in batch_add() */
	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
	*bc->end++ = d;
/* in batch_alloc_items() */
	u32 *end = PTR_ALIGN(bc->end, align);
	memset32(bc->end, 0, end - bc->end);
	bc->end = end;

	map = bc->end;
	bc->end += items;
/* in batch_alloc_bytes() */
	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
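Every emitter below follows the same pattern with these helpers: reserve a fixed number of dwords, write each one, then assert the final cursor position with batch_advance(). A minimal sketch of the pattern (hypothetical emitter; only the helper names are from this file):

static void emit_example(struct batch_chunk *bc)
{
	/* reserve 4 dwords at 8-byte alignment; any padding is zero-filled */
	u32 *cs = batch_alloc_items(bc, 8, 4);

	*cs++ = 0;	/* dword 0 */
	*cs++ = 0;	/* dword 1 */
	*cs++ = 0;	/* dword 2 */
	*cs++ = 0;	/* dword 3 */

	batch_advance(bc, cs);	/* GEM_BUG_ON() fires if the count is wrong */
}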
/* in gen7_fill_surface_state() */
	u32 surface_h = bv->surface_height;
	u32 surface_w = bv->surface_width;
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0

	*cs++ = SURFACE_2D << 29 |
	*cs++ = batch_addr(state) + dst_offset;
	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
	*cs++ = surface_w;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
	batch_advance(state, cs);
/* in gen7_fill_binding_table() */
		gen7_fill_surface_state(state, bv->surface_start, bv);
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

	*cs++ = surface_start - state->offset;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(state, cs);
/* in gen7_fill_interface_descriptor() */
		gen7_fill_kernel_data(state, kernel->data, kernel->size);
	u32 *cs = batch_alloc_items(state, 32, 8 * count);
	u32 offset = batch_offset(state, cs);

	*cs++ = kernel_offset;
	*cs++ = (1 << 7) | (1 << 13);
	*cs++ = 0;
	*cs++ = (binding_table - state->offset) | 1;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	/* descriptors 1 - 63: dummy IDDs */
	memset32(cs, 0x00, (count - 1) * 8);
	batch_advance(state, cs + (count - 1) * 8);
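Only descriptor 0 is written by hand; batch_advance() is handed cs + (count - 1) * 8 so the end-of-chunk assertion also covers the dummy descriptors cleared by memset32(). The accounting, restated (illustrative):

/*
 * reserved: 8 * count dwords
 * written:  8 dwords explicitly + (count - 1) * 8 dwords via memset32()
 */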
/* in gen7_emit_state_base_address() */
	u32 *cs = batch_alloc_items(batch, 0, 10);

	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;

	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	batch_advance(batch, cs);
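The (10 - 2) in the header dword is the usual GEN command-length encoding: the length field holds the total dword count minus two. The same convention accounts for the (8 - 2), (4 - 2) and (pkt - 2) headers in the emitters below. Restated (a fact about the command streamer, not spelled out in this file):

/* header = opcode | (total_dwords - 2); STATE_BASE_ADDRESS is 10 dwords -> 8 */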
/* in gen7_emit_vfe_state() */
	u32 threads = bv->max_threads - 1;
	u32 *cs = batch_alloc_items(batch, 32, 8);

	*cs++ = MEDIA_VFE_STATE | (8 - 2);
	*cs++ = 0;
	*cs++ = threads << 16 | 1 << 8 | mode << 2;
	*cs++ = 0;
	*cs++ = urb_size << 16 | curbe_size;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
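Dword 2 packs the maximum thread count, biased by one (hence threads = bv->max_threads - 1 above), into bits 31:16. A worked example for the 36-thread configuration, assuming mode == 0 as at the call site in emit_batch() (illustrative only):

/*
 * threads = 36 - 1 = 35
 * dword 2 = 35 << 16 | 1 << 8 | 0 << 2 = 0x00230100
 */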
/* in gen7_emit_interface_descriptor_load() */
	u32 *cs = batch_alloc_items(batch, 8, 4);

	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
	*cs++ = 0;
	*cs++ = count * 8 * sizeof(*cs);
	/* interface descriptor address - it is relative to the dynamics base ... */
	*cs++ = interface_descriptor;
	batch_advance(batch, cs);
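Dword 2 here is the total interface descriptor data length in bytes: each descriptor is 8 dwords, matching the 8 * count allocation in gen7_fill_interface_descriptor() above. For example, with count == 64 (one live descriptor plus the 63 dummies noted earlier; the exact count is an assumption):

/* count * 8 * sizeof(u32) = 64 * 8 * 4 = 2048 bytes */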
/* in gen7_emit_media_object() */
	u32 *cs;

	cs = batch_alloc_items(batch, 8, pkt);

	*cs++ = MEDIA_OBJECT | (pkt - 2);
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = y_offset << 16 | x_offset;
	*cs++ = 0;
	*cs++ = GT3_INLINE_DATA_DELAYS;
	batch_advance(batch, cs);
/* in gen7_emit_pipeline_flush() */
	u32 *cs = batch_alloc_items(batch, 0, 4);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
/* in gen7_emit_pipeline_invalidate() */
	u32 *cs = batch_alloc_items(batch, 0, 10);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
/* in emit_batch() */
	struct drm_i915_private *i915 = vma->vm->i915;

	batch_init(&cmds, vma, start, 0, bv->state_start);
	batch_init(&state, vma, start, bv->state_start, SZ_4K);

	batch_add(&cmds, 0xffff0000 |
		  0));
	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

	/* Set the clear-residual kernel state */
	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);

	for (i = 0; i < num_primitives(bv); i++)
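The 0xffff0000 prefix on those batch_add() values follows the GEN masked-register convention (an inference, not spelled out in this fragment): the top 16 bits select which of the low 16 bits the write actually updates, so 0xffff0000 unmasks all of them:

/* write PIXEL_SUBSPAN_COLLECT_OPT_DISABLE with every mask bit set */
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);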
/* in gen7_setup_clear_gpr_bb() */
	batch_get_defaults(engine->i915, &bv);
	GEM_BUG_ON(vma->obj->base.size < bv.size);

	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);

	emit_batch(vma, memset(batch, 0, bv.size), &bv);

	i915_gem_object_flush_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
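Sizing happens inside gen7_setup_clear_gpr_bb() via batch_get_defaults(), so a caller only has to supply a vma whose backing object is at least bv.size bytes; the GEM_BUG_ON() above enforces that. A hedged caller sketch (object creation and pinning elided):

err = gen7_setup_clear_gpr_bb(engine, vma);
if (err)
	return err;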