
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU-accessible memory that the host
 * fills with command packets.  When the read and write
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * write pointer.
 */
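
/*
 * Illustrative sketch (not part of the driver): the read/write pointer
 * relationship described above, for a hypothetical ring whose size in
 * dwords is a power of two.  The helper names and the buf_mask parameter
 * are assumptions made for this example only.
 */
static inline bool example_ring_is_idle(u32 rptr, u32 wptr)
{
        /* the GPU has consumed everything the host has written */
        return rptr == wptr;
}

static inline u32 example_ring_used_dw(u32 rptr, u32 wptr, u32 buf_mask)
{
        /* dwords still pending, modulo the ring size (buf_mask = size_in_dw - 1) */
        return (wptr - rptr) & buf_mask;
}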
/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
        /* Align the requested size so the commit path can pad safely */
        ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

        /* Never allocate more space than fits in one submission */
        if (WARN_ON_ONCE(ndw > ring->max_dw))
                return -ENOMEM;

        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;

        if (ring->funcs->begin_use)
                ring->funcs->begin_use(ring);

        return 0;
}
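
/*
 * A minimal usage sketch (not a real emit path): reserve space, write
 * packets, then commit.  The helper name and the two-NOP payload are made
 * up for illustration; real callers emit IB, fence, etc. packets here.
 * If something goes wrong between alloc and commit, amdgpu_ring_undo()
 * rolls the write pointer back to wptr_old.
 */
static int example_emit_two_nops(struct amdgpu_ring *ring)
{
        int r;

        r = amdgpu_ring_alloc(ring, 2);         /* reserve 2 dwords */
        if (r)
                return r;

        amdgpu_ring_write(ring, ring->funcs->nop);
        amdgpu_ring_write(ring, ring->funcs->nop);

        amdgpu_ring_commit(ring);               /* pad + publish the new wptr */
        return 0;
}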
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: number of NOP packets to insert
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        uint32_t i;

        for (i = 0; i < count; i++)
                amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        while (ib->length_dw & ring->funcs->align_mask)
                ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
        uint32_t count;

        /* We pad to match fetch size */
        count = ring->funcs->align_mask + 1 -
                (ring->wptr & ring->funcs->align_mask);
        count &= ring->funcs->align_mask;

        if (count != 0)
                ring->funcs->insert_nop(ring, count);

        mb();
        amdgpu_ring_set_wptr(ring);

        if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
}
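
/*
 * Worked example of the padding arithmetic above (values are made up):
 * with align_mask = 0xf (16-dword fetch granularity) and wptr = 0x103,
 *   count = (0xf + 1 - (0x103 & 0xf)) & 0xf = (16 - 3) & 0xf = 13,
 * so 13 NOPs are inserted and the published wptr (0x110) lands on a
 * 16-dword boundary.
 */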
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
        ring->wptr = ring->wptr_old;

        if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
}
#define amdgpu_ring_get_gpu_addr(ring, offset)                          \
        (ring->is_mes_queue ?                                           \
         (ring->mes_ctx->meta_data_gpu_addr + offset) :                 \
         (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)                          \
        (ring->is_mes_queue ?                                           \
         (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
         (&ring->adev->wb.wb[offset]))
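
/*
 * Illustrative reading of the two macros above (the offset value is
 * hypothetical): for a non-MES ring whose writeback slot sits at offset
 * 0x20, the GPU address is wb.gpu_addr + 0x20 * 4 while the CPU
 * dereferences &wb.wb[0x20].  Both name the same 32-bit slot, because
 * wb.wb is an array of u32 and offsets are counted in dwords.
 */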
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
                     unsigned int irq_type, unsigned int hw_prio,
                     atomic_t *sched_score)
{
        int sched_hw_submission = amdgpu_sched_hw_submission;
        unsigned int max_ibs_dw;
        u32 *num_sched;
        u32 hw_ip;
        int r;

        /* KIQ doesn't really use the gpu scheduler;
         * KIQ tasks get submitted directly to the ring.
         */
        if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                sched_hw_submission = max(sched_hw_submission, 256);
        if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
                sched_hw_submission = 8;
        else if (ring == &adev->sdma.instance[0].page)
                sched_hw_submission = 256;
        if (ring->adev == NULL) {
                if (adev->num_rings >= AMDGPU_MAX_RINGS)
                        return -EINVAL;

                ring->adev = adev;
                ring->num_hw_submission = sched_hw_submission;
                ring->sched_score = sched_score;
                ring->vmid_wait = dma_fence_get_stub();

                if (!ring->is_mes_queue) {
                        ring->idx = adev->num_rings++;
                        adev->rings[ring->idx] = ring;
                }

                r = amdgpu_fence_driver_init_ring(ring);
                if (r)
                        return r;
        }
        if (ring->is_mes_queue) {
                ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
                ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
                ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
                ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
                ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
        } else {
                r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
                if (r) {
                        dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
                        return r;
                }

                r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
                if (r) {
                        dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
                        return r;
                }

                r = amdgpu_device_wb_get(adev, &ring->fence_offs);
                if (r) {
                        dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
                        return r;
                }

                r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
                if (r) {
                        dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
                        return r;
                }

                r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
                if (r) {
                        dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
                        return r;
                }
        }
        ring->fence_gpu_addr =
                amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
        ring->fence_cpu_addr =
                amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

        ring->rptr_gpu_addr =
                amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
        ring->rptr_cpu_addr =
                amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

        ring->wptr_gpu_addr =
                amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
        ring->wptr_cpu_addr =
                amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

        ring->trail_fence_gpu_addr =
                amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
        ring->trail_fence_cpu_addr =
                amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

        ring->cond_exe_gpu_addr =
                amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
        ring->cond_exe_cpu_addr =
                amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;

        r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
        if (r) {
                dev_err(adev->dev, "failed initializing fences (%d).\n", r);
                return r;
        }
        max_ibs_dw = ring->funcs->emit_frame_size +
                     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
        max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

        if (WARN_ON(max_ibs_dw > max_dw))
                max_dw = max_ibs_dw;

        ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

        ring->buf_mask = (ring->ring_size / 4) - 1;
        ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
                0xffffffffffffffff : ring->buf_mask;
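
/*
 * Worked example of the sizing above (illustrative numbers): with
 * max_dw = 1024 and sched_hw_submission = 2, ring_size =
 * roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes, so buf_mask =
 * 8192 / 4 - 1 = 0x7ff and indexing into the buffer wraps every
 * 2048 dwords; rings with 64-bit pointers keep a full-width ptr_mask
 * and only apply buf_mask when touching the buffer itself.
 */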
        /* Allocate ring buffer */
        if (ring->is_mes_queue) {
                int offset = 0;

                BUG_ON(ring->ring_size > PAGE_SIZE*4);

                offset = amdgpu_mes_ctx_get_offs(ring,
                ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
                ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
                amdgpu_ring_clear_ring(ring);

        } else if (ring->ring_obj == NULL) {
                r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &ring->ring_obj,
                                            &ring->gpu_addr,
                                            (void **)&ring->ring);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                amdgpu_ring_clear_ring(ring);
        }
        ring->max_dw = max_dw;
        ring->hw_prio = hw_prio;

        if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
                hw_ip = ring->funcs->type;
                num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
                adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
                        &ring->sched;
        }

        return 0;
}
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
        /* Not to finish a ring which is not initialized */
        if (!(ring->adev) ||
            (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
                return;

        ring->sched.ready = false;

        if (!ring->is_mes_queue) {
                amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
                amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

                amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
                amdgpu_device_wb_free(ring->adev, ring->fence_offs);

                amdgpu_bo_free_kernel(&ring->ring_obj,
                                      &ring->gpu_addr,
                                      (void **)&ring->ring);

                kfree(ring->fence_drv.fences);
        }

        dma_fence_put(ring->vmid_wait);
        ring->vmid_wait = NULL;
        ring->me = 0;

        if (!ring->is_mes_queue)
                ring->adev->rings[ring->idx] = NULL;
}
/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t reg1,
                                                uint32_t ref, uint32_t mask)
{
        amdgpu_ring_emit_wreg(ring, reg0, ref);
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
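
/*
 * Symbolic reading of the helper above: for reg0 = A, reg1 = B, ref = R,
 * mask = M, the ring ends up containing "write R to A" followed by
 * "poll B until (B & M) == M", emulating a combined write+wait packet on
 * engines that lack one.
 */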
/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
                               struct dma_fence *fence)
{
        unsigned long flags;
        ktime_t deadline;

        if (unlikely(ring->adev->debug_disable_soft_recovery))
                return false;

        deadline = ktime_add_us(ktime_get(), 10000);

        if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
                return false;

        spin_lock_irqsave(fence->lock, flags);
        if (!dma_fence_is_signaled_locked(fence))
                dma_fence_set_error(fence, -ENODATA);
        spin_unlock_irqrestore(fence->lock, flags);

        atomic_inc(&ring->adev->gpu_reset_counter);
        while (!dma_fence_is_signaled(fence) &&
               ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
                ring->funcs->soft_recovery(ring, vmid);

        return dma_fence_is_signaled(fence);
}
/* Layout of file is 12 bytes
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct amdgpu_ring *ring = file_inode(f)->i_private;
        uint32_t value, result, early[3];
        int r, i;

        if (*pos & 3 || size & 3)
                return -EINVAL;

        result = 0;

        if (*pos < 12) {
                early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
                early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
                early[2] = ring->wptr & ring->buf_mask;
                for (i = *pos / 4; i < 3 && size; i++) {
                        r = put_user(early[i], (uint32_t *)buf);
                        if (r)
                                return r;
                        buf += 4;
                        result += 4;
                        size -= 4;
                        *pos += 4;
                }
        }

        while (size) {
                if (*pos >= (ring->ring_size + 12))
                        return result;

                value = ring->ring[(*pos - 12)/4];
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;
                buf += 4;
                result += 4;
                size -= 4;
                *pos += 4;
        }

        return result;
}
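
/*
 * Example user-space consumer of the layout above (a sketch; the debugfs
 * path and the ring file name "amdgpu_ring_gfx_0.0.0" are assumptions and
 * vary by device and kernel version).  Only the 12-byte header is parsed.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hdr[3];        /* rptr, wptr, driver's copy of wptr */
        FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_ring_gfx_0.0.0", "rb");

        if (!f || fread(hdr, sizeof(hdr), 1, f) != 1)
                return 1;
        printf("rptr=0x%x wptr=0x%x driver_wptr=0x%x\n", hdr[0], hdr[1], hdr[2]);
        fclose(f);
        return 0;
}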
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
                                       size_t size, loff_t *pos)
{
        struct amdgpu_ring *ring = file_inode(f)->i_private;

        if (*pos & 3 || size & 3)
                return -EINVAL;

        kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        r = amdgpu_bo_reserve(ring->mqd_obj, false);

        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);

        /* snapshot the MQD into kbuf before dropping the reservation */
        for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
                kbuf[i] = mqd[i];

        amdgpu_bo_kunmap(ring->mqd_obj);
        amdgpu_bo_unreserve(ring->mqd_obj);

        if (*pos >= ring->mqd_size)
                return result;

        value = kbuf[*pos/4];
        buf += 4;
        result += 4;
        size -= 4;
        *pos += 4;

        amdgpu_bo_unreserve(ring->mqd_obj);
static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
        struct amdgpu_ring *ring = data;

        amdgpu_fence_driver_set_error(ring, val);
        return 0;
}
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring)
{
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
        char name[32];

        sprintf(name, "amdgpu_ring_%s", ring->name);
        debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
                                 ring->ring_size + 12);

        if (ring->mqd_obj) {
                sprintf(name, "amdgpu_mqd_%s", ring->name);
                debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
                                         ring->mqd_size);
        }

        sprintf(name, "amdgpu_error_%s", ring->name);
        debugfs_create_file(name, 0200, root, ring,
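
/*
 * For a ring named "gfx_0.0.0" (name is illustrative), the three calls
 * above create amdgpu_ring_gfx_0.0.0, amdgpu_mqd_gfx_0.0.0 (only when an
 * MQD object exists) and amdgpu_error_gfx_0.0.0 under the DRM debugfs
 * directory for the device.
 */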
/**
 * amdgpu_ring_test_helper - test the ring and set the scheduler readiness status
 *
 * @ring: ring to test
 *
 * Tests the ring and sets the scheduler readiness status accordingly.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int r;

        r = amdgpu_ring_test_ring(ring);
        if (r)
                DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
                              ring->name, r);
        else
                DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
                              ring->name);

        ring->sched.ready = !r;

        return r;
}
static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
                                    struct amdgpu_mqd_prop *prop)
{
        struct amdgpu_device *adev = ring->adev;
        bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
                                    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
        bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
                                amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);

        prop->mqd_gpu_addr = ring->mqd_gpu_addr;
        prop->hqd_base_gpu_addr = ring->gpu_addr;
        prop->rptr_gpu_addr = ring->rptr_gpu_addr;
        prop->wptr_gpu_addr = ring->wptr_gpu_addr;
        prop->queue_size = ring->ring_size;
        prop->eop_gpu_addr = ring->eop_gpu_addr;
        prop->use_doorbell = ring->use_doorbell;
        prop->doorbell_index = ring->doorbell_index;

        prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

        prop->allow_tunneling = is_high_prio_compute;
        if (is_high_prio_compute || is_high_prio_gfx) {
                prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
                prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
        }
}
int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_mqd *mqd_mgr;
        struct amdgpu_mqd_prop prop;

        amdgpu_ring_to_mqd_prop(ring, &prop);

        ring->wptr = 0;

        if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
        else
                mqd_mgr = &adev->mqds[ring->funcs->type];

        return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
        if (!ring)
                return false;

        if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
                return false;

        return true;
}