34 /* VPE CSA resides in the 4th page of CSA */
40 #define VPE_MAX_DPM_LEVEL 4
85 remainder -= arg2_value; in vpe_u1_8_from_fraction()
87 } while (--i != 0); in vpe_u1_8_from_fraction()
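
A hedged aside: the two fragments above come from vpe_u1_8_from_fraction(), and the subtract-in-a-loop shape suggests a digit-by-digit division into an unsigned 1.8 fixed-point result (1 integer bit, 8 fractional bits). A minimal sketch of the same conversion done by plain scaling, assuming the usual U1.8 interpretation; this is a hypothetical helper, not the driver's implementation:

    /* Hypothetical sketch: fraction -> unsigned 1.8 fixed point by scaling. */
    static uint32_t u1_8_from_fraction_sketch(uint32_t numerator, uint32_t denominator)
    {
            uint32_t val;

            if (denominator == 0)
                    return 0;

            /* value = round(numerator / denominator * 2^8), clamped to 9 bits */
            val = (numerator * 256 + denominator / 2) / denominator;

            return val > 0x1ff ? 0x1ff : val;
    }
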
114 * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
123 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_configure_dpm()
126 if (adev->pm.dpm_enabled) { in amdgpu_vpe_configure_dpm()
135 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
137 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
141 dev_dbg(adev->dev, "%s: get clock failed!\n", __func__); in amdgpu_vpe_configure_dpm()
152 for (idx = PP_SMU_NUM_VPECLK_DPM_LEVELS; idx && !vpeclk_enalbled_num; idx--) in amdgpu_vpe_configure_dpm()
153 if (VPEClks[idx-1].Freq) in amdgpu_vpe_configure_dpm()
156 /* vpe dpm only cares about 4 levels. */ in amdgpu_vpe_configure_dpm()
167 if (soc_dpm_level > vpeclk_enalbled_num - 1) in amdgpu_vpe_configure_dpm()
168 soc_dpm_level = vpeclk_enalbled_num - 1; in amdgpu_vpe_configure_dpm()
199 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ in amdgpu_vpe_configure_dpm()
200 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ in amdgpu_vpe_configure_dpm()
201 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ in amdgpu_vpe_configure_dpm()
202 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
203 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ in amdgpu_vpe_configure_dpm()
204 dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); in amdgpu_vpe_configure_dpm()
206 dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__); in amdgpu_vpe_configure_dpm()
213 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
215 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
216 dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); in amdgpu_vpe_configure_dpm()
217 return -EINVAL; in amdgpu_vpe_configure_dpm()
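
A hedged aside on the clamp seen above (soc_dpm_level capped at vpeclk_enalbled_num - 1): the requested SOC level is limited to the highest VPE clock level that reported a non-zero frequency. A stand-alone version of that clamp, with hypothetical names, for illustration only:

    /* Hypothetical helper mirroring the level clamp above; not driver API. */
    static unsigned int vpe_clamp_dpm_level_sketch(unsigned int requested, unsigned int enabled_levels)
    {
            if (enabled_levels == 0)
                    return 0;
            if (requested > enabled_levels - 1)
                    return enabled_levels - 1;
            return requested;
    }
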
224 .mc_addr = adev->vpe.cmdbuf_gpu_addr, in amdgpu_vpe_psp_update_sram()
228 return psp_execute_ip_fw_load(&adev->psp, &ucode); in amdgpu_vpe_psp_update_sram()
233 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_init_microcode()
239 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix); in amdgpu_vpe_init_microcode()
243 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data; in amdgpu_vpe_init_microcode()
244 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version); in amdgpu_vpe_init_microcode()
245 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version); in amdgpu_vpe_init_microcode()
247 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { in amdgpu_vpe_init_microcode()
250 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX]; in amdgpu_vpe_init_microcode()
251 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX; in amdgpu_vpe_init_microcode()
252 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
253 adev->firmware.fw_size += in amdgpu_vpe_init_microcode()
254 ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE); in amdgpu_vpe_init_microcode()
256 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL]; in amdgpu_vpe_init_microcode()
257 info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL; in amdgpu_vpe_init_microcode()
258 info->fw = adev->vpe.fw; in amdgpu_vpe_init_microcode()
259 adev->firmware.fw_size += in amdgpu_vpe_init_microcode()
260 ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE); in amdgpu_vpe_init_microcode()
265 dev_err(adev->dev, "failed to initialize vpe microcode\n"); in amdgpu_vpe_init_microcode()
266 release_firmware(adev->vpe.fw); in amdgpu_vpe_init_microcode()
267 adev->vpe.fw = NULL; in amdgpu_vpe_init_microcode()
274 struct amdgpu_ring *ring = &vpe->ring; in amdgpu_vpe_ring_init() local
277 ring->ring_obj = NULL; in amdgpu_vpe_ring_init()
278 ring->use_doorbell = true; in amdgpu_vpe_ring_init()
279 ring->vm_hub = AMDGPU_MMHUB0(0); in amdgpu_vpe_ring_init()
280 ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1); in amdgpu_vpe_ring_init()
281 snprintf(ring->name, 4, "vpe"); in amdgpu_vpe_ring_init()
283 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0, in amdgpu_vpe_ring_init()
293 amdgpu_ring_fini(&vpe->ring); in amdgpu_vpe_ring_fini()
301 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_early_init()
310 vpe->collaborate_mode = true; in vpe_early_init()
313 return -EINVAL; in vpe_early_init()
319 dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false"); in vpe_early_init()
330 fences += amdgpu_fence_count_emitted(&adev->vpe.ring); in vpe_idle_work_handler()
335 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_idle_work_handler()
345 &adev->vpe.cmdbuf_obj, in vpe_common_init()
346 &adev->vpe.cmdbuf_gpu_addr, in vpe_common_init()
347 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_common_init()
349 dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r); in vpe_common_init()
353 vpe->context_started = false; in vpe_common_init()
354 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); in vpe_common_init()
362 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_init()
387 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_sw_fini()
389 release_firmware(vpe->fw); in vpe_sw_fini()
390 vpe->fw = NULL; in vpe_sw_fini()
394 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj, in vpe_sw_fini()
395 &adev->vpe.cmdbuf_gpu_addr, in vpe_sw_fini()
396 (void **)&adev->vpe.cmdbuf_cpu_addr); in vpe_sw_fini()
404 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_init()
427 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_hw_fini()
441 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_suspend()
453 static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in vpe_ring_insert_nop() argument
459 amdgpu_ring_write(ring, ring->funcs->nop | in vpe_ring_insert_nop()
460 VPE_CMD_NOP_HEADER_COUNT(count - 1)); in vpe_ring_insert_nop()
462 amdgpu_ring_write(ring, ring->funcs->nop); in vpe_ring_insert_nop()
465 static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid) in vpe_get_csa_mc_addr() argument
467 struct amdgpu_device *adev = ring->adev; in vpe_get_csa_mc_addr()
471 if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) in vpe_get_csa_mc_addr()
480 static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring, in vpe_ring_emit_pred_exec() argument
484 if (!ring->adev->vpe.collaborate_mode) in vpe_ring_emit_pred_exec()
487 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) | in vpe_ring_emit_pred_exec()
489 amdgpu_ring_write(ring, exec_count & 0x1fff); in vpe_ring_emit_pred_exec()
492 static void vpe_ring_emit_ib(struct amdgpu_ring *ring, in vpe_ring_emit_ib() argument
498 uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid); in vpe_ring_emit_ib()
500 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) | in vpe_ring_emit_ib()
504 amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); in vpe_ring_emit_ib()
505 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); in vpe_ring_emit_ib()
506 amdgpu_ring_write(ring, ib->length_dw); in vpe_ring_emit_ib()
507 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); in vpe_ring_emit_ib()
508 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); in vpe_ring_emit_ib()
511 static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, in vpe_ring_emit_fence() argument
518 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); in vpe_ring_emit_fence()
521 amdgpu_ring_write(ring, lower_32_bits(addr)); in vpe_ring_emit_fence()
522 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_emit_fence()
523 amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq)); in vpe_ring_emit_fence()
524 addr += 4; in vpe_ring_emit_fence()
529 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0)); in vpe_ring_emit_fence()
530 amdgpu_ring_write(ring, 0); in vpe_ring_emit_fence()
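
A hedged aside: the fence fragments above write the 64-bit sequence number as two 32-bit dwords at addr and addr + 4, low dword first, and the fragments then show a TRAP packet being emitted. A hypothetical CPU-side illustration of the same dword split (not driver API):

    /* Hypothetical illustration of the low/high dword split used above. */
    static void fence_seq_split_sketch(uint32_t *slot, uint64_t seq)
    {
            slot[0] = lower_32_bits(seq);   /* i == 0: low dword at addr */
            slot[1] = upper_32_bits(seq);   /* i == 1: high dword at addr + 4 */
    }
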
535 static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring) in vpe_ring_emit_pipeline_sync() argument
537 uint32_t seq = ring->fence_drv.sync_seq; in vpe_ring_emit_pipeline_sync()
538 uint64_t addr = ring->fence_drv.gpu_addr; in vpe_ring_emit_pipeline_sync()
540 vpe_ring_emit_pred_exec(ring, 0, 6); in vpe_ring_emit_pipeline_sync()
543 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, in vpe_ring_emit_pipeline_sync()
547 amdgpu_ring_write(ring, addr & 0xfffffffc); in vpe_ring_emit_pipeline_sync()
548 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_emit_pipeline_sync()
549 amdgpu_ring_write(ring, seq); /* reference */ in vpe_ring_emit_pipeline_sync()
550 amdgpu_ring_write(ring, 0xffffffff); /* mask */ in vpe_ring_emit_pipeline_sync()
551 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | in vpe_ring_emit_pipeline_sync()
552 VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4)); in vpe_ring_emit_pipeline_sync()
555 static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) in vpe_ring_emit_wreg() argument
557 vpe_ring_emit_pred_exec(ring, 0, 3); in vpe_ring_emit_wreg()
559 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0)); in vpe_ring_emit_wreg()
560 amdgpu_ring_write(ring, reg << 2); in vpe_ring_emit_wreg()
561 amdgpu_ring_write(ring, val); in vpe_ring_emit_wreg()
564 static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, in vpe_ring_emit_reg_wait() argument
567 vpe_ring_emit_pred_exec(ring, 0, 6); in vpe_ring_emit_reg_wait()
569 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM, in vpe_ring_emit_reg_wait()
573 amdgpu_ring_write(ring, reg << 2); in vpe_ring_emit_reg_wait()
574 amdgpu_ring_write(ring, 0); in vpe_ring_emit_reg_wait()
575 amdgpu_ring_write(ring, val); /* reference */ in vpe_ring_emit_reg_wait()
576 amdgpu_ring_write(ring, mask); /* mask */ in vpe_ring_emit_reg_wait()
577 amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | in vpe_ring_emit_reg_wait()
581 static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, in vpe_ring_emit_vm_flush() argument
584 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); in vpe_ring_emit_vm_flush()
587 static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring, in vpe_ring_init_cond_exec() argument
592 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0)); in vpe_ring_init_cond_exec()
593 amdgpu_ring_write(ring, lower_32_bits(addr)); in vpe_ring_init_cond_exec()
594 amdgpu_ring_write(ring, upper_32_bits(addr)); in vpe_ring_init_cond_exec()
595 amdgpu_ring_write(ring, 1); in vpe_ring_init_cond_exec()
596 ret = ring->wptr & ring->buf_mask; in vpe_ring_init_cond_exec()
597 amdgpu_ring_write(ring, 0); in vpe_ring_init_cond_exec()
602 static int vpe_ring_preempt_ib(struct amdgpu_ring *ring) in vpe_ring_preempt_ib() argument
604 struct amdgpu_device *adev = ring->adev; in vpe_ring_preempt_ib()
605 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_preempt_ib()
606 uint32_t preempt_reg = vpe->regs.queue0_preempt; in vpe_ring_preempt_ib()
610 amdgpu_ring_set_preempt_cond_exec(ring, false); in vpe_ring_preempt_ib()
613 ring->trail_seq += 1; in vpe_ring_preempt_ib()
614 amdgpu_ring_alloc(ring, 10); in vpe_ring_preempt_ib()
615 vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0); in vpe_ring_preempt_ib()
616 amdgpu_ring_commit(ring); in vpe_ring_preempt_ib()
619 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1); in vpe_ring_preempt_ib()
622 for (i = 0; i < adev->usec_timeout; i++) { in vpe_ring_preempt_ib()
623 if (ring->trail_seq == in vpe_ring_preempt_ib()
624 le32_to_cpu(*(ring->trail_fence_cpu_addr))) in vpe_ring_preempt_ib()
629 if (i >= adev->usec_timeout) { in vpe_ring_preempt_ib()
630 r = -EINVAL; in vpe_ring_preempt_ib()
631 dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx); in vpe_ring_preempt_ib()
635 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0); in vpe_ring_preempt_ib()
638 amdgpu_ring_set_preempt_cond_exec(ring, true); in vpe_ring_preempt_ib()
653 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_set_powergating_state()
655 if (!adev->pm.dpm_enabled) in vpe_set_powergating_state()
656 dev_err(adev->dev, "Without PM, cannot support powergating\n"); in vpe_set_powergating_state()
658 dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE"); in vpe_set_powergating_state()
662 vpe->context_started = false; in vpe_set_powergating_state()
670 static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring) in vpe_ring_get_rptr() argument
672 struct amdgpu_device *adev = ring->adev; in vpe_ring_get_rptr()
673 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_rptr()
676 if (ring->use_doorbell) { in vpe_ring_get_rptr()
677 rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr); in vpe_ring_get_rptr()
678 dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr); in vpe_ring_get_rptr()
680 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi)); in vpe_ring_get_rptr()
682 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo)); in vpe_ring_get_rptr()
683 dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr); in vpe_ring_get_rptr()
689 static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring) in vpe_ring_get_wptr() argument
691 struct amdgpu_device *adev = ring->adev; in vpe_ring_get_wptr()
692 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_get_wptr()
695 if (ring->use_doorbell) { in vpe_ring_get_wptr()
696 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); in vpe_ring_get_wptr()
697 dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr); in vpe_ring_get_wptr()
699 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi)); in vpe_ring_get_wptr()
701 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo)); in vpe_ring_get_wptr()
702 dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr); in vpe_ring_get_wptr()
708 static void vpe_ring_set_wptr(struct amdgpu_ring *ring) in vpe_ring_set_wptr() argument
710 struct amdgpu_device *adev = ring->adev; in vpe_ring_set_wptr()
711 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_set_wptr()
713 if (ring->use_doorbell) { in vpe_ring_set_wptr()
714 dev_dbg(adev->dev, "Using doorbell, \ in vpe_ring_set_wptr()
716 lower_32_bits(ring->wptr) << 2 == 0x%08x, \ in vpe_ring_set_wptr()
717 upper_32_bits(ring->wptr) << 2 == 0x%08x\n", in vpe_ring_set_wptr()
718 ring->wptr_offs, in vpe_ring_set_wptr()
719 lower_32_bits(ring->wptr << 2), in vpe_ring_set_wptr()
720 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
721 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); in vpe_ring_set_wptr()
722 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); in vpe_ring_set_wptr()
723 if (vpe->collaborate_mode) in vpe_ring_set_wptr()
724 WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2); in vpe_ring_set_wptr()
728 for (i = 0; i < vpe->num_instances; i++) { in vpe_ring_set_wptr()
729 dev_dbg(adev->dev, "Not using doorbell, \ in vpe_ring_set_wptr()
732 lower_32_bits(ring->wptr << 2), in vpe_ring_set_wptr()
733 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
734 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo), in vpe_ring_set_wptr()
735 lower_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
736 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi), in vpe_ring_set_wptr()
737 upper_32_bits(ring->wptr << 2)); in vpe_ring_set_wptr()
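
A hedged aside: in both the doorbell and register paths above, ring->wptr counts dwords while the hardware is programmed with a byte offset, hence the << 2. A trivial hypothetical helper spelling that out:

    /* Hypothetical: the ring wptr is in dwords, the hardware takes bytes. */
    static uint64_t vpe_wptr_byte_offset_sketch(uint64_t wptr_in_dwords)
    {
            return wptr_in_dwords << 2;     /* 4 bytes per ring dword */
    }
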
742 static int vpe_ring_test_ring(struct amdgpu_ring *ring) in vpe_ring_test_ring() argument
744 struct amdgpu_device *adev = ring->adev; in vpe_ring_test_ring()
752 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); in vpe_ring_test_ring()
756 adev->wb.wb[index] = 0; in vpe_ring_test_ring()
757 wb_addr = adev->wb.gpu_addr + (index * 4); in vpe_ring_test_ring()
759 ret = amdgpu_ring_alloc(ring, 4); in vpe_ring_test_ring()
761 dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret); in vpe_ring_test_ring()
765 amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0)); in vpe_ring_test_ring()
766 amdgpu_ring_write(ring, lower_32_bits(wb_addr)); in vpe_ring_test_ring()
767 amdgpu_ring_write(ring, upper_32_bits(wb_addr)); in vpe_ring_test_ring()
768 amdgpu_ring_write(ring, test_pattern); in vpe_ring_test_ring()
769 amdgpu_ring_commit(ring); in vpe_ring_test_ring()
771 for (i = 0; i < adev->usec_timeout; i++) { in vpe_ring_test_ring()
772 if (le32_to_cpu(adev->wb.wb[index]) == test_pattern) in vpe_ring_test_ring()
777 ret = -ETIMEDOUT; in vpe_ring_test_ring()
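
A hedged aside: the ring test above writes test_pattern to a writeback slot via a FENCE packet and then polls that slot for up to adev->usec_timeout iterations before giving up with -ETIMEDOUT. A hypothetical stand-alone poll loop with the same shape (names are illustrative, not driver API):

    /* Hypothetical writeback-slot poll mirroring the ring test above. */
    static int poll_wb_slot_sketch(const volatile uint32_t *slot, uint32_t expected, unsigned int timeout_us)
    {
            unsigned int i;

            for (i = 0; i < timeout_us; i++) {
                    if (le32_to_cpu(*slot) == expected)
                            return 0;
                    udelay(1);
            }
            return -ETIMEDOUT;
    }
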
784 static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout) in vpe_ring_test_ib() argument
786 struct amdgpu_device *adev = ring->adev; in vpe_ring_test_ib()
796 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret); in vpe_ring_test_ib()
800 adev->wb.wb[index] = 0; in vpe_ring_test_ib()
801 wb_addr = adev->wb.gpu_addr + (index * 4); in vpe_ring_test_ib()
811 ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0); in vpe_ring_test_ib()
817 ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); in vpe_ring_test_ib()
823 ret = ret ? : -ETIMEDOUT; in vpe_ring_test_ib()
827 ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL; in vpe_ring_test_ib()
838 static void vpe_ring_begin_use(struct amdgpu_ring *ring) in vpe_ring_begin_use() argument
840 struct amdgpu_device *adev = ring->adev; in vpe_ring_begin_use()
841 struct amdgpu_vpe *vpe = &adev->vpe; in vpe_ring_begin_use()
843 cancel_delayed_work_sync(&adev->vpe.idle_work); in vpe_ring_begin_use()
846 if (!vpe->context_started) { in vpe_ring_begin_use()
853 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); in vpe_ring_begin_use()
858 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); in vpe_ring_begin_use()
859 vpe->context_started = true; in vpe_ring_begin_use()
863 static void vpe_ring_end_use(struct amdgpu_ring *ring) in vpe_ring_end_use() argument
865 struct amdgpu_device *adev = ring->adev; in vpe_ring_end_use()
867 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); in vpe_ring_end_use()
905 adev->vpe.ring.funcs = &vpe_ring_funcs; in vpe_set_ring_funcs()