Lines Matching +full:4 +full:- +full:ring
446 return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); in sdma_v4_0_get_reg_offset()
448 return (adev->reg_offset[SDMA1_HWIP][0][0] + offset); in sdma_v4_0_get_reg_offset()
450 return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
452 return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
453 case 4: in sdma_v4_0_get_reg_offset()
454 return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
456 return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
458 return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
460 return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); in sdma_v4_0_get_reg_offset()
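
Note: the offsets above all follow one rule: every SDMA instance owns its own register aperture, and a register is addressed as that instance's base plus the per-register offset. A minimal standalone sketch of the pattern, using made-up base values rather than the driver's real reg_offset tables:

/* Sketch only: the bases are placeholders, not real SDMA aperture addresses. */
#include <stdint.h>
#include <stdio.h>

static const uint32_t sdma_base[8] = {
        0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700
};

static uint32_t get_reg_offset(unsigned int instance, uint32_t offset)
{
        /* mirrors adev->reg_offset[SDMAn_HWIP][0][...] + offset above */
        return sdma_base[instance] + offset;
}

int main(void)
{
        printf("SDMA2 reg: 0x%x\n", get_reg_offset(2, 0x80));
        return 0;
}
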
478 case 4: in sdma_v4_0_seq_to_irq_id()
489 return -EINVAL; in sdma_v4_0_seq_to_irq_id()
504 return 4; in sdma_v4_0_irq_id_to_seq()
514 return -EINVAL; in sdma_v4_0_irq_id_to_seq()
520 case IP_VERSION(4, 0, 0): in sdma_v4_0_init_golden_registers()
528 case IP_VERSION(4, 0, 1): in sdma_v4_0_init_golden_registers()
536 case IP_VERSION(4, 2, 0): in sdma_v4_0_init_golden_registers()
547 case IP_VERSION(4, 2, 2): in sdma_v4_0_init_golden_registers()
552 case IP_VERSION(4, 4, 0): in sdma_v4_0_init_golden_registers()
557 case IP_VERSION(4, 1, 0): in sdma_v4_0_init_golden_registers()
558 case IP_VERSION(4, 1, 1): in sdma_v4_0_init_golden_registers()
562 if (adev->apu_flags & AMD_APU_IS_RAVEN2) in sdma_v4_0_init_golden_registers()
571 case IP_VERSION(4, 1, 2): in sdma_v4_0_init_golden_registers()
590 case IP_VERSION(4, 0, 0): in sdma_v4_0_setup_ulv()
591 if (adev->pdev->device == 0x6860) in sdma_v4_0_setup_ulv()
594 case IP_VERSION(4, 2, 0): in sdma_v4_0_setup_ulv()
595 if (adev->pdev->device == 0x66a1) in sdma_v4_0_setup_ulv()
602 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_setup_ulv()
612 * sdma_v4_0_init_microcode - load ucode images from disk
627 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_init_microcode()
629 IP_VERSION(4, 2, 2) || in sdma_v4_0_init_microcode()
631 IP_VERSION(4, 4, 0)) { in sdma_v4_0_init_microcode()
647 * sdma_v4_0_ring_get_rptr - get the current read pointer
649 * @ring: amdgpu ring pointer
653 static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring) in sdma_v4_0_ring_get_rptr() argument
658 rptr = ((u64 *)ring->rptr_cpu_addr); in sdma_v4_0_ring_get_rptr()
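
Note: the read pointer sits in a CPU-visible writeback slot and is kept there in bytes; the ring code works in dwords, which is why get_rptr() shifts right by two. A hedged standalone illustration of that byte-to-dword conversion (values are arbitrary):

/* Sketch: the writeback slot holds the pointer in bytes; reads convert to dwords. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        volatile uint64_t wb_slot = 0x100;   /* what the engine last wrote, in bytes */
        uint64_t rptr_dw = wb_slot >> 2;     /* what sdma_v4_0_ring_get_rptr() returns */

        printf("rptr: %llu dwords\n", (unsigned long long)rptr_dw);
        return 0;
}
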
665 * sdma_v4_0_ring_get_wptr - get the current write pointer
667 * @ring: amdgpu ring pointer
671 static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) in sdma_v4_0_ring_get_wptr() argument
673 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_ring_get_wptr()
676 if (ring->use_doorbell) { in sdma_v4_0_ring_get_wptr()
678 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr)); in sdma_v4_0_ring_get_wptr()
681 wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI); in sdma_v4_0_ring_get_wptr()
683 wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR); in sdma_v4_0_ring_get_wptr()
685 ring->me, wptr); in sdma_v4_0_ring_get_wptr()
692 * sdma_v4_0_ring_set_wptr - commit the write pointer
694 * @ring: amdgpu ring pointer
698 static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) in sdma_v4_0_ring_set_wptr() argument
700 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_ring_set_wptr()
703 if (ring->use_doorbell) { in sdma_v4_0_ring_set_wptr()
704 u64 *wb = (u64 *)ring->wptr_cpu_addr; in sdma_v4_0_ring_set_wptr()
706 DRM_DEBUG("Using doorbell -- " in sdma_v4_0_ring_set_wptr()
708 "lower_32_bits(ring->wptr << 2) == 0x%08x " in sdma_v4_0_ring_set_wptr()
709 "upper_32_bits(ring->wptr << 2) == 0x%08x\n", in sdma_v4_0_ring_set_wptr()
710 ring->wptr_offs, in sdma_v4_0_ring_set_wptr()
711 lower_32_bits(ring->wptr << 2), in sdma_v4_0_ring_set_wptr()
712 upper_32_bits(ring->wptr << 2)); in sdma_v4_0_ring_set_wptr()
714 WRITE_ONCE(*wb, (ring->wptr << 2)); in sdma_v4_0_ring_set_wptr()
716 ring->doorbell_index, ring->wptr << 2); in sdma_v4_0_ring_set_wptr()
717 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); in sdma_v4_0_ring_set_wptr()
719 DRM_DEBUG("Not using doorbell -- " in sdma_v4_0_ring_set_wptr()
722 ring->me, in sdma_v4_0_ring_set_wptr()
723 lower_32_bits(ring->wptr << 2), in sdma_v4_0_ring_set_wptr()
724 ring->me, in sdma_v4_0_ring_set_wptr()
725 upper_32_bits(ring->wptr << 2)); in sdma_v4_0_ring_set_wptr()
726 WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR, in sdma_v4_0_ring_set_wptr()
727 lower_32_bits(ring->wptr << 2)); in sdma_v4_0_ring_set_wptr()
728 WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI, in sdma_v4_0_ring_set_wptr()
729 upper_32_bits(ring->wptr << 2)); in sdma_v4_0_ring_set_wptr()
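
Note: both wptr paths move the same value through different plumbing: with a doorbell the 64-bit byte offset goes to the writeback slot and then to WDOORBELL64(); without one it is split across the WPTR/WPTR_HI register pair, and get_wptr() recombines it. A standalone sketch of the split-and-recombine arithmetic (the register writes themselves are stubbed out):

/* Sketch: splitting ring->wptr (dwords) into the 32-bit WPTR / WPTR_HI pair
 * and recombining it, as the non-doorbell path above does. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t wptr_dw = 0x123456789ULL;          /* ring->wptr, in dwords */
        uint64_t wptr_bytes = wptr_dw << 2;         /* value handed to the doorbell */

        uint32_t lo = (uint32_t)wptr_bytes;         /* -> mmSDMA0_GFX_RB_WPTR */
        uint32_t hi = (uint32_t)(wptr_bytes >> 32); /* -> mmSDMA0_GFX_RB_WPTR_HI */

        uint64_t readback = ((uint64_t)hi << 32) | lo;  /* get_wptr() recombination */
        printf("round-trips: %d\n", readback == wptr_bytes);
        return 0;
}
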
734 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
736 * @ring: amdgpu ring pointer
740 static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring) in sdma_v4_0_page_ring_get_wptr() argument
742 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_page_ring_get_wptr()
745 if (ring->use_doorbell) { in sdma_v4_0_page_ring_get_wptr()
747 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr)); in sdma_v4_0_page_ring_get_wptr()
749 wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI); in sdma_v4_0_page_ring_get_wptr()
751 wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR); in sdma_v4_0_page_ring_get_wptr()
758 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
760 * @ring: amdgpu ring pointer
764 static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring) in sdma_v4_0_page_ring_set_wptr() argument
766 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_page_ring_set_wptr()
768 if (ring->use_doorbell) { in sdma_v4_0_page_ring_set_wptr()
769 u64 *wb = (u64 *)ring->wptr_cpu_addr; in sdma_v4_0_page_ring_set_wptr()
772 WRITE_ONCE(*wb, (ring->wptr << 2)); in sdma_v4_0_page_ring_set_wptr()
773 WDOORBELL64(ring->doorbell_index, ring->wptr << 2); in sdma_v4_0_page_ring_set_wptr()
775 uint64_t wptr = ring->wptr << 2; in sdma_v4_0_page_ring_set_wptr()
777 WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR, in sdma_v4_0_page_ring_set_wptr()
779 WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI, in sdma_v4_0_page_ring_set_wptr()
784 static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in sdma_v4_0_ring_insert_nop() argument
786 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_insert_nop()
790 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_insert_nop()
791 amdgpu_ring_write(ring, ring->funcs->nop | in sdma_v4_0_ring_insert_nop()
792 SDMA_PKT_NOP_HEADER_COUNT(count - 1)); in sdma_v4_0_ring_insert_nop()
794 amdgpu_ring_write(ring, ring->funcs->nop); in sdma_v4_0_ring_insert_nop()
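
Note: when the firmware supports burst NOPs, only the first NOP packet carries a count field (count - 1) covering the whole run; otherwise every pad slot gets its own plain NOP. A hedged sketch of that emission loop writing into a plain array instead of a ring; the opcode and field position are placeholders, not the real encodings from sdma_pkt_open.h:

/* Sketch: burst-NOP padding. NOP_HEADER and COUNT() are placeholder encodings. */
#include <stdint.h>
#include <stdio.h>

#define NOP_HEADER   0x0u                        /* placeholder opcode */
#define COUNT(x)     (((uint32_t)(x)) << 16)     /* placeholder field position */

static void insert_nop(uint32_t *ring, uint32_t *pos, uint32_t count, int burst_nop)
{
        for (uint32_t i = 0; i < count; i++) {
                if (burst_nop && i == 0)
                        ring[(*pos)++] = NOP_HEADER | COUNT(count - 1);
                else
                        ring[(*pos)++] = NOP_HEADER;
        }
}

int main(void)
{
        uint32_t ring[16], pos = 0;

        insert_nop(ring, &pos, 5, 1);
        printf("emitted %u dwords, first = 0x%08x\n", pos, ring[0]);
        return 0;
}
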
798 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
800 * @ring: amdgpu ring pointer
805 * Schedule an IB in the DMA ring (VEGA10).
807 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, in sdma_v4_0_ring_emit_ib() argument
815 sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); in sdma_v4_0_ring_emit_ib()
817 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | in sdma_v4_0_ring_emit_ib()
820 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); in sdma_v4_0_ring_emit_ib()
821 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); in sdma_v4_0_ring_emit_ib()
822 amdgpu_ring_write(ring, ib->length_dw); in sdma_v4_0_ring_emit_ib()
823 amdgpu_ring_write(ring, 0); in sdma_v4_0_ring_emit_ib()
824 amdgpu_ring_write(ring, 0); in sdma_v4_0_ring_emit_ib()
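
Note: the NOP insertion ahead of the INDIRECT packet appears to exist for alignment: (2 - lower_32_bits(ring->wptr)) & 7 pads until wptr % 8 == 2, so the six packet dwords emitted above (header, two address dwords, length, two trailing zeros) end exactly on an 8-dword boundary. A standalone check of that arithmetic:

/* Sketch: verify the padding makes a 6-dword IB packet end on an 8-dword
 * boundary for every starting write pointer. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        for (uint32_t wptr = 0; wptr < 8; wptr++) {
                uint32_t pad = (2 - wptr) & 7;    /* NOPs emitted before the packet */
                uint32_t end = wptr + pad + 6;    /* header + 2 addr + len + 2 zeros */

                printf("wptr %%8 = %u  pad = %u  packet ends at %%8 = %u\n",
                       wptr, pad, end & 7);
        }
        return 0;
}
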
828 static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring, in sdma_v4_0_wait_reg_mem() argument
834 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | in sdma_v4_0_wait_reg_mem()
840 amdgpu_ring_write(ring, addr0); in sdma_v4_0_wait_reg_mem()
841 amdgpu_ring_write(ring, addr1); in sdma_v4_0_wait_reg_mem()
844 amdgpu_ring_write(ring, addr0 << 2); in sdma_v4_0_wait_reg_mem()
845 amdgpu_ring_write(ring, addr1 << 2); in sdma_v4_0_wait_reg_mem()
847 amdgpu_ring_write(ring, ref); /* reference */ in sdma_v4_0_wait_reg_mem()
848 amdgpu_ring_write(ring, mask); /* mask */ in sdma_v4_0_wait_reg_mem()
849 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | in sdma_v4_0_wait_reg_mem()
854 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
856 * @ring: amdgpu ring pointer
858 * Emit an hdp flush packet on the requested DMA ring.
860 static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) in sdma_v4_0_ring_emit_hdp_flush() argument
862 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_ring_emit_hdp_flush()
864 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; in sdma_v4_0_ring_emit_hdp_flush()
866 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; in sdma_v4_0_ring_emit_hdp_flush()
868 sdma_v4_0_wait_reg_mem(ring, 0, 1, in sdma_v4_0_ring_emit_hdp_flush()
869 adev->nbio.funcs->get_hdp_flush_done_offset(adev), in sdma_v4_0_ring_emit_hdp_flush()
870 adev->nbio.funcs->get_hdp_flush_req_offset(adev), in sdma_v4_0_ring_emit_hdp_flush()
875 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
877 * @ring: amdgpu ring pointer
882 * Add a DMA fence packet to the ring to write
886 static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, in sdma_v4_0_ring_emit_fence() argument
891 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); in sdma_v4_0_ring_emit_fence()
894 amdgpu_ring_write(ring, lower_32_bits(addr)); in sdma_v4_0_ring_emit_fence()
895 amdgpu_ring_write(ring, upper_32_bits(addr)); in sdma_v4_0_ring_emit_fence()
896 amdgpu_ring_write(ring, lower_32_bits(seq)); in sdma_v4_0_ring_emit_fence()
900 addr += 4; in sdma_v4_0_ring_emit_fence()
901 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); in sdma_v4_0_ring_emit_fence()
904 amdgpu_ring_write(ring, lower_32_bits(addr)); in sdma_v4_0_ring_emit_fence()
905 amdgpu_ring_write(ring, upper_32_bits(addr)); in sdma_v4_0_ring_emit_fence()
906 amdgpu_ring_write(ring, upper_32_bits(seq)); in sdma_v4_0_ring_emit_fence()
910 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); in sdma_v4_0_ring_emit_fence()
911 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); in sdma_v4_0_ring_emit_fence()
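
Note: the fence is written in two halves: the first FENCE packet stores the lower 32 bits of the sequence number at addr, a second packet (when 64-bit fences are requested) stores the upper 32 bits at addr + 4, and a TRAP packet then raises the interrupt. A hedged sketch of the resulting memory layout, simulated with a plain buffer rather than real packets (little-endian layout assumed):

/* Sketch: what the two 32-bit fence writes leave at addr and addr + 4. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t mem[8] = {0};
        uint64_t seq = 0x1122334455667788ULL;
        uint32_t lo = (uint32_t)seq, hi = (uint32_t)(seq >> 32);

        memcpy(mem + 0, &lo, 4);   /* first FENCE packet: lower_32_bits(seq) at addr     */
        memcpy(mem + 4, &hi, 4);   /* second FENCE packet: upper_32_bits(seq) at addr + 4 */

        uint64_t stored;
        memcpy(&stored, mem, 8);
        printf("stored seq = 0x%016llx\n", (unsigned long long)stored);
        return 0;
}
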
916 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
920 * control the gfx async dma ring buffers (VEGA10).
927 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_gfx_enable()
938 * sdma_v4_0_rlc_stop - stop the compute async dma engines
950 * sdma_v4_0_page_stop - stop the page async dma engines
954 * Stop the page async dma ring buffers (VEGA10).
961 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_page_stop()
974 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
1010 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_ctx_switch_enable()
1027 IP_VERSION(4, 2, 2) && in sdma_v4_0_ctx_switch_enable()
1028 adev->sdma.instance[i].fw_version >= 14) in sdma_v4_0_ctx_switch_enable()
1037 * sdma_v4_0_enable - stop the async dma engines
1052 if (adev->sdma.has_page_queue) in sdma_v4_0_enable()
1056 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_enable()
1064 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
1066 static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) in sdma_v4_0_rb_cntl() argument
1068 /* Set ring buffer size in dwords */ in sdma_v4_0_rb_cntl()
1069 uint32_t rb_bufsz = order_base_2(ring->ring_size / 4); in sdma_v4_0_rb_cntl()
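
Note: RB_CNTL encodes the ring size as log2 of the size in dwords, which is exactly what order_base_2(ring->ring_size / 4) computes above (ring_size is in bytes). A standalone equivalent of that calculation, with a minimal stand-in for the kernel helper:

/* Sketch: ring_size is in bytes; the RB size field wants log2(dwords). */
#include <stdint.h>
#include <stdio.h>

static uint32_t order_base_2(uint32_t n)   /* minimal stand-in for the kernel helper */
{
        uint32_t order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        uint32_t ring_size = 1024 * 4;                 /* e.g. a 1024-dword ring, in bytes */
        uint32_t rb_bufsz = order_base_2(ring_size / 4);

        printf("rb_bufsz = %u (ring of %u dwords)\n", rb_bufsz, ring_size / 4);
        return 0;
}
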
1081 * sdma_v4_0_gfx_resume - setup and start the async dma engines
1086 * Set up the gfx DMA ring buffers and enable them (VEGA10).
1091 struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; in sdma_v4_0_gfx_resume() local
1098 rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); in sdma_v4_0_gfx_resume()
1101 /* Initialize the ring buffer's read and write pointers */ in sdma_v4_0_gfx_resume()
1109 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); in sdma_v4_0_gfx_resume()
1111 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); in sdma_v4_0_gfx_resume()
1116 WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8); in sdma_v4_0_gfx_resume()
1117 WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40); in sdma_v4_0_gfx_resume()
1119 ring->wptr = 0; in sdma_v4_0_gfx_resume()
1128 ring->use_doorbell); in sdma_v4_0_gfx_resume()
1131 OFFSET, ring->doorbell_index); in sdma_v4_0_gfx_resume()
1135 sdma_v4_0_ring_set_wptr(ring); in sdma_v4_0_gfx_resume()
1141 wptr_gpu_addr = ring->wptr_gpu_addr; in sdma_v4_0_gfx_resume()
1166 * sdma_v4_0_page_resume - setup and start the async dma engines
1171 * Set up the page DMA ring buffers and enable them (VEGA10).
1176 struct amdgpu_ring *ring = &adev->sdma.instance[i].page; in sdma_v4_0_page_resume() local
1183 rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); in sdma_v4_0_page_resume()
1186 /* Initialize the ring buffer's read and write pointers */ in sdma_v4_0_page_resume()
1194 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); in sdma_v4_0_page_resume()
1196 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); in sdma_v4_0_page_resume()
1201 WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8); in sdma_v4_0_page_resume()
1202 WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40); in sdma_v4_0_page_resume()
1204 ring->wptr = 0; in sdma_v4_0_page_resume()
1213 ring->use_doorbell); in sdma_v4_0_page_resume()
1216 OFFSET, ring->doorbell_index); in sdma_v4_0_page_resume()
1221 sdma_v4_0_page_ring_set_wptr(ring); in sdma_v4_0_page_resume()
1227 wptr_gpu_addr = ring->wptr_gpu_addr; in sdma_v4_0_page_resume()
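
Note: both resume paths program the ring's GPU address in two registers: RB_BASE takes the address shifted right by 8 and RB_BASE_HI the address shifted right by 40, which implies a 256-byte-aligned base. A standalone sketch of that split and a plausible reassembly (the recombination is for illustration only):

/* Sketch: splitting a 256-byte-aligned GPU address into RB_BASE / RB_BASE_HI. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gpu_addr   = 0x0000123456789A00ULL;   /* 256-byte aligned example */
        uint32_t rb_base    = (uint32_t)(gpu_addr >> 8);   /* bits [39:8]  */
        uint32_t rb_base_hi = (uint32_t)(gpu_addr >> 40);  /* bits above 39 */

        uint64_t rebuilt = ((uint64_t)rb_base_hi << 40) | ((uint64_t)rb_base << 8);
        printf("rebuilt matches: %d\n", rebuilt == gpu_addr);
        return 0;
}
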
1256 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) { in sdma_v4_1_update_power_gating()
1288 /* Configure hold time to filter in-valid power on/off request. Use default right now */ in sdma_v4_1_init_power_gating()
1301 if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA)) in sdma_v4_0_init_pg()
1305 case IP_VERSION(4, 1, 0): in sdma_v4_0_init_pg()
1306 case IP_VERSION(4, 1, 1): in sdma_v4_0_init_pg()
1307 case IP_VERSION(4, 1, 2): in sdma_v4_0_init_pg()
1317 * sdma_v4_0_rlc_resume - setup and start the async dma engines
1332 * sdma_v4_0_load_microcode - load the sDMA ME ucode
1337 * Returns 0 for success, -EINVAL if the ucode is not available.
1349 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_load_microcode()
1350 if (!adev->sdma.instance[i].fw) in sdma_v4_0_load_microcode()
1351 return -EINVAL; in sdma_v4_0_load_microcode()
1353 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v4_0_load_microcode()
1354 amdgpu_ucode_print_sdma_hdr(&hdr->header); in sdma_v4_0_load_microcode()
1355 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in sdma_v4_0_load_microcode()
1358 (adev->sdma.instance[i].fw->data + in sdma_v4_0_load_microcode()
1359 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in sdma_v4_0_load_microcode()
1368 adev->sdma.instance[i].fw_version); in sdma_v4_0_load_microcode()
1375 * sdma_v4_0_start - setup and start the async dma engines
1384 struct amdgpu_ring *ring; in sdma_v4_0_start() local
1392 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { in sdma_v4_0_start()
1400 /* enable sdma ring preemption */ in sdma_v4_0_start()
1405 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1410 if (adev->sdma.has_page_queue) in sdma_v4_0_start()
1435 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1436 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_start()
1438 r = amdgpu_ring_test_helper(ring); in sdma_v4_0_start()
1442 if (adev->sdma.has_page_queue) { in sdma_v4_0_start()
1443 struct amdgpu_ring *page = &adev->sdma.instance[i].page; in sdma_v4_0_start()
1455 * sdma_v4_0_ring_test_ring - simple async dma engine test
1457 * @ring: amdgpu_ring structure holding ring information
1463 static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) in sdma_v4_0_ring_test_ring() argument
1465 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_ring_test_ring()
1476 gpu_addr = adev->wb.gpu_addr + (index * 4); in sdma_v4_0_ring_test_ring()
1478 adev->wb.wb[index] = cpu_to_le32(tmp); in sdma_v4_0_ring_test_ring()
1480 r = amdgpu_ring_alloc(ring, 5); in sdma_v4_0_ring_test_ring()
1484 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | in sdma_v4_0_ring_test_ring()
1486 amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); in sdma_v4_0_ring_test_ring()
1487 amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); in sdma_v4_0_ring_test_ring()
1488 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); in sdma_v4_0_ring_test_ring()
1489 amdgpu_ring_write(ring, 0xDEADBEEF); in sdma_v4_0_ring_test_ring()
1490 amdgpu_ring_commit(ring); in sdma_v4_0_ring_test_ring()
1492 for (i = 0; i < adev->usec_timeout; i++) { in sdma_v4_0_ring_test_ring()
1493 tmp = le32_to_cpu(adev->wb.wb[index]); in sdma_v4_0_ring_test_ring()
1499 if (i >= adev->usec_timeout) in sdma_v4_0_ring_test_ring()
1500 r = -ETIMEDOUT; in sdma_v4_0_ring_test_ring()
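
Note: the ring test follows amdgpu's usual pattern: reserve a writeback dword, seed it with a sentinel (0xCAFEDEAD in amdgpu's tests), ask the engine to overwrite it with 0xDEADBEEF via a WRITE_UNTILED packet, then poll the CPU copy for up to adev->usec_timeout iterations. A hedged standalone sketch of the polling half, with the GPU write simulated:

/* Sketch of the poll loop; the "GPU" write is faked partway through so the
 * loop has something to find. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        volatile uint32_t wb = 0xCAFEDEAD;       /* seed value, as in amdgpu ring tests */
        int usec_timeout = 1000;
        int i, r = 0;

        for (i = 0; i < usec_timeout; i++) {
                if (i == 10)
                        wb = 0xDEADBEEF;         /* stands in for the SDMA write packet */
                if (wb == 0xDEADBEEF)
                        break;
                /* the driver would udelay(1) here */
        }
        if (i >= usec_timeout)
                r = -1;                          /* -ETIMEDOUT in the driver */

        printf("result %d after %d iterations\n", r, i);
        return 0;
}
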
1508 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
1510 * @ring: amdgpu_ring structure holding ring information
1513 * Test a simple IB in the DMA ring (VEGA10).
1516 static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) in sdma_v4_0_ring_test_ib() argument
1518 struct amdgpu_device *adev = ring->adev; in sdma_v4_0_ring_test_ib()
1530 gpu_addr = adev->wb.gpu_addr + (index * 4); in sdma_v4_0_ring_test_ib()
1532 adev->wb.wb[index] = cpu_to_le32(tmp); in sdma_v4_0_ring_test_ib()
1544 ib.ptr[4] = 0xDEADBEEF; in sdma_v4_0_ring_test_ib()
1550 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); in sdma_v4_0_ring_test_ib()
1556 r = -ETIMEDOUT; in sdma_v4_0_ring_test_ib()
1561 tmp = le32_to_cpu(adev->wb.wb[index]); in sdma_v4_0_ring_test_ib()
1565 r = -EINVAL; in sdma_v4_0_ring_test_ib()
1577 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
1592 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | in sdma_v4_0_vm_copy_pte()
1594 ib->ptr[ib->length_dw++] = bytes - 1; in sdma_v4_0_vm_copy_pte()
1595 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ in sdma_v4_0_vm_copy_pte()
1596 ib->ptr[ib->length_dw++] = lower_32_bits(src); in sdma_v4_0_vm_copy_pte()
1597 ib->ptr[ib->length_dw++] = upper_32_bits(src); in sdma_v4_0_vm_copy_pte()
1598 ib->ptr[ib->length_dw++] = lower_32_bits(pe); in sdma_v4_0_vm_copy_pte()
1599 ib->ptr[ib->length_dw++] = upper_32_bits(pe); in sdma_v4_0_vm_copy_pte()
1604 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
1620 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | in sdma_v4_0_vm_write_pte()
1622 ib->ptr[ib->length_dw++] = lower_32_bits(pe); in sdma_v4_0_vm_write_pte()
1623 ib->ptr[ib->length_dw++] = upper_32_bits(pe); in sdma_v4_0_vm_write_pte()
1624 ib->ptr[ib->length_dw++] = ndw - 1; in sdma_v4_0_vm_write_pte()
1625 for (; ndw > 0; ndw -= 2) { in sdma_v4_0_vm_write_pte()
1626 ib->ptr[ib->length_dw++] = lower_32_bits(value); in sdma_v4_0_vm_write_pte()
1627 ib->ptr[ib->length_dw++] = upper_32_bits(value); in sdma_v4_0_vm_write_pte()
1633 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
1650 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE); in sdma_v4_0_vm_set_pte_pde()
1651 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ in sdma_v4_0_vm_set_pte_pde()
1652 ib->ptr[ib->length_dw++] = upper_32_bits(pe); in sdma_v4_0_vm_set_pte_pde()
1653 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ in sdma_v4_0_vm_set_pte_pde()
1654 ib->ptr[ib->length_dw++] = upper_32_bits(flags); in sdma_v4_0_vm_set_pte_pde()
1655 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ in sdma_v4_0_vm_set_pte_pde()
1656 ib->ptr[ib->length_dw++] = upper_32_bits(addr); in sdma_v4_0_vm_set_pte_pde()
1657 ib->ptr[ib->length_dw++] = incr; /* increment size */ in sdma_v4_0_vm_set_pte_pde()
1658 ib->ptr[ib->length_dw++] = 0; in sdma_v4_0_vm_set_pte_pde()
1659 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */ in sdma_v4_0_vm_set_pte_pde()
1663 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
1665 * @ring: amdgpu_ring structure holding ring information
1668 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) in sdma_v4_0_ring_pad_ib() argument
1670 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_pad_ib()
1674 pad_count = (-ib->length_dw) & 7; in sdma_v4_0_ring_pad_ib()
1676 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_pad_ib()
1677 ib->ptr[ib->length_dw++] = in sdma_v4_0_ring_pad_ib()
1679 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); in sdma_v4_0_ring_pad_ib()
1681 ib->ptr[ib->length_dw++] = in sdma_v4_0_ring_pad_ib()
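
Note: pad_count = (-ib->length_dw) & 7 rounds the IB length up to the next multiple of 8 dwords without a division, and the pad slots are then filled with the same burst-NOP rule as in the ring path. A quick standalone check of the rounding expression:

/* Sketch: (-len) & 7 gives the dwords needed to reach the next multiple of 8
 * (0 when the length is already aligned). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        for (uint32_t len = 0; len < 10; len++) {
                uint32_t pad = (0u - len) & 7;
                printf("len %2u -> pad %u -> total %2u\n", len, pad, len + pad);
        }
        return 0;
}
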
1687 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
1689 * @ring: amdgpu_ring pointer
1693 static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) in sdma_v4_0_ring_emit_pipeline_sync() argument
1695 uint32_t seq = ring->fence_drv.sync_seq; in sdma_v4_0_ring_emit_pipeline_sync()
1696 uint64_t addr = ring->fence_drv.gpu_addr; in sdma_v4_0_ring_emit_pipeline_sync()
1699 sdma_v4_0_wait_reg_mem(ring, 1, 0, in sdma_v4_0_ring_emit_pipeline_sync()
1702 seq, 0xffffffff, 4); in sdma_v4_0_ring_emit_pipeline_sync()
1707 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
1709 * @ring: amdgpu_ring pointer
1716 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, in sdma_v4_0_ring_emit_vm_flush() argument
1719 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); in sdma_v4_0_ring_emit_vm_flush()
1722 static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring, in sdma_v4_0_ring_emit_wreg() argument
1725 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | in sdma_v4_0_ring_emit_wreg()
1727 amdgpu_ring_write(ring, reg); in sdma_v4_0_ring_emit_wreg()
1728 amdgpu_ring_write(ring, val); in sdma_v4_0_ring_emit_wreg()
1731 static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, in sdma_v4_0_ring_emit_reg_wait() argument
1734 sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); in sdma_v4_0_ring_emit_reg_wait()
1739 uint fw_version = adev->sdma.instance[0].fw_version; in sdma_v4_0_fw_support_paging_queue()
1742 case IP_VERSION(4, 0, 0): in sdma_v4_0_fw_support_paging_queue()
1744 case IP_VERSION(4, 0, 1): in sdma_v4_0_fw_support_paging_queue()
1747 case IP_VERSION(4, 2, 0): in sdma_v4_0_fw_support_paging_queue()
1764 if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 0, 0)) && in sdma_v4_0_early_init()
1766 adev->sdma.has_page_queue = false; in sdma_v4_0_early_init()
1768 adev->sdma.has_page_queue = true; in sdma_v4_0_early_init()
1797 struct amdgpu_ring *ring; in sdma_v4_0_sw_init() local
1804 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1807 &adev->sdma.trap_irq); in sdma_v4_0_sw_init()
1813 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1816 &adev->sdma.ecc_irq); in sdma_v4_0_sw_init()
1822 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1825 &adev->sdma.vm_hole_irq); in sdma_v4_0_sw_init()
1831 &adev->sdma.doorbell_invalid_irq); in sdma_v4_0_sw_init()
1837 &adev->sdma.pool_timeout_irq); in sdma_v4_0_sw_init()
1843 &adev->sdma.srbm_write_irq); in sdma_v4_0_sw_init()
1848 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1849 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_sw_init()
1850 ring->ring_obj = NULL; in sdma_v4_0_sw_init()
1851 ring->use_doorbell = true; in sdma_v4_0_sw_init()
1854 ring->use_doorbell?"true":"false"); in sdma_v4_0_sw_init()
1857 ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; in sdma_v4_0_sw_init()
1864 IP_VERSION(4, 2, 2) && in sdma_v4_0_sw_init()
1866 ring->vm_hub = AMDGPU_MMHUB1(0); in sdma_v4_0_sw_init()
1868 ring->vm_hub = AMDGPU_MMHUB0(0); in sdma_v4_0_sw_init()
1870 sprintf(ring->name, "sdma%d", i); in sdma_v4_0_sw_init()
1871 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
1877 if (adev->sdma.has_page_queue) { in sdma_v4_0_sw_init()
1878 ring = &adev->sdma.instance[i].page; in sdma_v4_0_sw_init()
1879 ring->ring_obj = NULL; in sdma_v4_0_sw_init()
1880 ring->use_doorbell = true; in sdma_v4_0_sw_init()
1886 IP_VERSION(4, 0, 0) && in sdma_v4_0_sw_init()
1888 IP_VERSION(4, 2, 0)) { in sdma_v4_0_sw_init()
1889 ring->doorbell_index = in sdma_v4_0_sw_init()
1890 adev->doorbell_index.sdma_engine[i] << 1; in sdma_v4_0_sw_init()
1891 ring->doorbell_index += 0x400; in sdma_v4_0_sw_init()
1896 ring->doorbell_index = in sdma_v4_0_sw_init()
1897 (adev->doorbell_index.sdma_engine[i] + 1) << 1; in sdma_v4_0_sw_init()
1901 IP_VERSION(4, 2, 2) && in sdma_v4_0_sw_init()
1903 ring->vm_hub = AMDGPU_MMHUB1(0); in sdma_v4_0_sw_init()
1905 ring->vm_hub = AMDGPU_MMHUB0(0); in sdma_v4_0_sw_init()
1907 sprintf(ring->name, "page%d", i); in sdma_v4_0_sw_init()
1908 r = amdgpu_ring_init(adev, ring, 1024, in sdma_v4_0_sw_init()
1909 &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
1918 dev_err(adev->dev, "Failed to initialize sdma ras block!\n"); in sdma_v4_0_sw_init()
1919 return -EINVAL; in sdma_v4_0_sw_init()
1923 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); in sdma_v4_0_sw_init()
1925 adev->sdma.ip_dump = ptr; in sdma_v4_0_sw_init()
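
Note: the doorbell indices above come from a small calculation: doorbell slots are 32-bit, so a 64-bit SDMA doorbell uses two of them and the per-engine index is shifted left by one; the page queue then either sits 0x400 slots above the gfx queue or takes the next engine-sized slot, chosen by the SDMA IP version (4.0.0 and 4.2.0 are the versions tested in the matched lines, though the listing does not show which branch each takes). A standalone sketch of both computations, without claiming which version selects which:

/* Sketch: gfx vs. page-queue doorbell index selection as shown above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t gfx_doorbell(uint32_t engine_index)
{
        return engine_index << 1;                 /* two 32-bit slots per 64-bit doorbell */
}

static uint32_t page_doorbell(uint32_t engine_index, int use_0x400_offset)
{
        if (use_0x400_offset)
                return (engine_index << 1) + 0x400;
        return (engine_index + 1) << 1;           /* next engine-sized slot */
}

int main(void)
{
        uint32_t idx = 0x100;                     /* placeholder engine doorbell index */

        printf("gfx 0x%x  page(+0x400) 0x%x  page(next slot) 0x%x\n",
               gfx_doorbell(idx), page_doorbell(idx, 1), page_doorbell(idx, 0));
        return 0;
}
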
1937 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_fini()
1938 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v4_0_sw_fini()
1939 if (adev->sdma.has_page_queue) in sdma_v4_0_sw_fini()
1940 amdgpu_ring_fini(&adev->sdma.instance[i].page); in sdma_v4_0_sw_fini()
1943 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 2, 2) || in sdma_v4_0_sw_fini()
1944 amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 0)) in sdma_v4_0_sw_fini()
1949 kfree(adev->sdma.ip_dump); in sdma_v4_0_sw_fini()
1958 if (adev->flags & AMD_IS_APU) in sdma_v4_0_hw_init()
1976 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_hw_fini()
1977 amdgpu_irq_put(adev, &adev->sdma.ecc_irq, in sdma_v4_0_hw_fini()
1985 if (adev->flags & AMD_IS_APU) in sdma_v4_0_hw_fini()
1996 if (adev->in_s0ix) { in sdma_v4_0_suspend()
2009 if (adev->in_s0ix) { in sdma_v4_0_resume()
2023 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_is_idle()
2039 for (i = 0; i < adev->usec_timeout; i++) { in sdma_v4_0_wait_for_idle()
2040 for (j = 0; j < adev->sdma.num_instances; j++) { in sdma_v4_0_wait_for_idle()
2045 if (j == adev->sdma.num_instances) in sdma_v4_0_wait_for_idle()
2049 return -ETIMEDOUT; in sdma_v4_0_wait_for_idle()
2081 instance = sdma_v4_0_irq_id_to_seq(entry->client_id); in sdma_v4_0_process_trap_irq()
2085 switch (entry->ring_id) { in sdma_v4_0_process_trap_irq()
2087 amdgpu_fence_process(&adev->sdma.instance[instance].ring); in sdma_v4_0_process_trap_irq()
2091 IP_VERSION(4, 2, 0)) in sdma_v4_0_process_trap_irq()
2092 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
2099 IP_VERSION(4, 2, 0)) in sdma_v4_0_process_trap_irq()
2100 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
2112 /* When “Full RAS” is enabled, the per-IP interrupt sources should in sdma_v4_0_process_ras_data_cb()
2119 instance = sdma_v4_0_irq_id_to_seq(entry->client_id); in sdma_v4_0_process_ras_data_cb()
2137 instance = sdma_v4_0_irq_id_to_seq(entry->client_id); in sdma_v4_0_process_illegal_inst_irq()
2141 switch (entry->ring_id) { in sdma_v4_0_process_illegal_inst_irq()
2143 drm_sched_fault(&adev->sdma.instance[instance].ring.sched); in sdma_v4_0_process_illegal_inst_irq()
2171 instance = sdma_v4_0_irq_id_to_seq(entry->client_id); in sdma_v4_0_print_iv_entry()
2172 if (instance < 0 || instance >= adev->sdma.num_instances) { in sdma_v4_0_print_iv_entry()
2173 dev_err(adev->dev, "sdma instance invalid %d\n", instance); in sdma_v4_0_print_iv_entry()
2174 return -EINVAL; in sdma_v4_0_print_iv_entry()
2177 addr = (u64)entry->src_data[0] << 12; in sdma_v4_0_print_iv_entry()
2178 addr |= ((u64)entry->src_data[1] & 0xf) << 44; in sdma_v4_0_print_iv_entry()
2180 dev_dbg_ratelimited(adev->dev, in sdma_v4_0_print_iv_entry()
2181 "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u pasid:%u\n", in sdma_v4_0_print_iv_entry()
2182 instance, addr, entry->src_id, entry->ring_id, entry->vmid, in sdma_v4_0_print_iv_entry()
2183 entry->pasid); in sdma_v4_0_print_iv_entry()
2185 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); in sdma_v4_0_print_iv_entry()
2187 dev_dbg_ratelimited(adev->dev, in sdma_v4_0_print_iv_entry()
2189 task_info->process_name, task_info->tgid, in sdma_v4_0_print_iv_entry()
2190 task_info->task_name, task_info->pid); in sdma_v4_0_print_iv_entry()
2201 dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n"); in sdma_v4_0_process_vm_hole_irq()
2210 dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); in sdma_v4_0_process_doorbell_invalid_irq()
2219 dev_dbg_ratelimited(adev->dev, in sdma_v4_0_process_pool_timeout_irq()
2229 dev_dbg_ratelimited(adev->dev, in sdma_v4_0_process_srbm_write_irq()
2230 "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n"); in sdma_v4_0_process_srbm_write_irq()
2242 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { in sdma_v4_0_update_medium_grain_clock_gating()
2243 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2257 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2281 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { in sdma_v4_0_update_medium_grain_light_sleep()
2282 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2283 /* 1-not override: enable sdma mem light sleep */ in sdma_v4_0_update_medium_grain_light_sleep()
2290 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2291 /* 0-override:disable sdma mem light sleep */ in sdma_v4_0_update_medium_grain_light_sleep()
2321 case IP_VERSION(4, 1, 0): in sdma_v4_0_set_powergating_state()
2322 case IP_VERSION(4, 1, 1): in sdma_v4_0_set_powergating_state()
2323 case IP_VERSION(4, 1, 2): in sdma_v4_0_set_powergating_state()
2360 if (!adev->sdma.ip_dump) in sdma_v4_0_print_ip_state()
2363 drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); in sdma_v4_0_print_ip_state()
2364 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_print_ip_state()
2369 drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_0[j].reg_name, in sdma_v4_0_print_ip_state()
2370 adev->sdma.ip_dump[instance_offset + j]); in sdma_v4_0_print_ip_state()
2381 if (!adev->sdma.ip_dump) in sdma_v4_0_dump_ip_state()
2385 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_dump_ip_state()
2388 adev->sdma.ip_dump[instance_offset + j] = in sdma_v4_0_dump_ip_state()
2483 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_ring_funcs()
2484 adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; in sdma_v4_0_set_ring_funcs()
2485 adev->sdma.instance[i].ring.me = i; in sdma_v4_0_set_ring_funcs()
2486 if (adev->sdma.has_page_queue) { in sdma_v4_0_set_ring_funcs()
2487 adev->sdma.instance[i].page.funcs = in sdma_v4_0_set_ring_funcs()
2489 adev->sdma.instance[i].page.me = i; in sdma_v4_0_set_ring_funcs()
2526 adev->sdma.trap_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2527 adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2528 /*For Arcturus and Aldebaran, add another 4 irq handler*/ in sdma_v4_0_set_irq_funcs()
2529 switch (adev->sdma.num_instances) { in sdma_v4_0_set_irq_funcs()
2532 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2533 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2534 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2535 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2540 adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; in sdma_v4_0_set_irq_funcs()
2541 adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs; in sdma_v4_0_set_irq_funcs()
2542 adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs; in sdma_v4_0_set_irq_funcs()
2543 adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs; in sdma_v4_0_set_irq_funcs()
2544 adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs; in sdma_v4_0_set_irq_funcs()
2545 adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs; in sdma_v4_0_set_irq_funcs()
2546 adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs; in sdma_v4_0_set_irq_funcs()
2550 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
2568 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | in sdma_v4_0_emit_copy_buffer()
2571 ib->ptr[ib->length_dw++] = byte_count - 1; in sdma_v4_0_emit_copy_buffer()
2572 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ in sdma_v4_0_emit_copy_buffer()
2573 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); in sdma_v4_0_emit_copy_buffer()
2574 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); in sdma_v4_0_emit_copy_buffer()
2575 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); in sdma_v4_0_emit_copy_buffer()
2576 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); in sdma_v4_0_emit_copy_buffer()
2580 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
2594 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); in sdma_v4_0_emit_fill_buffer()
2595 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); in sdma_v4_0_emit_fill_buffer()
2596 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); in sdma_v4_0_emit_fill_buffer()
2597 ib->ptr[ib->length_dw++] = src_data; in sdma_v4_0_emit_fill_buffer()
2598 ib->ptr[ib->length_dw++] = byte_count - 1; in sdma_v4_0_emit_fill_buffer()
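
Note: both the COPY and CONST_FILL packets encode the transfer length as byte_count - 1 (a field value of 0 means one byte), and 64-bit addresses are split into lower/upper dwords. A standalone sketch of filling in a CONST_FILL-style packet as a dword array; the opcode value is a placeholder, not the real SDMA encoding:

/* Sketch: CONST_FILL-style packet layout with a placeholder opcode. */
#include <stdint.h>
#include <stdio.h>

#define CONST_FILL_OPCODE_PLACEHOLDER 0xAAu   /* not the real value */

static uint32_t emit_fill(uint32_t *ib, uint32_t pos, uint64_t dst,
                          uint32_t src_data, uint32_t byte_count)
{
        ib[pos++] = CONST_FILL_OPCODE_PLACEHOLDER;
        ib[pos++] = (uint32_t)dst;            /* lower_32_bits(dst_offset) */
        ib[pos++] = (uint32_t)(dst >> 32);    /* upper_32_bits(dst_offset) */
        ib[pos++] = src_data;                 /* fill pattern */
        ib[pos++] = byte_count - 1;           /* length is encoded minus one */
        return pos;
}

int main(void)
{
        uint32_t ib[8];
        uint32_t len = emit_fill(ib, 0, 0x100000000ULL, 0xDEADBEEF, 4096);

        printf("packet uses %u dwords, count dword = %u\n", len, ib[4]);
        return 0;
}
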
2613 adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; in sdma_v4_0_set_buffer_funcs()
2614 if (adev->sdma.has_page_queue) in sdma_v4_0_set_buffer_funcs()
2615 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; in sdma_v4_0_set_buffer_funcs()
2617 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v4_0_set_buffer_funcs()
2633 adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; in sdma_v4_0_set_vm_pte_funcs()
2634 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_vm_pte_funcs()
2635 if (adev->sdma.has_page_queue) in sdma_v4_0_set_vm_pte_funcs()
2636 sched = &adev->sdma.instance[i].page.sched; in sdma_v4_0_set_vm_pte_funcs()
2638 sched = &adev->sdma.instance[i].ring.sched; in sdma_v4_0_set_vm_pte_funcs()
2639 adev->vm_manager.vm_pte_scheds[i] = sched; in sdma_v4_0_set_vm_pte_funcs()
2641 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v4_0_set_vm_pte_funcs()
2680 /* err_data->ce_count should be initialized to 0 in sdma_v4_0_query_ras_error_count_by_instance()
2682 err_data->ce_count += sec_count; in sdma_v4_0_query_ras_error_count_by_instance()
2685 err_data->ue_count = 0; in sdma_v4_0_query_ras_error_count_by_instance()
2694 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_query_ras_error_count()
2696 dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i); in sdma_v4_0_query_ras_error_count()
2708 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v4_0_reset_ras_error_count()
2728 case IP_VERSION(4, 2, 0): in sdma_v4_0_set_ras_funcs()
2729 case IP_VERSION(4, 2, 2): in sdma_v4_0_set_ras_funcs()
2730 adev->sdma.ras = &sdma_v4_0_ras; in sdma_v4_0_set_ras_funcs()
2732 case IP_VERSION(4, 4, 0): in sdma_v4_0_set_ras_funcs()
2733 adev->sdma.ras = &sdma_v4_4_ras; in sdma_v4_0_set_ras_funcs()
2742 .major = 4,