/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and fire an interrupt
 * if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
                                   struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* write the fence */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
        /* flush HDP */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);
}
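/*
 * Illustration (not part of the driver): how the DMA_PACKET() headers
 * above are laid out on this hardware family. This is a standalone,
 * hedged sketch -- the field layout and the FENCE/TRAP opcode values
 * below are my reading of the evergreen register headers, so treat
 * them as assumptions rather than authoritative definitions.
 */
#include <stdint.h>
#include <stdio.h>

/* assumed layout: opcode [31:28], sub-opcode [27:20], count [19:0] */
static uint32_t sketch_dma_packet(uint32_t cmd, uint32_t sub, uint32_t n)
{
        return ((cmd & 0xF) << 28) | ((sub & 0xFF) << 20) | (n & 0xFFFFF);
}

static void sketch_show_fence_headers(void)
{
        /* assumed opcodes: 0x6 = FENCE, 0x7 = TRAP */
        printf("fence header: 0x%08x\n", sketch_dma_packet(0x6, 0, 0));
        printf("trap header:  0x%08x\n", sketch_dma_packet(0x7, 0, 0));
}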
/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
                                   struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;

                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
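/*
 * Illustration (not part of the driver): the NOP padding above targets
 * (wptr & 7) == 5 because the INDIRECT_BUFFER packet is 3 DWs long, so
 * a packet starting at offset 5 within an 8-DW window ends exactly on
 * the 8-DW boundary (5 + 3 = 8). A standalone sketch of the arithmetic:
 */
#include <stdint.h>
#include <assert.h>

static uint32_t sketch_pad_for_ib(uint32_t wptr)
{
        uint32_t nops = 0;

        while ((wptr & 7) != 5) {       /* same condition as the loop above */
                wptr++;
                nops++;
        }
        assert(((wptr + 3) & 7) == 0);  /* 3-DW IB packet ends 8-DW aligned */
        return nops;
}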
/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object with embedded fence
 *
 * Copy GPU paging using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
                                        uint64_t src_offset,
                                        uint64_t dst_offset,
                                        unsigned num_gpu_pages,
                                        struct dma_resv *resv)
{
        struct radeon_fence *fence;
        struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;

        radeon_sync_create(&sync);

        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_sync_resv(rdev, &sync, resv, false);
        radeon_sync_rings(rdev, &sync, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
                if (cur_size_in_dw > 0xFFFFF)
                        cur_size_in_dw = 0xFFFFF;
                size_in_dw -= cur_size_in_dw;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }

        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_sync_free(rdev, &sync, fence);

        return fence;
}
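/*
 * Illustration (not part of the driver): the ring-space request above,
 * num_loops * 5 + 11, budgets 5 DWs per COPY packet (header, two low
 * address DWs, two address-high DWs), with a single packet moving at
 * most 0xFFFFF DWs. Reading the "+ 11" as headroom for the surrounding
 * sync and fence packets is my interpretation, not a documented
 * constant. A standalone sketch of the sizing:
 */
#include <stdint.h>

static unsigned int sketch_copy_ring_dwords(unsigned int num_gpu_pages)
{
        const unsigned int page_shift = 12;     /* RADEON_GPU_PAGE_SHIFT: 4K pages */
        unsigned int size_in_dw = (num_gpu_pages << page_shift) / 4;
        unsigned int num_loops = (size_in_dw + 0xFFFFF - 1) / 0xFFFFF; /* DIV_ROUND_UP */

        return num_loops * 5 + 11;
}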
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}
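/*
 * Illustration (not part of the driver): the not-hung path above still
 * calls radeon_ring_lockup_update() because of the usual watchdog
 * pattern -- refresh a "last seen alive" record whenever the engine is
 * known good, so the lockup test only fires after the ring has made no
 * progress for a full timeout. A generic, standalone sketch; every
 * name below is hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct sketch_ring_watchdog {
        uint32_t last_rptr;     /* read pointer at the last update */
        time_t last_alive;      /* when the ring was last known healthy */
};

static void sketch_lockup_update(struct sketch_ring_watchdog *wd, uint32_t rptr)
{
        wd->last_rptr = rptr;
        wd->last_alive = time(NULL);
}

static bool sketch_test_lockup(struct sketch_ring_watchdog *wd, uint32_t rptr,
                               double timeout_sec)
{
        if (rptr != wd->last_rptr) {    /* ring progressed: not locked up */
                sketch_lockup_update(wd, rptr);
                return false;
        }
        return difftime(time(NULL), wd->last_alive) > timeout_sec;
}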