 * to the 3D engine (ring buffer, IBs, etc.), but the
 * cayman_dma_get_rptr - get the current read pointer
 * @ring: radeon ring pointer
                             struct radeon_ring *ring)
        if (rdev->wb.enabled) {
                rptr = rdev->wb.wb[ring->rptr_offs/4];
                if (ring->idx == R600_RING_TYPE_DMA_INDEX)
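/*
 * Illustrative sketch, not part of the listing above: the rptr value read
 * from the writeback slot or the DMA_RB_RPTR register is a byte offset into
 * the ring, while radeon_ring indices are counted in dwords, so the driver
 * masks it to the ring range and shifts right by 2 before returning it.
 * A minimal model of that conversion (0x3fffc being the same mask used by
 * the set_wptr path below):
 */
static inline u32 dma_rptr_to_ring_index(u32 rptr)
{
        return (rptr & 0x3fffc) >> 2;   /* byte offset -> dword index */
}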
 * cayman_dma_get_wptr - get the current write pointer
 * @ring: radeon ring pointer
                        struct radeon_ring *ring)
        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
 * cayman_dma_set_wptr - commit the write pointer
 * @ring: radeon ring pointer
                         struct radeon_ring *ring)
        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
        WREG32(reg, (ring->wptr << 2) & 0x3fffc);
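/*
 * Illustrative sketch, not from the driver source: the write pointer is
 * tracked in dwords but DMA_RB_WPTR takes a byte offset, hence the shift
 * left by 2 and the 0x3fffc mask in the line above. For example, a wptr of
 * 0x10 dwords becomes a byte offset of 0x40.
 */
static inline u32 dma_wptr_to_bytes(u32 wptr_dw)
{
        return (wptr_dw << 2) & 0x3fffc;
}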
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 * Schedule an IB in the DMA ring (cayman-SI).
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
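/*
 * Illustrative sketch (assumption about the alignment rule, not from the
 * file): the INDIRECT_BUFFER packet emitted above is 3 dwords long, so
 * padding with NOPs until (wptr & 7) == 5 makes the packet end exactly on
 * an 8-dword boundary, since 5 + 3 == 8. A standalone model of that padding
 * loop, with a hypothetical emit() callback standing in for
 * radeon_ring_write():
 */
static void pad_for_ib_packet(u32 *wptr, u32 nop, void (*emit)(u32 dw))
{
        while ((*wptr & 7) != 5) {      /* e.g. wptr == 2 emits 3 NOPs */
                emit(nop);              /* ring's NOP packet encoding */
                (*wptr)++;
        }
        /* the 3-dword IB packet now ends at a multiple of 8 dwords */
}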
 * cayman_dma_stop - stop the async dma engines
 * Stop the async dma engines (cayman-SI).
        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
 * cayman_dma_resume - setup and start the async dma engines
 * Set up the DMA ring buffers and enable them. (cayman-SI).
        struct radeon_ring *ring;
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                /* Initialize the ring buffer's read and write pointers */
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
                if (rdev->wb.enabled)
                WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
                ring->wptr = 0;
                WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
                ring->ready = true;
                r = radeon_ring_test(rdev, ring->idx, ring);
                        ring->ready = false;
        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
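/*
 * Illustrative sketch (assumption about the register encodings, not from
 * the file): DMA_RB_CNTL encodes the ring size as log2 of the dword count,
 * so ring_size in bytes is divided by 4 before order_base_2(); the base
 * register takes the GPU address in 256-byte units (gpu_addr >> 8), and
 * DMA_RB_WPTR again takes a byte offset (wptr << 2).
 */
static u32 dma_rb_bufsz(u32 ring_size_bytes)
{
        /* e.g. a 64 KiB ring: 65536 / 4 = 16384 dwords -> rb_bufsz = 14 */
        return order_base_2(ring_size_bytes / 4);
}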
 * cayman_dma_fini - tear down the async dma engines
 * Stop the async dma engines and free the rings (cayman-SI).
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 * @ring: radeon_ring structure holding ring information
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                radeon_ring_lockup_update(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
 * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
                pe += ndw * 4;
                src += ndw * 4;
                count -= ndw / 2;
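/*
 * Illustrative sketch, not from the file: each page-table entry is 8 bytes,
 * i.e. 2 dwords, so a COPY packet covering ndw dwords updates ndw / 2 PTEs
 * and advances both addresses by ndw * 4 bytes, which is exactly the
 * bookkeeping above. A minimal model with hypothetical names:
 */
static void advance_pte_copy(u64 *pe, u64 *src, unsigned *count, unsigned ndw)
{
        *pe += (u64)ndw * 4;    /* bytes written into the page table */
        *src += (u64)ndw * 4;   /* bytes consumed from the GART table */
        *count -= ndw / 2;      /* 2 dwords per PTE */
}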
 * cayman_dma_vm_write_pages - update PTEs by writing them manually
                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
                ib->ptr[ib->length_dw++] = pe;
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
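/*
 * Illustrative sketch, not from the file: the WRITE packet carries every
 * 64-bit PTE inline as a low/high dword pair, which is why the loop above
 * consumes 2 dwords and advances pe by 8 bytes per entry. A model of how
 * one such pair lands in the IB, with hypothetical names:
 */
static void emit_pte_pair(u32 *ib_ptr, unsigned *length_dw, u64 value)
{
        ib_ptr[(*length_dw)++] = lower_32_bits(value);  /* low dword first */
        ib_ptr[(*length_dw)++] = upper_32_bits(value);  /* then high dword */
}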
 * cayman_dma_vm_set_pages - update the page tables using the DMA
                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                pe += ndw * 4;
                count -= ndw / 2;
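/*
 * Illustrative sketch (assumption about the packet semantics, not from the
 * file): the PTE_PDE packet is a fixed-size command that asks the DMA
 * engine itself to generate ndw dwords (ndw / 2 PTEs), combining the mask
 * (flags) with a 64-bit value and adding incr for each successive entry,
 * so the IB cost stays constant no matter how many contiguous pages it
 * covers. A CPU-side model of the entries such a packet describes:
 */
static void model_pte_pde(u64 *dst, unsigned ndw, u64 value, u64 mask, u64 incr)
{
        unsigned i;

        for (i = 0; i < ndw / 2; i++) { /* 2 dwords per generated PTE */
                dst[i] = value | mask;
                value += incr;          /* next page's address */
        }
}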
 * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
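/*
 * Illustrative sketch, not from the file: DMA IBs must be a multiple of
 * 8 dwords long, so the tail is filled with NOP packets; for example a
 * 21-dword IB gets 3 NOPs appended to reach 24. A minimal model, with the
 * ring's NOP encoding passed in:
 */
static void pad_ib_to_8_dw(u32 *ib_ptr, unsigned *length_dw, u32 nop)
{
        while (*length_dw & 0x7)
                ib_ptr[(*length_dw)++] = nop;
}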
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
        radeon_ring_write(ring, pd_addr >> 12);
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);
        /* bits 0-7 are the VM contexts 0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);
        radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
        radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 0); /* mask */
        radeon_ring_write(ring, 0); /* value */
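/*
 * Illustrative sketch (assumption about the packet layout, not from the
 * file): each SRBM_WRITE burst above is three dwords: the packet header,
 * then a dword carrying the byte-enable mask in bits 16..19 plus the
 * register index (byte address >> 2), then the value to write. The page
 * directory base register is programmed in 4 KiB units, hence pd_addr >> 12.
 * A hypothetical helper wrapping that three-dword sequence:
 */
static void dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (reg >> 2));      /* byte enables + reg index */
        radeon_ring_write(ring, val);
}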