Lines matching refs: txd (drivers/dma/timb_dma.c)

Definition (txd is a member of struct timb_dma_desc):
   64  struct dma_async_tx_descriptor txd;
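
The definition site shows the standard dmaengine embedding pattern: the generic
struct dma_async_tx_descriptor lives inside the driver's private descriptor, the
core only ever holds a pointer to the txd member, and the driver recovers its own
state with container_of() (as td_tx_submit() does at line 296). A minimal sketch
of the surrounding types; every field except txd, chan, and membase is an
assumption made so the later sketches are self-contained:

    #include <linux/dmaengine.h>
    #include <linux/io.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Driver-private descriptor; only txd appears in the listing. */
    struct timb_dma_desc {
            struct dma_async_tx_descriptor txd; /* generic part (line 64) */
            u8 *desc_list;              /* assumed: CPU view of the HW descriptor list */
            unsigned int desc_list_len; /* assumed: its size in bytes */
            struct list_head desc_node; /* assumed: linkage on the channel lists */
    };

    /* Driver-private channel; membase is visible at lines 199/209,
     * the lock and lists are assumptions. */
    struct timb_dma_chan {
            struct dma_chan chan;
            void __iomem *membase;
            spinlock_t lock;
            struct list_head active_list;
            struct list_head queue;
            struct list_head free_list;
    };

    /* container_of() turns the core's txd pointer back into td_desc. */
    static inline struct timb_dma_desc *to_td_desc(struct dma_async_tx_descriptor *txd)
    {
            return container_of(txd, struct timb_dma_desc, txd);
    }
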
__td_start_dma():
  199  iowrite32(td_desc->txd.phys, td_chan->membase + ...
  209  iowrite32(td_desc->txd.phys, td_chan->membase + ...
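
Both hits program the bus address of the mapped descriptor list (txd.phys, set
up by dma_map_single() at line 342) into the controller's descriptor-address
register, one write per transfer direction. The register offsets are truncated
in the listing, so this sketch invents one:

    /* Hypothetical offset; the real per-direction offsets are cut off above. */
    #define TIMBDMA_OFFS_DLAR 0x08

    static void __td_start_dma_sketch(struct timb_dma_chan *td_chan,
                                      struct timb_dma_desc *td_desc)
    {
            /* Point the controller at the DMA-mapped descriptor list. */
            iowrite32(td_desc->txd.phys, td_chan->membase + TIMBDMA_OFFS_DLAR);
    }
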
__td_finish() (txd is a local variable):
  222  struct dma_async_tx_descriptor *txd;
  231  txd = &td_desc->txd;
  234          txd->cookie);
  243  dma_cookie_complete(txd);
  246  dmaengine_desc_get_callback(txd, &cb);
  250  dma_descriptor_unmap(txd);

__td_start_next():
  288          __func__, td_desc->txd.cookie);
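
__td_finish() follows the canonical dmaengine completion order: complete the
cookie, snapshot the client callback, unmap core-managed buffers, then invoke
the callback; __td_start_next() afterwards launches the next queued descriptor.
A condensed sketch of that order (locking and list handling omitted; the final
dmaengine_desc_callback_invoke() is the usual partner of
dmaengine_desc_get_callback() in drivers/dma/dmaengine.h, not shown in the
listing):

    #include "dmaengine.h" /* drivers/dma private header: cookie/callback helpers */

    static void __td_finish_sketch(struct timb_dma_desc *td_desc)
    {
            struct dma_async_tx_descriptor *txd = &td_desc->txd;
            struct dmaengine_desc_callback cb;

            /* Make dma_cookie_status() report this transaction complete (line 243). */
            dma_cookie_complete(txd);

            /* Snapshot the callback before the descriptor is recycled (line 246). */
            dmaengine_desc_get_callback(txd, &cb);

            /* Drop mappings the core recorded for this descriptor (line 250). */
            dma_descriptor_unmap(txd);

            /* Notify the client last, after all bookkeeping. */
            dmaengine_desc_callback_invoke(&cb, NULL);
    }
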
td_tx_submit() (txd is the function argument):
  294  static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
  296  struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
  297          txd);
  298  struct timb_dma_chan *td_chan = container_of(txd->chan, ...
  303  cookie = dma_cookie_assign(txd);
  306  dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
  307          txd->cookie);
  311  dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
  312          txd->cookie);
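
The submit hook converts both generic objects back to driver types, assigns the
cookie under the channel lock, and either starts the descriptor at once
("started %u") or parks it ("queued %u"). A sketch using the types assumed
above; the list-based idle test is an assumption:

    static dma_cookie_t td_tx_submit_sketch(struct dma_async_tx_descriptor *txd)
    {
            struct timb_dma_desc *td_desc =
                    container_of(txd, struct timb_dma_desc, txd); /* line 296 */
            struct timb_dma_chan *td_chan =
                    container_of(txd->chan, struct timb_dma_chan, chan);
            dma_cookie_t cookie;

            spin_lock_bh(&td_chan->lock);
            cookie = dma_cookie_assign(txd); /* line 303 */

            if (list_empty(&td_chan->active_list)) {
                    /* Idle channel: start immediately (line 306). */
                    list_add_tail(&td_desc->desc_node, &td_chan->active_list);
                    __td_start_dma_sketch(td_chan, td_desc);
            } else {
                    /* Busy: queue behind the active descriptor (line 311). */
                    list_add_tail(&td_desc->desc_node, &td_chan->queue);
            }
            spin_unlock_bh(&td_chan->lock);

            return cookie;
    }
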
td_alloc_init_desc():
  338  dma_async_tx_descriptor_init(&td_desc->txd, chan);
  339  td_desc->txd.tx_submit = td_tx_submit;
  340  td_desc->txd.flags = DMA_CTRL_ACK;
  342  td_desc->txd.phys = dma_map_single(chan2dmadev(chan), ...
  345  err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
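
Allocation binds the generic descriptor to the channel, installs the submit
hook, pre-sets DMA_CTRL_ACK so descriptors can be recycled without an explicit
client ack (checked at line 388), and streaming-maps the descriptor list so the
controller can fetch it; the mapping must be verified with dma_mapping_error().
A sketch, assuming chan2dmadev() resolves the struct device used for mappings
and that the list length is passed in:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    /* chan2dmadev() is the driver helper named in the listing; this
     * body is an assumption. */
    static struct device *chan2dmadev(struct dma_chan *chan)
    {
            return chan->device->dev;
    }

    static struct timb_dma_desc *td_alloc_init_desc_sketch(struct dma_chan *chan,
                                                           unsigned int list_len)
    {
            struct timb_dma_desc *td_desc;

            td_desc = kzalloc(sizeof(*td_desc), GFP_KERNEL);
            if (!td_desc)
                    return NULL;

            td_desc->desc_list_len = list_len;
            td_desc->desc_list = kzalloc(list_len, GFP_KERNEL);
            if (!td_desc->desc_list)
                    goto err_desc;

            /* Tie the descriptor to the channel and install the submit
             * hook (lines 338-339); mark it reusable (line 340). */
            dma_async_tx_descriptor_init(&td_desc->txd, chan);
            td_desc->txd.tx_submit = td_tx_submit_sketch;
            td_desc->txd.flags = DMA_CTRL_ACK;

            /* txd.phys is the bus address __td_start_dma() programs (line 342). */
            td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
                                               td_desc->desc_list, list_len,
                                               DMA_TO_DEVICE);
            if (dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys))
                    goto err_list;

            return td_desc;

    err_list:
            kfree(td_desc->desc_list);
    err_desc:
            kfree(td_desc);
            return NULL;
    }
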
td_free_desc():
  362  dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
  363  dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, ...
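
Freeing mirrors allocation: the streaming mapping is undone with the same size
and direction before the memory goes away. A short sketch:

    static void td_free_desc_sketch(struct timb_dma_desc *td_desc)
    {
            /* Undo the dma_map_single() from allocation (lines 342/363). */
            dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
                             td_desc->desc_list_len, DMA_TO_DEVICE);
            kfree(td_desc->desc_list);
            kfree(td_desc);
    }
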
td_desc_get():
  388  if (async_tx_test_ack(&td_desc->txd)) {
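
Reuse is gated on the client having acknowledged the descriptor:
async_tx_test_ack() checks DMA_CTRL_ACK in txd.flags, the bit pre-set at line
340. A sketch of a free-list lookup over the assumed channel fields:

    static struct timb_dma_desc *td_desc_get_sketch(struct timb_dma_chan *td_chan)
    {
            struct timb_dma_desc *td_desc, *tmp, *ret = NULL;

            spin_lock_bh(&td_chan->lock);
            list_for_each_entry_safe(td_desc, tmp, &td_chan->free_list, desc_node) {
                    /* Only hand out acked descriptors (line 388). */
                    if (async_tx_test_ack(&td_desc->txd)) {
                            list_del(&td_desc->desc_node);
                            ret = td_desc;
                            break;
                    }
            }
            spin_unlock_bh(&td_chan->lock);

            return ret;
    }
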
td_prep_slave_sg():
  539  dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, ...
  542  return &td_desc->txd;
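
Because the CPU fills the descriptor list after it was mapped, the prep hook
must sync the buffer toward the device before returning &td_desc->txd to the
client; this is the ownership hand-off that pairs with the streaming mapping
from line 342. A sketch of the tail of such a prep routine:

    static struct dma_async_tx_descriptor *td_prep_tail_sketch(
            struct dma_chan *chan, struct timb_dma_desc *td_desc)
    {
            /* Flush CPU writes so the controller fetches fresh
             * descriptor contents (line 539). */
            dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
                                       td_desc->desc_list_len, DMA_TO_DEVICE);

            /* Hand the generic descriptor to the dmaengine client (line 542). */
            return &td_desc->txd;
    }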