Lines Matching +full:tx +full:- +full:device

1 // SPDX-License-Identifier: GPL-2.0-only
37 * __async_tx_find_channel - find a channel to carry out the operation or let
46 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
50 dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
51 return depend_tx->chan;
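
These fragments come from the kernel's async_tx core (crypto/async_tx/async_tx.c, judging by the symbols). The matched lines above show only the fast path of __async_tx_find_channel(): when the new operation has a dependency and the dependency's channel also advertises the requested capability, the chain stays on that channel. A minimal sketch of the selection logic consistent with these fragments; the fall-through to dma_find_channel() is an assumption about the lines the search did not match:

struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
                        enum dma_transaction_type tx_type)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        /* keep the chain on the dependency's channel when it can run tx_type */
        if (depend_tx &&
            dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
                return depend_tx->chan;

        /* otherwise ask dmaengine for any channel with the capability */
        return dma_find_channel(tx_type);
}
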
59 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
60 * pre-attached.
62 * @tx: the new operation
66 struct dma_async_tx_descriptor *tx)
68 struct dma_chan *chan = depend_tx->chan;
69 struct dma_device *device = chan->device;
74 if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
75 txd_chain(depend_tx, tx);
82 device->device_issue_pending(chan);
89 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
90 intr_tx = device->device_prep_dma_interrupt(chan, 0);
95 intr_tx->callback = NULL;
96 intr_tx->callback_param = NULL;
100 txd_chain(intr_tx, tx);
113 intr_tx->tx_submit(intr_tx);
116 device->device_issue_pending(chan);
121 tx->tx_submit(tx);
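
Read together, these fragments show the two ways async_tx_channel_switch() resolves a dependency that lives on a different channel: if the dependency's device supports DMA_INTERRUPT, an interrupt descriptor is prepared and the new operation is chained behind it; otherwise the dependency is polled to completion and the new descriptor is submitted directly. A condensed sketch of that decision, with locking and the early same-channel case elided (a reading of the matched lines, not the full upstream function):

        /* schedule an interrupt descriptor if the device can, else poll */
        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                intr_tx = device->device_prep_dma_interrupt(chan, 0);
        else
                intr_tx = NULL;

        if (intr_tx) {
                intr_tx->callback = NULL;
                intr_tx->callback_param = NULL;
                /* tx becomes the child of the interrupt descriptor */
                txd_chain(intr_tx, tx);
                intr_tx->tx_submit(intr_tx);
                device->device_issue_pending(chan);
        } else {
                /* no DMA_INTERRUPT capability: wait for the dependency */
                if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
        }
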
127 * enum submit_disposition - flags for routing an incoming operation
132 * while holding depend_tx->lock we must avoid submitting new operations
143 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
146 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
148 tx->callback = submit->cb_fn;
149 tx->callback_param = submit->cb_param;
161 txd_parent(tx));
164 * the setting of ->next when ->parent != NULL
172 if (depend_tx->chan == chan) {
173 txd_chain(depend_tx, tx);
181 if (depend_tx->chan == chan)
192 async_tx_channel_switch(depend_tx, tx);
195 txd_clear_parent(tx);
196 tx->tx_submit(tx);
200 txd_clear_parent(tx);
201 tx->tx_submit(tx);
204 if (submit->flags & ASYNC_TX_ACK)
205 async_tx_ack(tx);
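
From a client's point of view this routing is hidden behind async_tx_submit(): a caller only fills in a struct async_submit_ctl with a dependency, callback and flags, and the core picks one of the dispositions above. A hypothetical client sketch using the public helpers from <linux/async_tx.h>; the function name copy_after() is made up for illustration:

#include <linux/async_tx.h>

/* Chain a page copy behind an earlier descriptor; async_tx_submit()
 * then chooses direct submit, append, or a channel switch for us.
 */
static struct dma_async_tx_descriptor *
copy_after(struct page *dst, struct page *src, size_t len,
           struct dma_async_tx_descriptor *depend_tx,
           dma_async_tx_callback done, void *done_arg)
{
        struct async_submit_ctl submit;

        init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
                          done, done_arg, NULL /* no scribble needed */);
        return async_memcpy(dst, src, 0, 0, len, &submit);
}
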
213 * async_trigger_callback - schedules the callback function to be run
224 struct dma_device *device;
225 struct dma_async_tx_descriptor *tx;
226 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
229 chan = depend_tx->chan;
230 device = chan->device;
235 if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
236 device = NULL;
238 tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
240 tx = NULL;
242 if (tx) {
245 async_tx_submit(chan, tx, submit);
250 async_tx_quiesce(&submit->depend_tx);
255 return tx;
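
A typical use of async_trigger_callback() is to get a completion notification once a whole dependency chain has finished, without issuing another data-moving operation. A hypothetical sketch; last_tx, chain_done and ctx are placeholder names:

        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        /* run chain_done(ctx) after last_tx and everything it depends on */
        init_async_submit(&submit, ASYNC_TX_ACK, last_tx, chain_done, ctx, NULL);
        tx = async_trigger_callback(&submit);
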
260 * async_tx_quiesce - ensure tx is complete and freeable upon return
261 * @tx: transaction to quiesce
263 void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
265 if (*tx) {
269 BUG_ON(async_tx_test_ack(*tx));
270 if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
273 async_tx_ack(*tx);
274 *tx = NULL;
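
async_tx_quiesce() is what synchronous fallback paths lean on: before touching the data on the CPU they wait for, and ack, any outstanding DMA dependency. A hypothetical fallback sketch; do_sync_work() stands in for whatever CPU implementation the caller has:

        /* make sure the DMA dependency has completed and been acked */
        async_tx_quiesce(&submit->depend_tx);

        do_sync_work(dest, srcs, len);          /* CPU fallback, placeholder */

        async_tx_sync_epilog(submit);           /* invokes submit->cb_fn, if any */
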