Lines matching "+full:tx +full:-device" in crypto/async_tx/async_pq.c. Only the matching source lines are shown; elided context is marked with /* ... */.

// SPDX-License-Identifier: GPL-2.0-or-later
/* ... */
#include <linux/dma-mapping.h>
/* ... */
/* struct pq_scribble_page - space to hold throwaway P or Q buffer for ... */
/* ... blocks[disks-2] and the 'Q' destination address at blocks[disks-1] */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
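
The P() and Q() macros above encode the calling convention the comment fragment describes: the caller appends the P and Q destination pages to the end of the source array. A minimal user-space illustration (the six-entry array and its string contents are made up for this example):

#include <stdio.h>

#define P(b, d) (b[d-2])        /* same definitions as in the kernel excerpt */
#define Q(b, d) (b[d-1])

int main(void)
{
        /* disks = 6: blocks[0..3] are data, blocks[4] is P, blocks[5] is Q */
        const char *blocks[6] = { "D0", "D1", "D2", "D3", "P", "Q" };
        int disks = 6;

        printf("P destination is blocks[%d]: %s\n", disks - 2, P(blocks, disks));
        printf("Q destination is blocks[%d]: %s\n", disks - 1, Q(blocks, disks));
        return 0;
}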
/* do_async_gen_syndrome - asynchronously calculate P and/or Q */
/* ... */
        struct dma_async_tx_descriptor *tx = NULL;
        struct dma_device *dma = chan->device;
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        int src_cnt = disks - 2;
        /* ... */
                submit->flags = flags_orig;
                /* ... */
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                }
                /* ... */
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                /* ... */
                        dma_dest[0] = unmap->addr[disks - 2];
                        dma_dest[1] = unmap->addr[disks - 1];
                        tx = dma->device_prep_dma_pq(chan, dma_dest,
                                                     &unmap->addr[src_off],
                                                     pq_src_cnt,
                                                     &scfs[src_off], unmap->len,
                                                     dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                /* ... */
                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
                submit->depend_tx = tx;
                /* ... */
                src_cnt -= pq_src_cnt;
        /* ... */
        return tx;
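
The lines above come from the loop that splits one logical P/Q operation across as many hardware descriptors as the channel will accept: every descriptor except the last is fenced and stripped of the caller's callback, each descriptor is chained as the dependency of the next, and the source window advances until src_cnt is exhausted. A user-space sketch of just that bookkeeping (max_pq stands in for whatever dma_maxpq() would report, and the kernel also adjusts the limit for continuation descriptors, which this sketch ignores; the counts are hypothetical):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int src_cnt = 13;       /* disks - 2 source blocks (hypothetical) */
        int max_pq = 8;         /* per-descriptor source limit, cf. dma_maxpq() */
        int src_off = 0;

        while (src_cnt > 0) {
                int pq_src_cnt = min_int(src_cnt, max_pq);
                int last = (src_cnt == pq_src_cnt);

                /* only the final descriptor carries the caller's callback;
                 * earlier ones stay fenced and unacknowledged */
                printf("descriptor over sources [%d..%d]%s\n",
                       src_off, src_off + pq_src_cnt - 1,
                       last ? " (callback fires here)" : " (no callback, fenced)");

                src_cnt -= pq_src_cnt;
                src_off += pq_src_cnt;
        }
        return 0;
}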
/* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome */
/* ... */
        int start = -1, stop = disks - 3;

        if (submit->scribble)
                srcs = submit->scribble;
        /* ... */
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
                /* ... */
                        if (i < disks - 2) {
                                /* ... */
                                if (start == -1)
                                        start = i;
                        }
        /* ... */
        if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
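
In the synchronous fallback, the matched lines track which slice of the source list is actually populated: start records the first non-NULL source and stop the last one, while a NULL in the P or Q slot trips the BUG_ON, since an omitted destination has already been replaced with the throwaway scribble page by this point. A user-space rendering of that bookkeeping (the pattern of present and missing sources is invented for illustration; in the kernel the resulting [start, stop] window feeds the ASYNC_TX_PQ_XOR_DST branch):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        /* disks = 8: six sources, two of them absent, then P and Q */
        const char *blocks[8] = { NULL, "D1", "D2", NULL, "D4", "D5", "P", "Q" };
        int disks = 8;
        int start = -1, stop = disks - 3;
        int i;

        for (i = 0; i < disks - 2; i++) {
                if (!blocks[i])
                        continue;       /* absent sources are skipped */
                stop = i;               /* last populated source so far */
                if (start == -1)
                        start = i;      /* first populated source */
        }

        printf("populated source window: [%d..%d]\n", start, stop);
        return 0;
}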
/*
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * ...
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of PAGE_SIZE is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL they are replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous path.
 */
/* ... */
        int src_cnt = disks - 2;
        /* ... */
        struct dma_device *device = chan ? chan->device : NULL;
        /* ... */
        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        /* XORing P/Q is only implemented in software */
        if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
            is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                struct dma_async_tx_descriptor *tx;
                /* ... */
                unmap->len = len;
                /* ... */
                        unmap->addr[j] = dma_map_page(device->dev, blocks[i],
                                                      offsets[i], len, DMA_TO_DEVICE);
                        /* ... */
                        unmap->to_cnt++;
                /* ... */
                unmap->bidi_cnt++;
                if (P(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
                                                        P(offsets, disks), len, DMA_BIDIRECTIONAL);
                else
                        unmap->addr[j++] = 0;
                /* ... */
                unmap->bidi_cnt++;
                if (Q(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
                                                        Q(offsets, disks), len, DMA_BIDIRECTIONAL);
                else
                        unmap->addr[j++] = 0;
                /* ... */
                tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
                /* ... */
                return tx;
        }
        /* ... */
        async_tx_quiesce(&submit->depend_tx);
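
Read together with the kernel-doc above, a caller lays out blocks[] and offsets[] as described, primes an async_submit_ctl, and lets the API choose between the hardware and software paths. The sketch below is modeled on the style of crypto/async_tx/raid6test.c; NDISKS, gen_pq() and gen_pq_done() are hypothetical names and error handling is omitted:

#include <linux/async_tx.h>
#include <linux/completion.h>

#define NDISKS 6        /* four data blocks plus P and Q (hypothetical geometry) */

static void gen_pq_done(void *param)    /* completion callback (hypothetical) */
{
        complete(param);
}

/* blocks[0..NDISKS-3] hold data, blocks[NDISKS-2] is P, blocks[NDISKS-1] is Q */
static void gen_pq(struct page **blocks, unsigned int *offsets, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        addr_conv_t addr_conv[NDISKS];  /* scribble space for the synchronous path */
        DECLARE_COMPLETION_ONSTACK(done);

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, gen_pq_done, &done, addr_conv);
        tx = async_gen_syndrome(blocks, offsets, NDISKS, len, &submit);
        async_tx_issue_pending(tx);
        wait_for_completion(&done);
}

Whether the operation runs on a DMA channel or falls back to the synchronous raid6 routines, the completion callback is invoked either way, so the wait above covers both paths.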
/*
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * ...
 * ...  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
/* ... */
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        /* ... */
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        /* ... */
        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        if (unmap && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                struct device *dev = device->dev;
                /* ... */
                unmap->len = len;
                for (i = 0; i < disks-2; i++)
                        /* ... */
                                unmap->addr[j] = dma_map_page(dev, blocks[i],
                                                              offsets[i], len, DMA_TO_DEVICE);
                                /* ... */
                                unmap->to_cnt++;
                /* ... */
                        unmap->addr[j++] = pq[0];
                        unmap->to_cnt++;
                /* ... */
                        unmap->addr[j++] = pq[1];
                        unmap->to_cnt++;
                /* ... */
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                /* ... */
                        tx = device->device_prep_dma_pq_val(chan, pq,
                                                            unmap->addr,
                                                            /* ... */);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                /* ... */
                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
        } else {
                /* ... */
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                void *cb_param_orig = submit->cb_param;
                /* ... */
                async_tx_quiesce(&submit->depend_tx);
                /* ... */
                tx = NULL;
                /* ... */
                        tx = async_xor_offs(spare, s_off,
                                        blocks, offsets, disks-2, len, submit);
                        async_tx_quiesce(&tx);
                /* ... */
                        tx = async_gen_syndrome(blocks, offsets, disks,
                                        len, submit);
                        async_tx_quiesce(&tx);
                /* ... */
                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
                submit->flags = flags_orig;
                /* ... */
                tx = NULL;
        }

        return tx;
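
The validation variant takes the same layout plus a spare page and a result bitmap; on a mismatch the SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT bits are set in *pqres. A sketch in the same style as the previous one (check_pq() and its error handling are hypothetical; the spare page and scribble are required by the synchronous path, as the kernel-doc notes):

#include <linux/async_tx.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define NDISKS 6        /* same hypothetical geometry as above */

static void check_pq_done(void *param)
{
        complete(param);
}

/* returns 0 when both P and Q match the data, -EILSEQ on a mismatch */
static int check_pq(struct page **blocks, unsigned int *offsets, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        addr_conv_t addr_conv[NDISKS];
        enum sum_check_flags pqres = 0;
        struct page *spare = alloc_page(GFP_KERNEL);    /* temp result buffer for the sync path */
        DECLARE_COMPLETION_ONSTACK(done);

        if (!spare)
                return -ENOMEM;

        init_async_submit(&submit, ASYNC_TX_ACK, NULL, check_pq_done, &done, addr_conv);
        tx = async_syndrome_val(blocks, offsets, NDISKS, len, &pqres, spare, 0, &submit);
        async_tx_issue_pending(tx);
        wait_for_completion(&done);

        __free_page(spare);
        return (pqres & (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) ? -EILSEQ : 0;
}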
        return -ENOMEM;         /* in async_pq_init() */
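
The single matched line from async_pq_init() is its failure return. The init routine's job is to allocate the throwaway pq_scribble_page that the synchronous path substitutes for an omitted P or Q destination; a sketch of that shape, reconstructed for illustration rather than quoted from the source (it assumes the file's own static pq_scribble_page declaration):

static int __init async_pq_init(void)
{
        pq_scribble_page = alloc_page(GFP_KERNEL);
        if (pq_scribble_page)
                return 0;

        pr_err("%s: failed to allocate required spare page\n", __func__);
        return -ENOMEM;
}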