1  /* SPDX-License-Identifier: GPL-2.0-or-later */
2  /*
3   * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4   */
5  #ifndef LINUX_DMAENGINE_H
6  #define LINUX_DMAENGINE_H
7  
8  #include <linux/device.h>
9  #include <linux/err.h>
10  #include <linux/uio.h>
11  #include <linux/bug.h>
12  #include <linux/scatterlist.h>
13  #include <linux/bitmap.h>
14  #include <linux/types.h>
15  #include <asm/page.h>
16  
17  /**
18   * typedef dma_cookie_t - an opaque DMA cookie
19   *
20   * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
21   */
22  typedef s32 dma_cookie_t;
23  #define DMA_MIN_COOKIE	1
24  
25  static inline int dma_submit_error(dma_cookie_t cookie)
26  {
27  	return cookie < 0 ? cookie : 0;
28  }
29  
30  /**
31   * enum dma_status - DMA transaction status
32   * @DMA_COMPLETE: transaction completed
33   * @DMA_IN_PROGRESS: transaction not yet processed
34   * @DMA_PAUSED: transaction is paused
35   * @DMA_ERROR: transaction failed
 * @DMA_OUT_OF_ORDER: transaction is completed but not in order
36   */
37  enum dma_status {
38  	DMA_COMPLETE,
39  	DMA_IN_PROGRESS,
40  	DMA_PAUSED,
41  	DMA_ERROR,
42  	DMA_OUT_OF_ORDER,
43  };
44  
45  /**
46   * enum dma_transaction_type - DMA transaction types/indexes
47   *
48   * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
49   * automatically set as dma devices are registered.
50   */
51  enum dma_transaction_type {
52  	DMA_MEMCPY,
53  	DMA_XOR,
54  	DMA_PQ,
55  	DMA_XOR_VAL,
56  	DMA_PQ_VAL,
57  	DMA_MEMSET,
58  	DMA_MEMSET_SG,
59  	DMA_INTERRUPT,
60  	DMA_PRIVATE,
61  	DMA_ASYNC_TX,
62  	DMA_SLAVE,
63  	DMA_CYCLIC,
64  	DMA_INTERLEAVE,
65  	DMA_COMPLETION_NO_ORDER,
66  	DMA_REPEAT,
67  	DMA_LOAD_EOT,
68  /* last transaction type for creation of the capabilities mask */
69  	DMA_TX_TYPE_END,
70  };
71  
72  /**
73   * enum dma_transfer_direction - dma transfer mode and direction indicator
74   * @DMA_MEM_TO_MEM: Async/Memcpy mode
75   * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
76   * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
77   * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
78   */
79  enum dma_transfer_direction {
80  	DMA_MEM_TO_MEM,
81  	DMA_MEM_TO_DEV,
82  	DMA_DEV_TO_MEM,
83  	DMA_DEV_TO_DEV,
84  	DMA_TRANS_NONE,
85  };
86  
87  /**
88   * Interleaved Transfer Request
89   * ----------------------------
90   * A chunk is a collection of contiguous bytes to be transferred.
91   * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
92   * ICGs may or may not change between chunks.
93   * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
94   *  that when repeated an integral number of times, specifies the transfer.
95   * A transfer template is a specification of a Frame, the number of times
96   *  it is to be repeated and other per-transfer attributes.
97   *
98   * Practically, a client driver would have ready a template for each
99   *  type of transfer it is going to need during its lifetime and
100   *  set only 'src_start' and 'dst_start' before submitting the requests.
101   *
102   *
103   *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
104   *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
105   *
106   *    ==  Chunk size
107   *    ... ICG
108   */
109  
110  /**
111   * struct data_chunk - Element of scatter-gather list that makes a frame.
112   * @size: Number of bytes to read from source.
113   *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
114   * @icg: Number of bytes to jump after last src/dst address of this
115   *	 chunk and before first src/dst address for next chunk.
116   *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
117   *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
118   * @dst_icg: Number of bytes to jump after last dst address of this
119   *	 chunk and before the first dst address for next chunk.
120   *	 Ignored if dst_inc is true and dst_sgl is false.
121   * @src_icg: Number of bytes to jump after last src address of this
122   *	 chunk and before the first src address for next chunk.
123   *	 Ignored if src_inc is true and src_sgl is false.
124   */
125  struct data_chunk {
126  	size_t size;
127  	size_t icg;
128  	size_t dst_icg;
129  	size_t src_icg;
130  };
131  
132  /**
133   * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
134   *	 and attributes.
135   * @src_start: Bus address of source for the first chunk.
136   * @dst_start: Bus address of destination for the first chunk.
137   * @dir: Specifies the type of Source and Destination.
138   * @src_inc: If the source address increments after reading from it.
139   * @dst_inc: If the destination address increments after writing to it.
140   * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
141   *		Otherwise, source is read contiguously (icg ignored).
142   *		Ignored if src_inc is false.
143   * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
144   *		Otherwise, destination is filled contiguously (icg ignored).
145   *		Ignored if dst_inc is false.
146   * @numf: Number of frames in this template.
147   * @frame_size: Number of chunks in a frame i.e, size of sgl[].
148   * @sgl: Array of {chunk,icg} pairs that make up a frame.
149   */
150  struct dma_interleaved_template {
151  	dma_addr_t src_start;
152  	dma_addr_t dst_start;
153  	enum dma_transfer_direction dir;
154  	bool src_inc;
155  	bool dst_inc;
156  	bool src_sgl;
157  	bool dst_sgl;
158  	size_t numf;
159  	size_t frame_size;
160  	struct data_chunk sgl[];
161  };
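
/*
 * Illustrative sketch (not part of the API): how a client driver might fill a
 * dma_interleaved_template for 64 frames of one 512-byte chunk each, honouring
 * a 256-byte inter-chunk gap on the source side while packing the destination
 * contiguously. The channel 'dmac_chan' and the bus addresses are assumptions
 * made up for the example; error handling is abbreviated.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->src_start = src_dma_addr;
 *	xt->dst_start = dst_dma_addr;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->src_sgl = true;
 *	xt->dst_sgl = false;
 *	xt->numf = 64;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 512;
 *	xt->sgl[0].icg = 256;
 *
 *	tx = dmaengine_prep_interleaved_dma(dmac_chan, xt, DMA_PREP_INTERRUPT);
 */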
162  
163  /**
164   * struct dma_vec - DMA vector
165   * @addr: Bus address of the start of the vector
166   * @len: Length in bytes of the DMA vector
167   */
168  struct dma_vec {
169  	dma_addr_t addr;
170  	size_t len;
171  };
172  
173  /**
174   * enum dma_ctrl_flags - DMA flags to augment operation preparation,
175   *  control completion, and communicate status.
176   * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
177   *  this transaction
178   * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
179   *  acknowledges receipt, i.e. has a chance to establish any dependency
180   *  chains
181   * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
182   * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
183   * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
184   *  sources that were the result of a previous operation, in the case of a PQ
185   *  operation it continues the calculation with new sources
186   * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
187   *  on the result of this operation
188   * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
189   *  cleared or freed
190   * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
191   *  data and the descriptor should be in different format from normal
192   *  data descriptors.
193   * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
194   *  repeated when it ends until a transaction is issued on the same channel
195   *  with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
196   *  interleaved transactions and is ignored for all other transaction types.
197   * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
198   *  active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
199   *  repeated transaction ends. Not setting this flag when the previously queued
200   *  transaction is marked with DMA_PREP_REPEAT will cause the new transaction
201   *  to never be processed and stay in the issued queue forever. The flag is
202   *  ignored if the previous transaction is not a repeated transaction.
203   */
204  enum dma_ctrl_flags {
205  	DMA_PREP_INTERRUPT = (1 << 0),
206  	DMA_CTRL_ACK = (1 << 1),
207  	DMA_PREP_PQ_DISABLE_P = (1 << 2),
208  	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
209  	DMA_PREP_CONTINUE = (1 << 4),
210  	DMA_PREP_FENCE = (1 << 5),
211  	DMA_CTRL_REUSE = (1 << 6),
212  	DMA_PREP_CMD = (1 << 7),
213  	DMA_PREP_REPEAT = (1 << 8),
214  	DMA_PREP_LOAD_EOT = (1 << 9),
215  };
216  
217  /**
218   * enum sum_check_bits - bit positions of sum_check_flags
219   */
220  enum sum_check_bits {
221  	SUM_CHECK_P = 0,
222  	SUM_CHECK_Q = 1,
223  };
224  
225  /**
226   * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
227   * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
228   * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
229   */
230  enum sum_check_flags {
231  	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
232  	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
233  };
234  
235  
236  /**
237   * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
238   * See linux/cpumask.h
239   */
240  typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
241  
242  /**
243   * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
244   * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
245   *  client driver and it is attached (via the dmaengine_desc_attach_metadata()
246   *  helper) to the descriptor.
247   *
248   * Client drivers interested in using this mode can follow these steps
 *  (an illustrative sketch follows the enum definition below):
249   * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
250   *   1. prepare the descriptor (dmaengine_prep_*)
251   *	construct the metadata in the client's buffer
252   *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
253   *	descriptor
254   *   3. submit the transfer
255   * - DMA_DEV_TO_MEM:
256   *   1. prepare the descriptor (dmaengine_prep_*)
257   *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
258   *	descriptor
259   *   3. submit the transfer
260   *   4. when the transfer is completed, the metadata should be available in the
261   *	attached buffer
262   *
263   * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
264   *  driver. The client driver can ask for the pointer, maximum size and the
265   *  currently used size of the metadata and can directly update or read it.
266   *  dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
267   *  provided as helper functions.
268   *
269   *  Note: the metadata area for the descriptor is no longer valid after the
270   *  transfer has been completed (valid up to the point when the completion
271   *  callback returns if used).
272   *
273   * Client drivers interested in using this mode can follow these steps:
274   * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
275   *   1. prepare the descriptor (dmaengine_prep_*)
276   *   2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
277   *	metadata area
278   *   3. update the metadata at the pointer
279   *   4. use dmaengine_desc_set_metadata_len()  to tell the DMA engine the amount
280   *	of data the client has placed into the metadata buffer
281   *   5. submit the transfer
282   * - DMA_DEV_TO_MEM:
283   *   1. prepare the descriptor (dmaengine_prep_*)
284   *   2. submit the transfer
285   *   3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
286   *	pointer to the engine's metadata area
287   *   4. Read out the metadata from the pointer
288   *
289   * Note: the two modes are not compatible and clients must use only one of
290   * them for a given descriptor.
291   */
292  enum dma_desc_metadata_mode {
293  	DESC_METADATA_NONE = 0,
294  	DESC_METADATA_CLIENT = BIT(0),
295  	DESC_METADATA_ENGINE = BIT(1),
296  };
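
/*
 * Illustrative sketch (not part of the API): the DESC_METADATA_CLIENT flow for
 * a DMA_MEM_TO_DEV transfer, following the steps listed above. 'chan', 'buf',
 * 'len', 'md_buf' and 'md_len' are assumptions made up for the example; a real
 * client should first check dmaengine_is_metadata_mode_supported().
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *	int ret;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *	if (ret)
 *		return ret;
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */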
297  
298  /**
299   * struct dma_chan_percpu - the per-CPU part of struct dma_chan
300   * @memcpy_count: transaction counter
301   * @bytes_transferred: byte counter
302   */
303  struct dma_chan_percpu {
304  	/* stats */
305  	unsigned long memcpy_count;
306  	unsigned long bytes_transferred;
307  };
308  
309  /**
310   * struct dma_router - DMA router structure
311   * @dev: pointer to the DMA router device
312   * @route_free: function to be called when the route can be disconnected
313   */
314  struct dma_router {
315  	struct device *dev;
316  	void (*route_free)(struct device *dev, void *route_data);
317  };
318  
319  /**
320   * struct dma_chan - devices supply DMA channels, clients use them
321   * @device: ptr to the dma device who supplies this channel, always !%NULL
322   * @slave: ptr to the device using this channel
323   * @cookie: last cookie value returned to client
324   * @completed_cookie: last completed cookie for this channel
325   * @chan_id: channel ID for sysfs
326   * @dev: class device for sysfs
327   * @name: backlink name for sysfs
328   * @dbg_client_name: slave name for debugfs in format:
329   *	dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
330   * @device_node: used to add this to the device chan list
331   * @local: per-cpu pointer to a struct dma_chan_percpu
332   * @client_count: how many clients are using this channel
333   * @table_count: number of appearances in the mem-to-mem allocation table
334   * @router: pointer to the DMA router structure
335   * @route_data: channel specific data for the router
336   * @private: private data for certain client-channel associations
337   */
338  struct dma_chan {
339  	struct dma_device *device;
340  	struct device *slave;
341  	dma_cookie_t cookie;
342  	dma_cookie_t completed_cookie;
343  
344  	/* sysfs */
345  	int chan_id;
346  	struct dma_chan_dev *dev;
347  	const char *name;
348  #ifdef CONFIG_DEBUG_FS
349  	char *dbg_client_name;
350  #endif
351  
352  	struct list_head device_node;
353  	struct dma_chan_percpu __percpu *local;
354  	int client_count;
355  	int table_count;
356  
357  	/* DMA router */
358  	struct dma_router *router;
359  	void *route_data;
360  
361  	void *private;
362  };
363  
364  /**
365   * struct dma_chan_dev - relate sysfs device node to backing channel device
366   * @chan: driver channel device
367   * @device: sysfs device
368   * @dev_id: parent dma_device dev_id
369   * @chan_dma_dev: The channel is using custom/different dma-mapping
370   * compared to the parent dma_device
371   */
372  struct dma_chan_dev {
373  	struct dma_chan *chan;
374  	struct device device;
375  	int dev_id;
376  	bool chan_dma_dev;
377  };
378  
379  /**
380   * enum dma_slave_buswidth - defines bus width of the DMA slave
381   * device, source or target buses
382   */
383  enum dma_slave_buswidth {
384  	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
385  	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
386  	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
387  	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
388  	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
389  	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
390  	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
391  	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
392  	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
393  	DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
394  };
395  
396  /**
397   * struct dma_slave_config - dma slave channel runtime config
398   * @direction: whether the data shall go in or out on this slave
399   * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
400   * legal values. DEPRECATED, drivers should use the direction argument
401   * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
402   * the dir field in the dma_interleaved_template structure.
403   * @src_addr: this is the physical address where DMA slave data
404   * should be read (RX), if the source is memory this argument is
405   * ignored.
406   * @dst_addr: this is the physical address where DMA slave data
407   * should be written (TX), if the destination is memory this argument
408   * is ignored.
409   * @src_addr_width: this is the width in bytes of the source (RX)
410   * register where DMA data shall be read. If the source
411   * is memory this may be ignored depending on architecture.
412   * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
413   * @dst_addr_width: same as src_addr_width but for destination
414   * target (TX) mutatis mutandis.
415   * @src_maxburst: the maximum number of words (note: words, as in
416   * units of the src_addr_width member, not bytes) that can be sent
417   * in one burst to the device. Typically something like half the
418   * FIFO depth on I/O peripherals so you don't overflow it. This
419   * may or may not be applicable on memory sources.
420   * @dst_maxburst: same as src_maxburst but for destination target
421   * mutatis mutandis.
422   * @src_port_window_size: The length, in words, of the register area through
423   * which the data needs to be accessed on the device side. It is only used for
424   * devices which use an area instead of a single register to receive the data.
425   * Typically the DMA loops over this area in order to transfer the data.
426   * @dst_port_window_size: same as src_port_window_size but for the destination
427   * port.
428   * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
429   * with 'true' if the peripheral should be the flow controller. The direction
430   * will be selected at runtime.
431   * @peripheral_config: peripheral configuration for programming peripheral
432   * for dmaengine transfer
433   * @peripheral_size: peripheral configuration buffer size
434   *
435   * This struct is passed in as configuration data to a DMA engine
436   * in order to set up a certain channel for DMA transport at runtime.
437   * The DMA device/engine has to provide support for an additional
438   * callback in the dma_device structure, device_config and this struct
439   * will then be passed in as an argument to the function.
440   *
441   * The rationale for adding configuration information to this struct is as
442   * follows: if it is likely that more than one DMA slave controller in
443   * the world will support the configuration option, then make it generic.
444   * If not: if it is fixed so that it can be sent statically from the platform
445   * data, then prefer to do that.
446   */
447  struct dma_slave_config {
448  	enum dma_transfer_direction direction;
449  	phys_addr_t src_addr;
450  	phys_addr_t dst_addr;
451  	enum dma_slave_buswidth src_addr_width;
452  	enum dma_slave_buswidth dst_addr_width;
453  	u32 src_maxburst;
454  	u32 dst_maxburst;
455  	u32 src_port_window_size;
456  	u32 dst_port_window_size;
457  	bool device_fc;
458  	void *peripheral_config;
459  	size_t peripheral_size;
460  };
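
/*
 * Illustrative sketch (not part of the API): runtime configuration of a TX
 * (memory to device) slave channel before any transfer is prepared. The FIFO
 * address and the burst size are assumptions made up for the example.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	int ret = dmaengine_slave_config(chan, &cfg);
 */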
461  
462  /**
463   * enum dma_residue_granularity - Granularity of the reported transfer residue
464   * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
465   *  DMA channel is only able to tell whether a descriptor has been completed or
466   *  not, which means residue reporting is not supported by this channel. The
467   *  residue field of the dma_tx_state struct will always be 0.
468   * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
469   *  completed segment of the transfer (For cyclic transfers this is after each
470   *  period). This is typically implemented by having the hardware generate an
471   *  interrupt after each transferred segment and then the driver updates the
472   *  outstanding residue by the size of the segment. Another possibility is if
473   *  the hardware supports scatter-gather and the segment descriptor has a field
474   *  which gets set after the segment has been completed. The driver then counts
475   *  the number of segments without the flag set to compute the residue.
476   * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
477   *  burst. This is typically only supported if the hardware has a progress
478   *  register of some sort (E.g. a register with the current read/write address
479   *  or a register with the amount of bursts/beats/bytes that have been
480   *  transferred or still need to be transferred).
481   */
482  enum dma_residue_granularity {
483  	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
484  	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
485  	DMA_RESIDUE_GRANULARITY_BURST = 2,
486  };
487  
488  /**
489   * struct dma_slave_caps - expose capabilities of a slave channel only
490   * @src_addr_widths: bit mask of src addr widths the channel supports.
491   *	Width is specified in bytes, e.g. for a channel supporting
492   *	a width of 4 the mask should have BIT(4) set.
493   * @dst_addr_widths: bit mask of dst addr widths the channel supports
494   * @directions: bit mask of slave directions the channel supports.
495   *	Since the enum dma_transfer_direction is not defined as bit flag for
496   *	each type, the dma controller should set BIT(<TYPE>) and same
497   *	should be checked by controller as well
498   * @min_burst: min burst capability per-transfer
499   * @max_burst: max burst capability per-transfer
500   * @max_sg_burst: max number of SG list entries executed in a single burst
501   *	DMA transaction with no software intervention for reinitialization.
502   *	Zero value means unlimited number of entries.
503   * @cmd_pause: true, if pause is supported (i.e. for reading residue or
504   *	       for resume later)
505   * @cmd_resume: true, if resume is supported
506   * @cmd_terminate: true, if terminate cmd is supported
507   * @residue_granularity: granularity of the reported transfer residue
508   * @descriptor_reuse: if a descriptor can be reused by client and
509   * resubmitted multiple times
510   */
511  struct dma_slave_caps {
512  	u32 src_addr_widths;
513  	u32 dst_addr_widths;
514  	u32 directions;
515  	u32 min_burst;
516  	u32 max_burst;
517  	u32 max_sg_burst;
518  	bool cmd_pause;
519  	bool cmd_resume;
520  	bool cmd_terminate;
521  	enum dma_residue_granularity residue_granularity;
522  	bool descriptor_reuse;
523  };
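
/*
 * Illustrative sketch (not part of the API): querying a channel's capabilities
 * with dma_get_slave_caps() (part of the dmaengine API, declared elsewhere in
 * this header) before committing to an address width. Error handling is
 * abbreviated.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		return -EINVAL;
 */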
524  
525  static inline const char *dma_chan_name(struct dma_chan *chan)
526  {
527  	return dev_name(&chan->dev->device);
528  }
529  
530  /**
531   * typedef dma_filter_fn - callback filter for dma_request_channel
532   * @chan: channel to be reviewed
533   * @filter_param: opaque parameter passed through dma_request_channel
534   *
535   * When this optional parameter is specified in a call to dma_request_channel a
536   * suitable channel is passed to this routine for further dispositioning before
537   * being returned, where 'suitable' indicates a non-busy channel that
538   * satisfies the given capability mask.  It returns 'true' to indicate that the
539   * channel is suitable.
540   */
541  typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
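
/*
 * Illustrative sketch (not part of the API): a minimal filter callback that
 * accepts only channels provided by an expected DMA controller. The
 * 'my_chan_data' type and its 'dmac_dev' member are assumptions made up for
 * the example; it would be passed as the filter_param of
 * dma_request_channel().
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct my_chan_data *data = filter_param;
 *
 *		return chan->device->dev == data->dmac_dev;
 *	}
 */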
542  
543  typedef void (*dma_async_tx_callback)(void *dma_async_param);
544  
545  enum dmaengine_tx_result {
546  	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
547  	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
548  	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
549  	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
550  };
551  
552  struct dmaengine_result {
553  	enum dmaengine_tx_result result;
554  	u32 residue;
555  };
556  
557  typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
558  				const struct dmaengine_result *result);
559  
560  struct dmaengine_unmap_data {
561  #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
562  	u16 map_cnt;
563  #else
564  	u8 map_cnt;
565  #endif
566  	u8 to_cnt;
567  	u8 from_cnt;
568  	u8 bidi_cnt;
569  	struct device *dev;
570  	struct kref kref;
571  	size_t len;
572  	dma_addr_t addr[];
573  };
574  
575  struct dma_async_tx_descriptor;
576  
577  struct dma_descriptor_metadata_ops {
578  	int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
579  		      size_t len);
580  
581  	void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
582  			 size_t *payload_len, size_t *max_len);
583  	int (*set_len)(struct dma_async_tx_descriptor *desc,
584  		       size_t payload_len);
585  };
586  
587  /**
588   * struct dma_async_tx_descriptor - async transaction descriptor
589   * ---dma generic offload fields---
590   * @cookie: tracking cookie for this transaction, set to -EBUSY if
591   *	this tx is sitting on a dependency list
592   * @flags: flags to augment operation preparation, control completion, and
593   *	communicate status
594   * @phys: physical address of the descriptor
595   * @chan: target channel for this operation
596   * @tx_submit: accept the descriptor, assign ordered cookie and mark the
597   * descriptor pending. To be pushed on .issue_pending() call
598   * @callback: routine to call after this operation is complete
599   * @callback_param: general parameter to pass to the callback routine
600   * @desc_metadata_mode: core managed metadata mode to protect mixed use of
601   *	DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
602   *	DESC_METADATA_NONE
603   * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
604   *	DMA driver if metadata mode is supported with the descriptor
605   * ---async_tx api specific fields---
606   * @next: at completion submit this descriptor
607   * @parent: pointer to the next level up in the dependency chain
608   * @lock: protect the parent and next pointers
609   */
610  struct dma_async_tx_descriptor {
611  	dma_cookie_t cookie;
612  	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
613  	dma_addr_t phys;
614  	struct dma_chan *chan;
615  	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
616  	int (*desc_free)(struct dma_async_tx_descriptor *tx);
617  	dma_async_tx_callback callback;
618  	dma_async_tx_callback_result callback_result;
619  	void *callback_param;
620  	struct dmaengine_unmap_data *unmap;
621  	enum dma_desc_metadata_mode desc_metadata_mode;
622  	struct dma_descriptor_metadata_ops *metadata_ops;
623  #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
624  	struct dma_async_tx_descriptor *next;
625  	struct dma_async_tx_descriptor *parent;
626  	spinlock_t lock;
627  #endif
628  };
629  
630  #ifdef CONFIG_DMA_ENGINE
631  static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
632  				 struct dmaengine_unmap_data *unmap)
633  {
634  	kref_get(&unmap->kref);
635  	tx->unmap = unmap;
636  }
637  
638  struct dmaengine_unmap_data *
639  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
640  void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
641  #else
642  static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
643  				 struct dmaengine_unmap_data *unmap)
644  {
645  }
646  static inline struct dmaengine_unmap_data *
647  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
648  {
649  	return NULL;
650  }
651  static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
652  {
653  }
654  #endif
655  
656  static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
657  {
658  	if (!tx->unmap)
659  		return;
660  
661  	dmaengine_unmap_put(tx->unmap);
662  	tx->unmap = NULL;
663  }
664  
665  #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
666  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
667  {
668  }
669  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
670  {
671  }
672  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
673  {
674  	BUG();
675  }
676  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
677  {
678  }
679  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
680  {
681  }
682  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
683  {
684  	return NULL;
685  }
686  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
687  {
688  	return NULL;
689  }
690  
691  #else
692  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
693  {
694  	spin_lock_bh(&txd->lock);
695  }
696  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
697  {
698  	spin_unlock_bh(&txd->lock);
699  }
700  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
701  {
702  	txd->next = next;
703  	next->parent = txd;
704  }
705  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
706  {
707  	txd->parent = NULL;
708  }
709  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
710  {
711  	txd->next = NULL;
712  }
713  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
714  {
715  	return txd->parent;
716  }
717  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
718  {
719  	return txd->next;
720  }
721  #endif
722  
723  /**
724   * struct dma_tx_state - filled in to report the status of
725   * a transfer.
726   * @last: last completed DMA cookie
727   * @used: last issued DMA cookie (i.e. the one in progress)
728   * @residue: the remaining number of bytes left to transmit
729   *	on the selected transfer for states DMA_IN_PROGRESS and
730   *	DMA_PAUSED if this is implemented in the driver, else 0
731   * @in_flight_bytes: amount of data in bytes cached by the DMA.
732   */
733  struct dma_tx_state {
734  	dma_cookie_t last;
735  	dma_cookie_t used;
736  	u32 residue;
737  	u32 in_flight_bytes;
738  };
739  
740  /**
741   * enum dmaengine_alignment - defines alignment of the DMA async tx
742   * buffers
743   */
744  enum dmaengine_alignment {
745  	DMAENGINE_ALIGN_1_BYTE = 0,
746  	DMAENGINE_ALIGN_2_BYTES = 1,
747  	DMAENGINE_ALIGN_4_BYTES = 2,
748  	DMAENGINE_ALIGN_8_BYTES = 3,
749  	DMAENGINE_ALIGN_16_BYTES = 4,
750  	DMAENGINE_ALIGN_32_BYTES = 5,
751  	DMAENGINE_ALIGN_64_BYTES = 6,
752  	DMAENGINE_ALIGN_128_BYTES = 7,
753  	DMAENGINE_ALIGN_256_BYTES = 8,
754  };
755  
756  /**
757   * struct dma_slave_map - associates a slave device and its slave channel with
758   * the parameter to be used by a filter function
759   * @devname: name of the device
760   * @slave: slave channel name
761   * @param: opaque parameter to pass to struct dma_filter.fn
762   */
763  struct dma_slave_map {
764  	const char *devname;
765  	const char *slave;
766  	void *param;
767  };
768  
769  /**
770   * struct dma_filter - information for slave device/channel to filter_fn/param
771   * mapping
772   * @fn: filter function callback
773   * @mapcnt: number of slave device/channel in the map
774   * @map: array of channel to filter mapping data
775   */
776  struct dma_filter {
777  	dma_filter_fn fn;
778  	int mapcnt;
779  	const struct dma_slave_map *map;
780  };
781  
782  /**
783   * struct dma_device - info on the entity supplying DMA services
784   * @ref: reference is taken and put every time a channel is allocated or freed
785   * @chancnt: how many DMA channels are supported
786   * @privatecnt: how many DMA channels are requested by dma_request_channel
787   * @channels: the list of struct dma_chan
788   * @global_node: list_head for global dma_device_list
789   * @filter: information for device/slave to filter function/param mapping
790   * @cap_mask: one or more dma_capability flags
791   * @desc_metadata_modes: supported metadata modes by the DMA device
792   * @max_xor: maximum number of xor sources, 0 if no capability
793   * @max_pq: maximum number of PQ sources and PQ-continue capability
794   * @copy_align: alignment shift for memcpy operations
795   * @xor_align: alignment shift for xor operations
796   * @pq_align: alignment shift for pq operations
797   * @fill_align: alignment shift for memset operations
798   * @dev_id: unique device ID
799   * @dev: struct device reference for dma mapping api
800   * @owner: owner module (automatically set based on the provided dev)
801   * @chan_ida: unique channel ID
802   * @src_addr_widths: bit mask of src addr widths the device supports
803   *	Width is specified in bytes, e.g. for a device supporting
804   *	a width of 4 the mask should have BIT(4) set.
805   * @dst_addr_widths: bit mask of dst addr widths the device supports
806   * @directions: bit mask of slave directions the device supports.
807   *	Since the enum dma_transfer_direction is not defined as bit flag for
808   *	each type, the dma controller should set BIT(<TYPE>) and same
809   *	should be checked by controller as well
810   * @min_burst: min burst capability per-transfer
811   * @max_burst: max burst capability per-transfer
812   * @max_sg_burst: max number of SG list entries executed in a single burst
813   *	DMA transaction with no software intervention for reinitialization.
814   *	Zero value means unlimited number of entries.
815   * @descriptor_reuse: a submitted transfer can be resubmitted after completion
816   * @residue_granularity: granularity of the transfer residue reported
817   *	by tx_status
818   * @device_alloc_chan_resources: allocate resources and return the
819   *	number of allocated descriptors
820   * @device_router_config: optional callback for DMA router configuration
821   * @device_free_chan_resources: release DMA channel's resources
822   * @device_prep_dma_memcpy: prepares a memcpy operation
823   * @device_prep_dma_xor: prepares a xor operation
824   * @device_prep_dma_xor_val: prepares a xor validation operation
825   * @device_prep_dma_pq: prepares a pq operation
826   * @device_prep_dma_pq_val: prepares a pqzero_sum operation
827   * @device_prep_dma_memset: prepares a memset operation
828   * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
829   * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
830   * @device_prep_slave_sg: prepares a slave dma operation
831   * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
832   *	The function takes a buffer of size buf_len. The callback function will
833   *	be called after period_len bytes have been transferred.
834   * @device_prep_interleaved_dma: Transfer expression in a generic way.
835   * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
836   * @device_caps: May be used to override the generic DMA slave capabilities
837   *	with per-channel specific ones
838   * @device_config: Pushes a new configuration to a channel, return 0 or an error
839   *	code
840   * @device_pause: Pauses any transfer happening on a channel. Returns
841   *	0 or an error code
842   * @device_resume: Resumes any transfer on a channel previously
843   *	paused. Returns 0 or an error code
844   * @device_terminate_all: Aborts all transfers on a channel. Returns 0
845   *	or an error code
846   * @device_synchronize: Synchronizes the termination of transfers to the
847   *  current context.
848   * @device_tx_status: poll for transaction completion, the optional
849   *	txstate parameter can be supplied with a pointer to get a
850   *	struct with auxiliary transfer status information, otherwise the call
851   *	will just return a simple status code
852   * @device_issue_pending: push pending transactions to hardware
853   * @device_release: called some time after dma_async_device_unregister() is
854   *     called and there are no further references to this structure. This
855   *     must be implemented to free resources; however, many existing drivers
856   *     do not and are therefore not safe to unbind while in use.
857   * @dbg_summary_show: optional routine to show contents in debugfs; default code
858   *     will be used when this is omitted, but custom code can show extra,
859   *     controller specific information.
860   * @dbg_dev_root: the root folder in debugfs for this device
861   */
862  struct dma_device {
863  	struct kref ref;
864  	unsigned int chancnt;
865  	unsigned int privatecnt;
866  	struct list_head channels;
867  	struct list_head global_node;
868  	struct dma_filter filter;
869  	dma_cap_mask_t cap_mask;
870  	enum dma_desc_metadata_mode desc_metadata_modes;
871  	unsigned short max_xor;
872  	unsigned short max_pq;
873  	enum dmaengine_alignment copy_align;
874  	enum dmaengine_alignment xor_align;
875  	enum dmaengine_alignment pq_align;
876  	enum dmaengine_alignment fill_align;
877  	#define DMA_HAS_PQ_CONTINUE (1 << 15)
878  
879  	int dev_id;
880  	struct device *dev;
881  	struct module *owner;
882  	struct ida chan_ida;
883  
884  	u32 src_addr_widths;
885  	u32 dst_addr_widths;
886  	u32 directions;
887  	u32 min_burst;
888  	u32 max_burst;
889  	u32 max_sg_burst;
890  	bool descriptor_reuse;
891  	enum dma_residue_granularity residue_granularity;
892  
893  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
894  	int (*device_router_config)(struct dma_chan *chan);
895  	void (*device_free_chan_resources)(struct dma_chan *chan);
896  
897  	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
898  		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
899  		size_t len, unsigned long flags);
900  	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
901  		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
902  		unsigned int src_cnt, size_t len, unsigned long flags);
903  	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
904  		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
905  		size_t len, enum sum_check_flags *result, unsigned long flags);
906  	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
907  		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
908  		unsigned int src_cnt, const unsigned char *scf,
909  		size_t len, unsigned long flags);
910  	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
911  		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
912  		unsigned int src_cnt, const unsigned char *scf, size_t len,
913  		enum sum_check_flags *pqres, unsigned long flags);
914  	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
915  		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
916  		unsigned long flags);
917  	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
918  		struct dma_chan *chan, struct scatterlist *sg,
919  		unsigned int nents, int value, unsigned long flags);
920  	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
921  		struct dma_chan *chan, unsigned long flags);
922  
923  	struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
924  		struct dma_chan *chan, const struct dma_vec *vecs,
925  		size_t nents, enum dma_transfer_direction direction,
926  		unsigned long flags);
927  	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
928  		struct dma_chan *chan, struct scatterlist *sgl,
929  		unsigned int sg_len, enum dma_transfer_direction direction,
930  		unsigned long flags, void *context);
931  	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
932  		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
933  		size_t period_len, enum dma_transfer_direction direction,
934  		unsigned long flags);
935  	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
936  		struct dma_chan *chan, struct dma_interleaved_template *xt,
937  		unsigned long flags);
938  	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
939  		struct dma_chan *chan, dma_addr_t dst, u64 data,
940  		unsigned long flags);
941  
942  	void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
943  	int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
944  	int (*device_pause)(struct dma_chan *chan);
945  	int (*device_resume)(struct dma_chan *chan);
946  	int (*device_terminate_all)(struct dma_chan *chan);
947  	void (*device_synchronize)(struct dma_chan *chan);
948  
949  	enum dma_status (*device_tx_status)(struct dma_chan *chan,
950  					    dma_cookie_t cookie,
951  					    struct dma_tx_state *txstate);
952  	void (*device_issue_pending)(struct dma_chan *chan);
953  	void (*device_release)(struct dma_device *dev);
954  	/* debugfs support */
955  	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
956  	struct dentry *dbg_dev_root;
957  };
958  
959  static inline int dmaengine_slave_config(struct dma_chan *chan,
960  					  struct dma_slave_config *config)
961  {
962  	if (chan->device->device_config)
963  		return chan->device->device_config(chan, config);
964  
965  	return -ENOSYS;
966  }
967  
968  static inline bool is_slave_direction(enum dma_transfer_direction direction)
969  {
970  	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
971  	       (direction == DMA_DEV_TO_DEV);
972  }
973  
974  static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
975  	struct dma_chan *chan, dma_addr_t buf, size_t len,
976  	enum dma_transfer_direction dir, unsigned long flags)
977  {
978  	struct scatterlist sg;
979  	sg_init_table(&sg, 1);
980  	sg_dma_address(&sg) = buf;
981  	sg_dma_len(&sg) = len;
982  
983  	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
984  		return NULL;
985  
986  	return chan->device->device_prep_slave_sg(chan, &sg, 1,
987  						  dir, flags, NULL);
988  }
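
/*
 * Illustrative sketch (not part of the API): the typical slave transfer
 * sequence built around the helper above. 'chan', 'dma_buf', 'len',
 * 'my_tx_done' and 'my_ctx' are assumptions made up for the example; error
 * handling is abbreviated.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EBUSY;
 *	desc->callback = my_tx_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */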
989  
990  /**
991   * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
992   * @chan: The channel to be used for this descriptor
993   * @vecs: The array of DMA vectors that should be transferred
994   * @nents: The number of DMA vectors in the array
995   * @dir: Specifies the direction of the data transfer
996   * @flags: DMA engine flags
997   */
998  static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
999  	struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
1000  	enum dma_transfer_direction dir, unsigned long flags)
1001  {
1002  	if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
1003  		return NULL;
1004  
1005  	return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
1006  							    dir, flags);
1007  }
1008  
1009  static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
1010  	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
1011  	enum dma_transfer_direction dir, unsigned long flags)
1012  {
1013  	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
1014  		return NULL;
1015  
1016  	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
1017  						  dir, flags, NULL);
1018  }
1019  
1020  #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1021  struct rio_dma_ext;
1022  static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
1023  	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
1024  	enum dma_transfer_direction dir, unsigned long flags,
1025  	struct rio_dma_ext *rio_ext)
1026  {
1027  	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
1028  		return NULL;
1029  
1030  	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
1031  						  dir, flags, rio_ext);
1032  }
1033  #endif
1034  
1035  static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
1036  		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1037  		size_t period_len, enum dma_transfer_direction dir,
1038  		unsigned long flags)
1039  {
1040  	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
1041  		return NULL;
1042  
1043  	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
1044  						period_len, dir, flags);
1045  }
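
/*
 * Illustrative sketch (not part of the API): a cyclic transfer, e.g. for
 * audio, where the completion callback fires after every period and the
 * transfer loops over the buffer until the channel is terminated. The
 * 'PERIOD_BYTES' size and the 'period_elapsed' handler are assumptions made
 * up for the example.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * PERIOD_BYTES,
 *					 PERIOD_BYTES, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EBUSY;
 *	desc->callback = period_elapsed;
 *	desc->callback_param = substream;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */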
1046  
1047  static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
1048  		struct dma_chan *chan, struct dma_interleaved_template *xt,
1049  		unsigned long flags)
1050  {
1051  	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
1052  		return NULL;
1053  	if (flags & DMA_PREP_REPEAT &&
1054  	    !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
1055  		return NULL;
1056  
1057  	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
1058  }
1059  
1060  /**
1061   * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
1062   * @chan: The channel to be used for this descriptor
1063   * @dest: Address of buffer to be set
1064   * @value: Treated as a single byte value that fills the destination buffer
1065   * @len: The total size of dest
1066   * @flags: DMA engine flags
1067   */
1068  static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
1069  		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
1070  		unsigned long flags)
1071  {
1072  	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
1073  		return NULL;
1074  
1075  	return chan->device->device_prep_dma_memset(chan, dest, value,
1076  						    len, flags);
1077  }
1078  
1079  static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
1080  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1081  		size_t len, unsigned long flags)
1082  {
1083  	if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
1084  		return NULL;
1085  
1086  	return chan->device->device_prep_dma_memcpy(chan, dest, src,
1087  						    len, flags);
1088  }
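
/*
 * Illustrative sketch (not part of the API): a single memory-to-memory copy
 * offload using the helper above. 'chan' is assumed to have been requested
 * with the DMA_MEMCPY capability and 'src_dma'/'dst_dma' to have been
 * DMA-mapped by the caller.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc)
 *		return -EBUSY;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */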
1089  
1090  static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
1091  		enum dma_desc_metadata_mode mode)
1092  {
1093  	if (!chan)
1094  		return false;
1095  
1096  	return !!(chan->device->desc_metadata_modes & mode);
1097  }
1098  
1099  #ifdef CONFIG_DMA_ENGINE
1100  int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1101  				   void *data, size_t len);
1102  void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1103  				      size_t *payload_len, size_t *max_len);
1104  int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1105  				    size_t payload_len);
1106  #else /* CONFIG_DMA_ENGINE */
1107  static inline int dmaengine_desc_attach_metadata(
1108  		struct dma_async_tx_descriptor *desc, void *data, size_t len)
1109  {
1110  	return -EINVAL;
1111  }
1112  static inline void *dmaengine_desc_get_metadata_ptr(
1113  		struct dma_async_tx_descriptor *desc, size_t *payload_len,
1114  		size_t *max_len)
1115  {
1116  	return NULL;
1117  }
1118  static inline int dmaengine_desc_set_metadata_len(
1119  		struct dma_async_tx_descriptor *desc, size_t payload_len)
1120  {
1121  	return -EINVAL;
1122  }
1123  #endif /* CONFIG_DMA_ENGINE */
1124  
1125  /**
1126   * dmaengine_terminate_all() - Terminate all active DMA transfers
1127   * @chan: The channel for which to terminate the transfers
1128   *
1129   * This function is DEPRECATED use either dmaengine_terminate_sync() or
1130   * dmaengine_terminate_async() instead.
1131   */
1132  static inline int dmaengine_terminate_all(struct dma_chan *chan)
1133  {
1134  	if (chan->device->device_terminate_all)
1135  		return chan->device->device_terminate_all(chan);
1136  
1137  	return -ENOSYS;
1138  }
1139  
1140  /**
1141   * dmaengine_terminate_async() - Terminate all active DMA transfers
1142   * @chan: The channel for which to terminate the transfers
1143   *
1144   * Calling this function will terminate all active and pending descriptors
1145   * that have previously been submitted to the channel. It is not guaranteed
1146   * though that the transfer for the active descriptor has stopped when the
1147   * function returns. Furthermore it is possible the complete callback of a
1148   * submitted transfer is still running when this function returns.
1149   *
1150   * dmaengine_synchronize() needs to be called before it is safe to free
1151   * any memory that is accessed by previously submitted descriptors or before
1152   * freeing any resources accessed from within the completion callback of any
1153   * previously submitted descriptors.
1154   *
1155   * This function can be called from atomic context as well as from within a
1156   * complete callback of a descriptor submitted on the same channel.
1157   *
1158   * If neither of the two conditions above applies, consider using
1159   * dmaengine_terminate_sync() instead.
1160   */
1161  static inline int dmaengine_terminate_async(struct dma_chan *chan)
1162  {
1163  	if (chan->device->device_terminate_all)
1164  		return chan->device->device_terminate_all(chan);
1165  
1166  	return -EINVAL;
1167  }
1168  
1169  /**
1170   * dmaengine_synchronize() - Synchronize DMA channel termination
1171   * @chan: The channel to synchronize
1172   *
1173   * Synchronizes the DMA channel termination to the current context. When this
1174   * function returns it is guaranteed that all transfers for previously issued
1175   * descriptors have stopped and it is safe to free the memory associated
1176   * with them. Furthermore it is guaranteed that all complete callback functions
1177   * for a previously submitted descriptor have finished running and it is safe to
1178   * free resources accessed from within the complete callbacks.
1179   *
1180   * The behavior of this function is undefined if dma_async_issue_pending() has
1181   * been called between dmaengine_terminate_async() and this function.
1182   *
1183   * This function must only be called from non-atomic context and must not be
1184   * called from within a complete callback of a descriptor submitted on the same
1185   * channel.
1186   */
1187  static inline void dmaengine_synchronize(struct dma_chan *chan)
1188  {
1189  	might_sleep();
1190  
1191  	if (chan->device->device_synchronize)
1192  		chan->device->device_synchronize(chan);
1193  }
1194  
1195  /**
1196   * dmaengine_terminate_sync() - Terminate all active DMA transfers
1197   * @chan: The channel for which to terminate the transfers
1198   *
1199   * Calling this function will terminate all active and pending transfers
1200   * that have previously been submitted to the channel. It is similar to
1201   * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
1202   * stopped and that all complete callbacks have finished running when the
1203   * function returns.
1204   *
1205   * This function must only be called from non-atomic context and must not be
1206   * called from within a complete callback of a descriptor submitted on the same
1207   * channel.
1208   */
1209  static inline int dmaengine_terminate_sync(struct dma_chan *chan)
1210  {
1211  	int ret;
1212  
1213  	ret = dmaengine_terminate_async(chan);
1214  	if (ret)
1215  		return ret;
1216  
1217  	dmaengine_synchronize(chan);
1218  
1219  	return 0;
1220  }
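
/*
 * Illustrative sketch (not part of the API): tearing a channel down from
 * process context. dmaengine_terminate_sync() may sleep; from atomic context
 * use dmaengine_terminate_async() and call dmaengine_synchronize() later. The
 * buffer mapping is an assumption made up for the example.
 *
 *	dmaengine_terminate_sync(chan);
 *	dma_unmap_single(chan->device->dev, dma_buf, len, DMA_TO_DEVICE);
 *	dma_release_channel(chan);
 */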
1221  
1222  static inline int dmaengine_pause(struct dma_chan *chan)
1223  {
1224  	if (chan->device->device_pause)
1225  		return chan->device->device_pause(chan);
1226  
1227  	return -ENOSYS;
1228  }
1229  
1230  static inline int dmaengine_resume(struct dma_chan *chan)
1231  {
1232  	if (chan->device->device_resume)
1233  		return chan->device->device_resume(chan);
1234  
1235  	return -ENOSYS;
1236  }
1237  
1238  static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
1239  	dma_cookie_t cookie, struct dma_tx_state *state)
1240  {
1241  	return chan->device->device_tx_status(chan, cookie, state);
1242  }
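
/*
 * Illustrative sketch (not part of the API): polling the progress of a
 * submitted transfer and reading the residue. The residue is only meaningful
 * if the channel reports a granularity better than
 * DMA_RESIDUE_GRANULARITY_DESCRIPTOR.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
 *		pr_debug("%u bytes remaining\n", state.residue);
 *	else if (status == DMA_COMPLETE)
 *		pr_debug("transfer complete\n");
 */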
1243  
1244  static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
1245  {
1246  	return desc->tx_submit(desc);
1247  }
1248  
1249  static inline bool dmaengine_check_align(enum dmaengine_alignment align,
1250  					 size_t off1, size_t off2, size_t len)
1251  {
1252  	return !(((1 << align) - 1) & (off1 | off2 | len));
1253  }
1254  
1255  static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
1256  				       size_t off2, size_t len)
1257  {
1258  	return dmaengine_check_align(dev->copy_align, off1, off2, len);
1259  }
1260  
1261  static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
1262  				      size_t off2, size_t len)
1263  {
1264  	return dmaengine_check_align(dev->xor_align, off1, off2, len);
1265  }
1266  
1267  static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
1268  				     size_t off2, size_t len)
1269  {
1270  	return dmaengine_check_align(dev->pq_align, off1, off2, len);
1271  }
1272  
1273  static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
1274  				       size_t off2, size_t len)
1275  {
1276  	return dmaengine_check_align(dev->fill_align, off1, off2, len);
1277  }
1278  
1279  static inline void
1280  dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
1281  {
1282  	dma->max_pq = maxpq;
1283  	if (has_pq_continue)
1284  		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
1285  }
1286  
1287  static inline bool dmaf_continue(enum dma_ctrl_flags flags)
1288  {
1289  	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
1290  }
1291  
1292  static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
1293  {
1294  	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
1295  
1296  	return (flags & mask) == mask;
1297  }
1298  
1299  static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
1300  {
1301  	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
1302  }
1303  
1304  static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
1305  {
1306  	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
1307  }
1308  
1309  /* dma_maxpq - reduce maxpq in the face of continued operations
1310   * @dma - dma device with PQ capability
1311   * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
1312   *
1313   * When an engine does not support native continuation we need 3 extra
1314   * source slots to reuse P and Q with the following coefficients:
1315   * 1/ {00} * P : remove P from Q', but use it as a source for P'
1316   * 2/ {01} * Q : use Q to continue Q' calculation
1317   * 3/ {00} * Q : subtract Q from P' to cancel (2)
1318   *
1319   * In the case where P is disabled we only need 1 extra source:
1320   * 1/ {01} * Q : use Q to continue Q' calculation
1321   */
dma_maxpq(struct dma_device * dma,enum dma_ctrl_flags flags)1322  static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
1323  {
1324  	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
1325  		return dma_dev_to_maxpq(dma);
1326  	if (dmaf_p_disabled_continue(flags))
1327  		return dma_dev_to_maxpq(dma) - 1;
1328  	if (dmaf_continue(flags))
1329  		return dma_dev_to_maxpq(dma) - 3;
1330  	BUG();
1331  }
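
/*
 * Worked example (illustrative, not from the original header): for a device
 * registered with dma_set_maxpq(dma, 8, 0), i.e. max_pq == 8 and no native
 * continuation support:
 *
 *	no DMA_PREP_CONTINUE in flags			-> dma_maxpq() == 8
 *	DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P	-> dma_maxpq() == 7
 *	DMA_PREP_CONTINUE with P enabled		-> dma_maxpq() == 5
 */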

static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
				      size_t dir_icg)
{
	if (inc) {
		if (dir_icg)
			return dir_icg;
		if (sgl)
			return icg;
	}

	return 0;
}

static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
				 chunk->icg, chunk->dst_icg);
}

static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
					   struct data_chunk *chunk)
{
	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
				 chunk->icg, chunk->src_icg);
}
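
/*
 * Illustrative example (not from the original header): for an interleaved
 * template with dst_inc == true and dst_sgl == true, a chunk with icg == 16
 * and dst_icg == 0 yields a destination ICG of 16; setting dst_icg == 32
 * overrides that to 32, since the direction-specific value takes precedence.
 * With dst_inc == false the destination ICG is always 0.
 */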

/* --- public DMA engine API --- */

#ifdef CONFIG_DMA_ENGINE
void dmaengine_get(void);
void dmaengine_put(void);
#else
static inline void dmaengine_get(void)
{
}
static inline void dmaengine_put(void)
{
}
#endif

#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()	dmaengine_get()
#define async_dmaengine_put()	dmaengine_put()
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;
}
#endif /* CONFIG_ASYNC_TX_DMA */
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan);

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	clear_bit(tx_type, dstp->bits);
}

#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
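
/*
 * Minimal usage sketch (illustrative only): a client requesting any channel
 * with memcpy capability via the helpers above and dma_request_chan_by_mask()
 * declared further below; error handling is reduced to the bare minimum.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */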

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}
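
/*
 * Illustrative batching sketch (assumptions: "chan" is a DMA_MEMCPY-capable
 * channel and src[]/dst[] hold DMA-mapped addresses): several descriptors are
 * queued with dmaengine_submit() and the hardware is kicked once at the end,
 * which is the batching the comment above refers to.
 *
 *	for (i = 0; i < nr; i++) {
 *		desc = dmaengine_prep_dma_memcpy(chan, dst[i], src[i], len,
 *						 DMA_CTRL_ACK);
 *		if (!desc)
 *			break;
 *		cookie = dmaengine_submit(desc);
 *		if (dma_submit_error(cookie))
 *			break;
 *	}
 *	dma_async_issue_pending(chan);
 */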

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}
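
/*
 * Polling sketch (illustrative; cookie1/cookie2 are assumed to come from
 * earlier dmaengine_submit() calls and handle_*() stand in for client-specific
 * completion handling): the hardware is queried once and the returned window
 * is then reused for further cookies.
 *
 *	dma_cookie_t last, used;
 *
 *	if (dma_async_is_tx_complete(chan, cookie1, &last, &used) == DMA_COMPLETE)
 *		handle_first_buffer();
 *	if (dma_async_is_complete(cookie2, last, used) == DMA_COMPLETE)
 *		handle_second_buffer();
 */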

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_is_tx_complete();
 * the test logic is separated out so that multiple cookies can be
 * tested cheaply without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}
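
/*
 * Worked example (illustrative): with last_complete == 10 and last_used == 14,
 * cookies 11..14 report DMA_IN_PROGRESS and everything else DMA_COMPLETE.
 * When the cookie counter has wrapped, e.g. last_complete == 0x7ffffff0 and
 * last_used == 5, the in-flight window is (0x7ffffff0, INT_MAX] plus
 * [DMA_MIN_COOKIE, 5], which is what the second branch above handles.
 */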

static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
	if (!st)
		return;

	st->last = last;
	st->used = used;
	st->residue = residue;
}

#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return NULL;
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
						     dma_filter_fn fn,
						     void *fn_param,
						     struct device_node *np)
{
	return NULL;
}
static inline struct dma_chan *dma_request_chan(struct device *dev,
						const char *name)
{
	return ERR_PTR(-ENODEV);
}
static inline struct dma_chan *dma_request_chan_by_mask(
						const dma_cap_mask_t *mask)
{
	return ERR_PTR(-ENODEV);
}
static inline void dma_release_channel(struct dma_chan *chan)
{
}
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;

	if (!caps.descriptor_reuse)
		return -EPERM;

	tx->flags |= DMA_CTRL_REUSE;
	return 0;
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* this is supported for reusable desc, so check that */
	if (!dmaengine_desc_test_reuse(desc))
		return -EPERM;

	return desc->desc_free(desc);
}
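
/*
 * Reuse flow sketch (illustrative; "buf"/"len" are assumed to be a DMA-mapped
 * slave buffer and its size, and the fallback label is hypothetical): a
 * reusable descriptor is prepared once, resubmitted after each completion,
 * and finally released explicitly with dmaengine_desc_free() once it is no
 * longer needed.
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc || dmaengine_desc_set_reuse(desc))
 *		goto fallback;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	... wait for completion, then dmaengine_submit(desc) again as needed ...
 *	dmaengine_desc_free(desc);
 */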

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan,
				      const char *name);
void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
#define dma_request_channel(mask, x, y) \
	__dma_request_channel(&(mask), x, y, NULL)

/* Deprecated, please use dma_request_chan() directly */
static inline struct dma_chan * __deprecated
dma_request_slave_channel(struct device *dev, const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	return IS_ERR(ch) ? NULL : ch;
}

static inline struct dma_chan
*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
				  dma_filter_fn fn, void *fn_param,
				  struct device *dev, const char *name)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, name);
	if (chan)
		return chan;

	if (!fn || !fn_param)
		return NULL;

	return __dma_request_channel(&mask, fn, fn_param, NULL);
}

static inline const char *
dmaengine_get_direction_text(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		return "invalid";
	}
}

static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
{
	if (chan->dev->chan_dma_dev)
		return &chan->dev->device;

	return chan->device->dev;
}

#endif /* LINUX_DMAENGINE_H */