/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
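
/*
 * Example (illustrative only, not part of this header): the attributes above
 * form a bitmask and may be combined when passed to the *_attrs() interfaces.
 * A hypothetical driver allocating a buffer that needs no kernel mapping and
 * no allocation-failure warning might do:
 *
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				    DMA_ATTR_NO_KERNEL_MAPPING |
 *				    DMA_ATTR_NO_WARN);
 */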

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
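
/*
 * Example (hypothetical driver code, shown only to illustrate the rule above):
 * never compare a returned handle against DMA_MAPPING_ERROR directly, check it
 * with dma_mapping_error() instead:
 *
 *	dma_addr_t addr = dma_map_single(dev, ptr, size, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */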

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
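
/*
 * Example (a hypothetical driver sketch, not part of this header): a typical
 * streaming mapping of a kmalloc()ed buffer, with a CPU-side sync before the
 * CPU reads the data.  'dev', 'buf' and 'len' are assumed to come from the
 * driver:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... let the device DMA into the buffer ...
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at the data ...
 *	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
 */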

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
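
/*
 * Example (illustrative driver sketch; 'dev' and a previously built 'sgt'
 * are assumptions): map an sg_table, check the result, and unmap it once
 * the DMA is done:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	... start the transfer ...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */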

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
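
/*
 * Example (hypothetical; the buffer is assumed to stay mapped across several
 * transfers): hand ownership back and forth without remapping:
 *
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... the CPU inspects or updates the buffer ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	... the device performs the next transfer ...
 */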

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
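
/*
 * Example (hypothetical driver code): allocate a coherent descriptor ring at
 * probe time and free it on remove.  'ring_size' and 'ring_dma' are
 * assumptions made for the sketch:
 *
 *	void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */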


static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
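
/*
 * Example (the usual probe-time pattern; the 64-bit value is only an
 * illustration, many devices need a narrower mask):
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		return ret;
 */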

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
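
/*
 * Example (numbers are only an illustration): with the default boundary mask
 * of ULONG_MAX and a 4 KiB IOMMU page size (page_shift == 12) on a 64-bit
 * build, dma_get_seg_boundary_nr_pages() returns 2^52 pages, i.e. segments
 * are effectively unlimited.  A driver that sets a 64 KiB boundary mask via
 * dma_set_seg_boundary(dev, SZ_64K - 1) would get 16 pages instead.
 */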

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
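
/*
 * Example (hypothetical frame-buffer style driver): allocate a write-combined
 * buffer and expose it to user space from the driver's mmap handler; 'vma'
 * comes from that handler and 'fb_dma'/'size' are assumptions:
 *
 *	void *fb = dma_alloc_wc(dev, size, &fb_dma, GFP_KERNEL);
 *
 *	...
 *	return dma_mmap_wc(dev, vma, fb, fb_dma, size);
 */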

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
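
/*
 * Example (illustrative only; 'struct ring_info' and 'ri' are hypothetical):
 * these macros let a driver keep unmap state only on platforms that actually
 * need it, at zero cost elsewhere:
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(ri, mapping, addr);
 *	dma_unmap_len_set(ri, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(ri, mapping),
 *			 dma_unmap_len(ri, len), DMA_FROM_DEVICE);
 */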

#endif /* _LINUX_DMA_MAPPING_H */