/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map; must be a power of 2.
 * What is the appropriate value? The complexity of {map,unmap}_single is
 * linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command-line
 * controllable.
 */
#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE	(64UL << 20)
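
/*
 * A quick sanity check on the defaults (illustrative, not part of the
 * original header): with IO_TLB_SHIFT = 11 each slot is IO_TLB_SIZE = 2 KiB,
 * so the 64 MiB default buffer yields
 * (64UL << 20) / (1 << 11) = 32768 slots.
 */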

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The virtual address of the swiotlb memory pool. The pool may be
 *		remapped when memory encryption is in use; this field then holds
 *		the virtual address used for bounce-buffer operations.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this can be adjusted with a boot parameter,
 *		see setup_io_tlb_npages().
 * @late_alloc:	%true if allocated using the page allocator.
 * @nareas:	Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:	Array of memory area descriptors.
 * @slots:	Array of slot descriptors.
 * @node:	Member of the IO TLB memory pool list.
 * @rcu:	RCU head for swiotlb_dyn_free().
 * @transient:	%true if transient memory pool.
 */
struct io_tlb_pool {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	bool late_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct list_head node;
	struct rcu_head rcu;
	bool transient;
#endif
};

/**
 * struct io_tlb_mem - Software IO TLB allocator
 * @defpool:	Default (initial) IO TLB memory pool descriptor.
 * @nslabs:	Total number of IO TLB slabs in all pools.
 * @debugfs:	The dentry to debugfs.
 * @force_bounce: %true if swiotlb bouncing is forced.
 * @for_alloc:	%true if the pool is used for memory allocation.
 * @can_grow:	%true if more pools can be allocated dynamically.
 * @phys_limit:	Maximum allowed physical address.
 * @lock:	Lock to synchronize changes to the @pools list.
 * @pools:	List of IO TLB memory pool descriptors (if dynamic).
 * @dyn_alloc:	Dynamic IO TLB pool allocation work.
 * @total_used:	The total number of slots in the pool that are currently used
 *		across all areas. Used only for calculating used_hiwater in
 *		debugfs.
 * @used_hiwater: The high water mark for total_used. Used only for reporting
 *		in debugfs.
 * @transient_nslabs: The total number of slots in all transient pools that
 *		are currently used across all areas.
 */
struct io_tlb_mem {
	struct io_tlb_pool defpool;
	unsigned long nslabs;
	struct dentry *debugfs;
	bool force_bounce;
	bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	bool can_grow;
	u64 phys_limit;
	spinlock_t lock;
	struct list_head pools;
	struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
	atomic_long_t transient_nslabs;
#endif
};

struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr);

/**
 * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs
 * @dev:	Device which has mapped the buffer.
 * @paddr:	Physical address within the DMA buffer.
 *
 * Find the swiotlb pool that @paddr points into.
 *
 * Return:
 * * pool address if @paddr points into a bounce buffer
 * * NULL if @paddr does not point into a bounce buffer. As such, this function
 *   can be used to determine if @paddr denotes a swiotlb bounce buffer.
 */
static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
		phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	if (!mem)
		return NULL;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	/*
	 * All SWIOTLB buffer addresses must have been returned by
	 * swiotlb_tbl_map_single() and passed to a device driver.
	 * If a SWIOTLB address is checked on another CPU, then it was
	 * presumably loaded by the device driver from an unspecified private
	 * data structure. Make sure that this load is ordered before reading
	 * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool().
	 *
	 * This barrier pairs with smp_mb() in swiotlb_find_slots().
	 */
	smp_rmb();
	if (READ_ONCE(dev->dma_uses_io_tlb))
		return __swiotlb_find_pool(dev, paddr);
#else
	if (paddr >= mem->defpool.start && paddr < mem->defpool.end)
		return &mem->defpool;
#endif

	return NULL;
}
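
/*
 * Usage sketch (illustrative, not part of the original header): per the
 * Return description above, the NULL/non-NULL result can serve as an
 * "is this a bounce buffer?" predicate. The helper name is a placeholder.
 *
 *	static bool my_paddr_is_bounced(struct device *dev, phys_addr_t paddr)
 *	{
 *		return swiotlb_find_pool(dev, paddr) != NULL;
 *	}
 */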

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}

static inline void swiotlb_dev_init(struct device *dev)
{
}

static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
		phys_addr_t paddr)
{
	return NULL;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_allocated(void)
{
	return false;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}

static inline phys_addr_t default_swiotlb_base(void)
{
	return 0;
}

static inline phys_addr_t default_swiotlb_limit(void)
{
	return 0;
}
#endif /* CONFIG_SWIOTLB */
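
/*
 * Initialization sketch (illustrative; the call site is a placeholder):
 * architecture setup code calls swiotlb_init() once during early boot, e.g.
 *
 *	swiotlb_init(addressing_limited, SWIOTLB_VERBOSE);
 *
 * where addressing_limited says whether some device may be unable to reach
 * all of system memory, and the flags are the SWIOTLB_* values defined at
 * the top of this header.
 */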

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, unsigned int alloc_aligned_mask,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

void __swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
		size_t mapping_size, enum dma_data_direction dir,
		unsigned long attrs, struct io_tlb_pool *pool);
static inline void swiotlb_tbl_unmap_single(struct device *dev,
		phys_addr_t addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);

	if (unlikely(pool))
		__swiotlb_tbl_unmap_single(dev, addr, size, dir, attrs, pool);
}
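
/*
 * Map/unmap sketch (illustrative; error handling elided and the variable
 * names are placeholders): swiotlb_tbl_map_single() returns the physical
 * address of the bounce-buffer slot, which is later handed back to
 * swiotlb_tbl_unmap_single(). Passing 0 requests no extra allocation
 * alignment.
 *
 *	phys_addr_t tlb_addr;
 *
 *	tlb_addr = swiotlb_tbl_map_single(dev, phys, size, 0,
 *					  DMA_TO_DEVICE, attrs);
 *	... perform the DMA transfer ...
 *	swiotlb_tbl_unmap_single(dev, tlb_addr, size, DMA_TO_DEVICE, attrs);
 */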

void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir,
		struct io_tlb_pool *pool);
static inline void swiotlb_sync_single_for_device(struct device *dev,
		phys_addr_t addr, size_t size, enum dma_data_direction dir)
{
	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);

	if (unlikely(pool))
		__swiotlb_sync_single_for_device(dev, addr, size, dir, pool);
}

void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir,
		struct io_tlb_pool *pool);
static inline void swiotlb_sync_single_for_cpu(struct device *dev,
		phys_addr_t addr, size_t size, enum dma_data_direction dir)
{
	struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);

	if (unlikely(pool))
		__swiotlb_sync_single_for_cpu(dev, addr, size, dir, pool);
}
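
/*
 * Sync sketch (illustrative; names are placeholders): for a long-lived
 * DMA_FROM_DEVICE mapping, ownership is handed back and forth around each
 * transfer. Both wrappers are no-ops when @addr is not a bounce buffer,
 * since swiotlb_find_pool() then returns NULL.
 *
 *	swiotlb_sync_single_for_device(dev, tlb_addr, size, DMA_FROM_DEVICE);
 *	... the device writes into the buffer ...
 *	swiotlb_sync_single_for_cpu(dev, tlb_addr, size, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 */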

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
		size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */
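
/*
 * Restricted-pool sketch (illustrative; free_pages_other_way() is a
 * hypothetical placeholder, not a real helper): swiotlb_alloc() returns
 * NULL on failure, and swiotlb_free() returns %false when @page was not
 * allocated from the pool, in which case the caller frees it through its
 * normal path.
 *
 *	if (is_swiotlb_for_alloc(dev))
 *		page = swiotlb_alloc(dev, size);
 *	...
 *	if (!swiotlb_free(dev, page, size))
 *		free_pages_other_way(page, size);
 */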

#endif /* __LINUX_SWIOTLB_H */