xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_mem.h
 * Linux-specific definitions for QDF memory APIs
22  */
23 
24 #ifndef __I_QDF_MEM_H
25 #define __I_QDF_MEM_H
26 
27 #ifdef __KERNEL__
28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
30 #include <linux/autoconf.h>
31 #else
32 #include <generated/autoconf.h>
33 #endif
34 #endif
35 #include <linux/slab.h>
36 #include <linux/hardirq.h>
37 #include <linux/vmalloc.h>
38 #include <linux/pci.h> /* pci_alloc_consistent */
39 #include <linux/cache.h> /* L1_CACHE_BYTES */
40 
41 #define __qdf_cache_line_sz L1_CACHE_BYTES
42 #if defined(CONFIG_MCL)
43 #include <cds_queue.h>
44 #else
45 #include <sys/queue.h>
46 #endif
47 #else
48 /*
49  * Provide dummy defs for kernel data types, functions, and enums
50  * used in this header file.
51  */
52 #define GFP_KERNEL 0
53 #define GFP_ATOMIC 0
54 #define kzalloc(size, flags) NULL
55 #define vmalloc(size)        NULL
56 #define kfree(buf)
57 #define vfree(buf)
58 #define pci_alloc_consistent(dev, size, paddr) NULL
59 #define __qdf_mempool_t void*
60 #define QDF_RET_IP NULL
61 #endif /* __KERNEL__ */
62 #include <qdf_status.h>
63 
64 #ifdef CONFIG_ARM_SMMU
65 #include <pld_common.h>
66 #ifdef ENABLE_SMMU_S1_TRANSLATION
67 #include <asm/dma-iommu.h>
68 #endif
69 #include <linux/iommu.h>
70 #endif
71 
72 #ifdef __KERNEL__
/**
 * typedef mempool_elem_t - Memory pool element
 * @mempool_entry: linkage into the pool's singly-linked free list
 */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;
76 
/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free (unallocated) elements
 * @lock: spinlock object (presumably serializes free-list operations —
 *        confirm against the pool implementation)
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements currently available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
101 
102 #endif /* __KERNEL__ */
103 
/*
 * Round @a up per the kernel's ALIGN() helper.
 * NOTE(review): the second parameter is forwarded to ALIGN() as an
 * alignment value despite being named "mask".
 */
#define __qdf_align(a, mask) ALIGN(a, mask)

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;
108 
/**
 * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Any unrecognized direction maps to DMA_NONE.
 *
 * Return: enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}
130 
131 
/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped
 * @phy_addr: pointer to receive the resulting DMA (bus) address
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 *         mapping failed
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						  void *buf, qdf_dma_dir_t dir,
						  int nbytes,
						  qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
					__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}
153 
/**
 * __qdf_mem_dma_cache_sync() - Sync a DMA-mapped buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA direction the buffer was mapped with
 * @nbytes: number of bytes to sync
 *
 * Kernels older than 2.6.20 do not provide dma_sync_single_for_cpu(),
 * so dma_cache_sync() is used on them instead.
 *
 * Return: none
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
172 
/**
 * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 *
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return - none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
190 #ifdef __KERNEL__
191 
/* Opaque handle to a memory pool context */
typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/* Memory pool API; definitions live outside this header */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
/* Caller's return address (kernel _RET_IP_), e.g. for allocation tracking */
#define QDF_RET_IP ((void *)_RET_IP_)

/* Size in bytes of one element of pool @_pool */
#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
202 #endif
203 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
214 
#if defined(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/*
 * Pre-4.19 kernels: the iommu domain is reached through the arm iommu
 * mapping object; NULL when no mapping has been set up.
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif

/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	return dma_addr;
}
#else
/* No SMMU stage 1 translation: the dma address already is the physical one */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
273 
/**
 * __qdf_os_mem_dma_get_sgtable() - Fill a DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer (must point to a struct sg_table)
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, negative error code from dma_get_sgtable() on
 *         failure
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}
291 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header whose entry list is released
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
303 
304 /**
305  * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
306  * @sgt: scatter gather table pointer
307  *
308  * Return: None
309  */
310 static inline void
311 __qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
312 {
313 	struct scatterlist *sg;
314 	int i;
315 
316 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
317 		if (!sg)
318 			break;
319 
320 		sg->dma_address = sg_phys(sg);
321 	}
322 }
323 
324 /**
325  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
326  * @osdev: parent device instance
327  * @mem_info: Pointer to allocated memory information
328  *
329  * Based on smmu stage 1 translation enablement status, return corresponding dma
330  * address from qdf_mem_info_t. If stage 1 translation enabled, return
331  * IO virtual address otherwise return physical address.
332  *
333  * Return: dma address
334  */
335 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
336 						    qdf_mem_info_t *mem_info)
337 {
338 	if (__qdf_mem_smmu_s1_enabled(osdev))
339 		return (qdf_dma_addr_t)mem_info->iova;
340 	else
341 		return (qdf_dma_addr_t)mem_info->pa;
342 }
343 
344 /**
345  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
346  * @osdev: parent device instance
347  * @mem_info: Pointer to allocated memory information
348  *
349  * Based on smmu stage 1 translation enablement status, return corresponding
350  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
351  * enabled, return pointer to IO virtual address otherwise return pointer to
352  * physical address
353  *
354  * Return: dma address storage pointer
355  */
356 static inline qdf_dma_addr_t *
357 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
358 			   qdf_mem_info_t *mem_info)
359 {
360 	if (__qdf_mem_smmu_s1_enabled(osdev))
361 		return (qdf_dma_addr_t *)(&mem_info->iova);
362 	else
363 		return (qdf_dma_addr_t *)(&mem_info->pa);
364 }
365 
366 /**
367  * __qdf_update_mem_map_table() - Update DMA memory map info
368  * @osdev: Parent device instance
369  * @mem_info: Pointer to shared memory information
370  * @dma_addr: dma address
371  * @mem_size: memory size allocated
372  *
373  * Store DMA shared memory information
374  *
375  * Return: none
376  */
377 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
378 					      qdf_mem_info_t *mem_info,
379 					      qdf_dma_addr_t dma_addr,
380 					      uint32_t mem_size)
381 {
382 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
383 	mem_info->iova = dma_addr;
384 	mem_info->size = mem_size;
385 }
386 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
400 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
416 
/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}
430 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
446 #endif /* __I_QDF_MEM_H */
447