xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_mem.h
21  * Linux-specific definitions for QDF memory API's
22  */
23 
24 #ifndef __I_QDF_MEM_H
25 #define __I_QDF_MEM_H
26 
27 #ifdef __KERNEL__
28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
30 #include <linux/autoconf.h>
31 #else
32 #include <generated/autoconf.h>
33 #endif
34 #endif
35 #include <linux/slab.h>
36 #include <linux/hardirq.h>
37 #include <linux/vmalloc.h>
38 #include <linux/pci.h> /* pci_alloc_consistent */
39 #include <linux/cache.h> /* L1_CACHE_BYTES */
40 
41 #define __qdf_cache_line_sz L1_CACHE_BYTES
42 #include "queue.h"
43 
44 #else
45 /*
46  * Provide dummy defs for kernel data types, functions, and enums
47  * used in this header file.
48  */
49 #define GFP_KERNEL 0
50 #define GFP_ATOMIC 0
51 #define kzalloc(size, flags) NULL
52 #define vmalloc(size)        NULL
53 #define kfree(buf)
54 #define vfree(buf)
55 #define pci_alloc_consistent(dev, size, paddr) NULL
56 #define __qdf_mempool_t void*
57 #define QDF_RET_IP NULL
58 #endif /* __KERNEL__ */
59 #include <qdf_status.h>
60 
61 #ifdef CONFIG_ARM_SMMU
62 #include <pld_common.h>
63 #ifdef ENABLE_SMMU_S1_TRANSLATION
64 #include <asm/dma-iommu.h>
65 #endif
66 #include <linux/iommu.h>
67 #endif
68 
69 #ifdef __KERNEL__
/**
 * typedef mempool_elem_t - linkage node overlaid on each free pool element
 * @mempool_entry: singly-linked tail-queue linkage into the pool free list
 */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the contiguous memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free (unallocated) pool elements
 * @lock: spinlock object (presumably guards @free_list/@free_cnt —
 *        implementation lives in the corresponding .c file)
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements currently available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
98 
99 #endif /* __KERNEL__ */
100 
101 #define __qdf_align(a, mask) ALIGN(a, mask)
102 
103 #ifdef DISABLE_MEMDEBUG_PANIC
104 #define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
105 	do { \
106 		/* no-op */ \
107 	} while (false)
108 #else
109 #define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
110 	QDF_DEBUG_PANIC(reason_fmt, ## args)
111 #endif
112 
113 /* typedef for dma_data_direction */
114 typedef enum dma_data_direction __dma_data_direction;
115 
116 /**
117  * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
118  * @dir: QDF DMA data direction
119  *
120  * Return:
121  * enum dma_data_direction
122  */
123 static inline
124 enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
125 {
126 	switch (qdf_dir) {
127 	case QDF_DMA_BIDIRECTIONAL:
128 		return DMA_BIDIRECTIONAL;
129 	case QDF_DMA_TO_DEVICE:
130 		return DMA_TO_DEVICE;
131 	case QDF_DMA_FROM_DEVICE:
132 		return DMA_FROM_DEVICE;
133 	default:
134 		return DMA_NONE;
135 	}
136 }
137 
138 
139 /**
140  * __qdf_mem_map_nbytes_single - Map memory for DMA
141  * @osdev: pomter OS device context
142  * @buf: pointer to memory to be dma mapped
143  * @dir: DMA map direction
144  * @nbytes: number of bytes to be mapped.
145  * @phy_addr: ponter to recive physical address.
146  *
147  * Return: success/failure
148  */
149 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
150 						  void *buf, qdf_dma_dir_t dir,
151 						  int nbytes,
152 						  qdf_dma_addr_t *phy_addr)
153 {
154 	/* assume that the OS only provides a single fragment */
155 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
156 					__qdf_dma_dir_to_os(dir));
157 	return dma_mapping_error(osdev->dev, *phy_addr) ?
158 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
159 }
160 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels have no dma_sync_single_for_cpu(), so fall back to
 * dma_cache_sync().
 * NOTE(review): dma_cache_sync() historically took a kernel virtual
 * address, while @buf here is a DMA address — confirm on targets that
 * actually build this legacy path.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Hands the buffer back to the CPU via the standard DMA streaming API.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
179 
180 /**
181  * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
182  *
183  * @osdev: pomter OS device context
184  * @phy_addr: physical address of memory to be dma unmapped
185  * @dir: DMA unmap direction
186  * @nbytes: number of bytes to be unmapped.
187  *
188  * Return - none
189  */
190 static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
191 						 qdf_dma_addr_t phy_addr,
192 						 qdf_dma_dir_t dir, int nbytes)
193 {
194 	dma_unmap_single(osdev->dev, phy_addr, nbytes,
195 				__qdf_dma_dir_to_os(dir));
196 }
197 #ifdef __KERNEL__
198 
199 typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
200 
201 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
202 		       size_t pool_entry_size, u_int32_t flags);
203 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
204 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
205 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
206 #define QDF_RET_IP ((void *)_RET_IP_)
207 
208 #define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
209 #endif
210 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
221 
#if defined(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * On 4.19+ kernels the iommu domain is stored directly in the device
 * context.
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * On pre-4.19 kernels the domain is reached through the device's iommu
 * mapping, which may not exist.
 *
 * Return: iommu domain, or NULL if no iommu mapping is present
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif

/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	/* no domain (or S1 bypassed): dma_addr already is the phys addr */
	return dma_addr;
}
#else
/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Without SMMU S1 translation the DMA address is the physical address.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
280 
281 /**
282  * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
283  * @dev: device instace
284  * @sgt: scatter gather table pointer
285  * @cpu_addr: HLOS virtual address
286  * @dma_addr: dma/iova
287  * @size: allocated memory size
288  *
289  * Return: physical address
290  */
291 static inline int
292 __qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
293 			     qdf_dma_addr_t dma_addr, size_t size)
294 {
295 	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
296 				size);
297 }
298 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist storage allocated by dma_get_sgtable(); does not
 * free the underlying DMA memory itself.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
310 
311 /**
312  * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
313  * @sgt: scatter gather table pointer
314  *
315  * Return: None
316  */
317 static inline void
318 __qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
319 {
320 	struct scatterlist *sg;
321 	int i;
322 
323 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
324 		if (!sg)
325 			break;
326 
327 		sg->dma_address = sg_phys(sg);
328 	}
329 }
330 
331 /**
332  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
333  * @osdev: parent device instance
334  * @mem_info: Pointer to allocated memory information
335  *
336  * Based on smmu stage 1 translation enablement status, return corresponding dma
337  * address from qdf_mem_info_t. If stage 1 translation enabled, return
338  * IO virtual address otherwise return physical address.
339  *
340  * Return: dma address
341  */
342 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
343 						    qdf_mem_info_t *mem_info)
344 {
345 	if (__qdf_mem_smmu_s1_enabled(osdev))
346 		return (qdf_dma_addr_t)mem_info->iova;
347 	else
348 		return (qdf_dma_addr_t)mem_info->pa;
349 }
350 
351 /**
352  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
353  * @osdev: parent device instance
354  * @mem_info: Pointer to allocated memory information
355  *
356  * Based on smmu stage 1 translation enablement status, return corresponding
357  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
358  * enabled, return pointer to IO virtual address otherwise return pointer to
359  * physical address
360  *
361  * Return: dma address storage pointer
362  */
363 static inline qdf_dma_addr_t *
364 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
365 			   qdf_mem_info_t *mem_info)
366 {
367 	if (__qdf_mem_smmu_s1_enabled(osdev))
368 		return (qdf_dma_addr_t *)(&mem_info->iova);
369 	else
370 		return (qdf_dma_addr_t *)(&mem_info->pa);
371 }
372 
373 /**
374  * __qdf_update_mem_map_table() - Update DMA memory map info
375  * @osdev: Parent device instance
376  * @mem_info: Pointer to shared memory information
377  * @dma_addr: dma address
378  * @mem_size: memory size allocated
379  *
380  * Store DMA shared memory information
381  *
382  * Return: none
383  */
384 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
385 					      qdf_mem_info_t *mem_info,
386 					      qdf_dma_addr_t dma_addr,
387 					      uint32_t mem_size)
388 {
389 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
390 	mem_info->iova = dma_addr;
391 	mem_info->size = mem_size;
392 }
393 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance (unused here; kept for API symmetry)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
407 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance (unused here; kept for API symmetry)
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
423 
/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance (unused here; kept for API symmetry)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}
437 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance (unused here; kept for API symmetry)
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
453 
454 /**
455  * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
456  * @osdev: OS device handle
457  * @dev: Pointer to device handle
458  * @size: Size to be allocated
459  * @paddr: Physical address
460  * @func: Function name of the call site
 * @line: line number of the call site
462  *
463  * Return: pointer of allocated memory or null if memory alloc fails
464  */
465 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
466 				 qdf_size_t size, qdf_dma_addr_t *paddr,
467 				 const char *func, uint32_t line);
468 
469 /**
470  * __qdf_mem_malloc() - allocates QDF memory
471  * @size: Number of bytes of memory to allocate.
472  *
473  * @func: Function name of the call site
 * @line: line number of the call site
475  *
 * This function will dynamically allocate the specified number of bytes of
477  * memory.
478  *
479  * Return:
480  * Upon successful allocate, returns a non-NULL pointer to the allocated
481  * memory.  If this function is unable to allocate the amount of memory
482  * specified (for any reason) it returns NULL.
483  */
484 void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);
485 
486 /**
487  * __qdf_mem_free() - free QDF memory
488  * @ptr: Pointer to the starting address of the memory to be freed.
489  *
490  * This function will free the memory pointed to by 'ptr'.
491  * Return: None
492  */
493 void __qdf_mem_free(void *ptr);
494 
495 /**
496  * __qdf_mem_free_consistent() - free consistent qdf memory
497  * @osdev: OS device handle
498  * @dev: Pointer to device handle
499  * @size: Size to be allocated
500  * @vaddr: virtual address
501  * @paddr: Physical address
502  * @memctx: Pointer to DMA context
503  *
504  * Return: none
505  */
506 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
507 			       qdf_size_t size, void *vaddr,
508 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
509 
510 #endif /* __I_QDF_MEM_H */
511