xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_mem.h
21  * Linux-specific definitions for QDF memory API's
22  */
23 
24 #ifndef __I_QDF_MEM_H
25 #define __I_QDF_MEM_H
26 
27 #ifdef __KERNEL__
28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
30 #include <linux/autoconf.h>
31 #else
32 #include <generated/autoconf.h>
33 #endif
34 #endif
35 #include <linux/slab.h>
36 #include <linux/hardirq.h>
37 #include <linux/vmalloc.h>
38 #include <linux/pci.h> /* pci_alloc_consistent */
39 #include <linux/cache.h> /* L1_CACHE_BYTES */
40 
41 #define __qdf_cache_line_sz L1_CACHE_BYTES
42 #include "queue.h"
43 
#else
/*
 * Non-kernel (e.g. userspace/unit-test) build: provide dummy definitions
 * for the kernel data types, functions, and enums used in this header so
 * it can still be parsed.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define kzalloc(size, flags) NULL
#define vmalloc(size)        NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
59 #include <qdf_status.h>
60 
61 #if IS_ENABLED(CONFIG_ARM_SMMU)
62 #include <pld_common.h>
63 #ifdef ENABLE_SMMU_S1_TRANSLATION
64 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
65 #include <asm/dma-iommu.h>
66 #endif
67 #endif
68 #include <linux/iommu.h>
69 #endif
70 
71 #ifdef __KERNEL__
/* Free-list node overlaid on each unused pool element */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;
75 
/**
 * struct __qdf_mempool_ctxt - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the pool memory created
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free pool elements
 * @lock: spinlock object
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
100 
101 #endif /* __KERNEL__ */
102 
#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
/* Memory-debug panics compiled out: expand to a no-op statement */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
/* Memory-debug panics enabled: forward to the common debug panic macro */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;
118 
/**
 * __qdf_dma_dir_to_os() - Convert QDF DMA data direction to OS specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: Linux enum dma_data_direction matching @qdf_dir; DMA_NONE for
 * any unrecognized direction value
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}
140 
141 
/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive the mapped physical (DMA) address.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 * DMA mapping failed
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						  void *buf, qdf_dma_dir_t dir,
						  int nbytes,
						  qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
					__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}
163 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA-mapped buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA (bus) address of the buffer to sync
 * @dir: direction the buffer was mapped with
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels: use the legacy dma_cache_sync() API.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA-mapped buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA (bus) address of the buffer to sync
 * @dir: direction the buffer was mapped with
 * @nbytes: number of bytes to sync
 *
 * 2.6.20 and newer kernels: hand ownership of the mapped region back to
 * the CPU via dma_sync_single_for_cpu().
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
182 
/**
 * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/* memory pool management APIs; definitions live outside this header */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
/* caller return address, used by memory debug tracking */
#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
213 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
224 
#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Pre-4.19 kernels: the domain hangs off the arm iommu mapping.
 *
 * Return: iommu domain, or NULL if no iommu mapping is present
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif

/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	return dma_addr;
}
#else
/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * SMMU S1 translation disabled: @dma_addr already is the physical address.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
283 
/**
 * __qdf_os_mem_dma_get_sgtable() - Build DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer (filled in by dma_get_sgtable())
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, negative error code on failure (per
 * dma_get_sgtable())
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}
301 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist array owned by @sgt via sg_free_table();
 * the underlying memory pages are not freed here.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
313 
/**
 * __qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist
 *                                    elements
 * @sgt: scatter gather table pointer
 *
 * Walks every entry of @sgt and sets its dma_address to the entry's
 * physical address (sg_phys()). NOTE(review): this assumes a 1:1
 * physical-to-DMA address mapping (no IOMMU translation for these
 * buffers) — confirm against callers.
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
333 
334 /**
335  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
336  * @osdev: parent device instance
337  * @mem_info: Pointer to allocated memory information
338  *
339  * Based on smmu stage 1 translation enablement status, return corresponding dma
340  * address from qdf_mem_info_t. If stage 1 translation enabled, return
341  * IO virtual address otherwise return physical address.
342  *
343  * Return: dma address
344  */
345 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
346 						    qdf_mem_info_t *mem_info)
347 {
348 	if (__qdf_mem_smmu_s1_enabled(osdev))
349 		return (qdf_dma_addr_t)mem_info->iova;
350 	else
351 		return (qdf_dma_addr_t)mem_info->pa;
352 }
353 
354 /**
355  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
356  * @osdev: parent device instance
357  * @mem_info: Pointer to allocated memory information
358  *
359  * Based on smmu stage 1 translation enablement status, return corresponding
360  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
361  * enabled, return pointer to IO virtual address otherwise return pointer to
362  * physical address
363  *
364  * Return: dma address storage pointer
365  */
366 static inline qdf_dma_addr_t *
367 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
368 			   qdf_mem_info_t *mem_info)
369 {
370 	if (__qdf_mem_smmu_s1_enabled(osdev))
371 		return (qdf_dma_addr_t *)(&mem_info->iova);
372 	else
373 		return (qdf_dma_addr_t *)(&mem_info->pa);
374 }
375 
376 /**
377  * __qdf_update_mem_map_table() - Update DMA memory map info
378  * @osdev: Parent device instance
379  * @mem_info: Pointer to shared memory information
380  * @dma_addr: dma address
381  * @mem_size: memory size allocated
382  *
383  * Store DMA shared memory information
384  *
385  * Return: none
386  */
387 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
388 					      qdf_mem_info_t *mem_info,
389 					      qdf_dma_addr_t dma_addr,
390 					      uint32_t mem_size)
391 {
392 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
393 	mem_info->iova = dma_addr;
394 	mem_info->size = mem_size;
395 }
396 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size recorded in @mem_info
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
410 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
426 
/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}
440 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
456 
/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);
471 
/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);
488 
/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);
497 
/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory that was allocated
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
512 
513 #endif /* __I_QDF_MEM_H */
514