xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_mem.h (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * QCA driver framework (QDF) memory management APIs
22  */
23 
24 #if !defined(__QDF_MEMORY_H)
25 #define __QDF_MEMORY_H
26 
27 /* Include Files */
28 #include <qdf_types.h>
29 #include <i_qdf_mem.h>
30 #include <i_qdf_trace.h>
31 
32 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
33 
34 /**
35  * qdf_align() - align to the given size.
36  * @a: input that needs to be aligned.
37  * @align_size: boundary on which 'a' has to be aligned.
38  *
39  * Return: aligned value.
40  */
41 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
42 
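/*
 * Usage sketch (illustrative only, assuming __qdf_align() rounds up to the
 * requested boundary): padding a hypothetical buffer length out to the cache
 * line size.
 *
 *	uint32_t len = 100;
 *	uint32_t padded_len = qdf_align(len, QDF_CACHE_LINE_SZ);
 *
 * With a 64-byte cache line, padded_len becomes 128.
 */
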
43 /**
44  * struct qdf_mem_dma_page_t - Allocated dmaable page
45  * @page_v_addr_start: Page start virtual address
46  * @page_v_addr_end: Page end virtual address
47  * @page_p_addr: Page start physical address
48  */
49 struct qdf_mem_dma_page_t {
50 	char *page_v_addr_start;
51 	char *page_v_addr_end;
52 	qdf_dma_addr_t page_p_addr;
53 };
54 
55 /**
56  * struct qdf_mem_multi_page_t - multiple page allocation information storage
57  * @num_element_per_page: Number of elements in a single page
58  * @num_pages: Number of pages needed for the allocation
59  * @dma_pages: page information storage in case of coherent memory
60  * @cacheable_pages: page information storage in case of cacheable memory
61  */
62 struct qdf_mem_multi_page_t {
63 	uint16_t num_element_per_page;
64 	uint16_t num_pages;
65 	struct qdf_mem_dma_page_t *dma_pages;
66 	void **cacheable_pages;
67 };
68 
69 
70 /* Preprocessor definitions and constants */
71 
72 typedef __qdf_mempool_t qdf_mempool_t;
73 
74 /**
75  * qdf_mem_init() - Initialize QDF memory module
76  *
77  * Return: None
78  *
79  */
80 void qdf_mem_init(void);
81 
82 /**
83  * qdf_mem_exit() - Exit QDF memory module
84  *
85  * Return: None
86  *
87  */
88 void qdf_mem_exit(void);
89 
90 #define QDF_MEM_FUNC_NAME_SIZE 48
91 
92 #ifdef MEMORY_DEBUG
93 /**
94  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
95  *
96  * Return: value of mem_debug_disabled qdf module argument
97  */
98 bool qdf_mem_debug_config_get(void);
99 
100 /**
101  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
102  * @size: Number of bytes of memory to allocate.
103  * @func: Function name of the call site
104  * @line: Line number of the call site
105  * @caller: Address of the caller function
106  * @flag: GFP flag
107  *
108  * This function will dynamically allocate the specified number of bytes of
109  * memory and add it to the qdf tracking list to check for memory leaks and
110  * corruption.
111  *
112  * Return: A valid memory location on success, or NULL on failure
113  */
114 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
115 			   void *caller, uint32_t flag);
116 
117 #define qdf_mem_malloc(size) \
118 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
119 
120 #define qdf_mem_malloc_fl(size, func, line) \
121 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
122 
123 #define qdf_mem_malloc_atomic(size) \
124 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
125 /**
126  * qdf_mem_free_debug() - debug version of qdf_mem_free
127  * @ptr: Pointer to the starting address of the memory to be freed.
128  *
129  * This function will free the memory pointed to by 'ptr'. It also checks for
130  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
131  *
132  * Return: none
133  */
134 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
135 
136 #define qdf_mem_free(ptr) \
137 	qdf_mem_free_debug(ptr, __func__, __LINE__)
138 
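/*
 * Usage sketch (illustrative, not part of the API): a typical
 * allocate/check/free sequence. The struct name my_ctx is hypothetical; the
 * calling convention is identical whether or not MEMORY_DEBUG is enabled,
 * since the qdf_mem_malloc()/qdf_mem_free() macro names do not change.
 *
 *	struct my_ctx *ctx;
 *
 *	ctx = qdf_mem_malloc(sizeof(*ctx));
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... use ctx ...
 *
 *	qdf_mem_free(ctx);
 */
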
139 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
140 				     struct qdf_mem_multi_page_t *pages,
141 				     size_t element_size, uint16_t element_num,
142 				     qdf_dma_context_t memctxt, bool cacheable,
143 				     const char *func, uint32_t line,
144 				     void *caller);
145 
146 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
147 				  memctxt, cacheable) \
148 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
149 					element_num, memctxt, cacheable, \
150 					__func__, __LINE__, QDF_RET_IP)
151 
152 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
153 				    struct qdf_mem_multi_page_t *pages,
154 				    qdf_dma_context_t memctxt, bool cacheable,
155 				    const char *func, uint32_t line);
156 
157 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
158 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
159 				       __func__, __LINE__)
160 
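/*
 * Usage sketch (illustrative): allocating a cacheable multi-page pool of
 * fixed-size descriptors and releasing it. The element size/count, the
 * struct my_desc type and the NULL check on cacheable_pages are assumptions
 * made for the example; memctxt is a qdf_dma_context_t owned by the caller.
 *
 *	struct qdf_mem_multi_page_t desc_pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &desc_pages, sizeof(struct my_desc),
 *				  1024, memctxt, true);
 *	if (!desc_pages.cacheable_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... elements are reached through desc_pages.cacheable_pages[page] ...
 *
 *	qdf_mem_multi_pages_free(osdev, &desc_pages, memctxt, true);
 */
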
161 /**
162  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
163  *
164  * Call this to ensure there are no active memory allocations being tracked
165  * against the current debug domain. For example, one should call this function
166  * immediately before a call to qdf_debug_domain_set() as a memory leak
167  * detection mechanism.
168  *
169  * e.g.
170  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
171  *
172  *	...
173  *
174  *	// memory is allocated and freed
175  *
176  *	...
177  *
178  *	// before transitioning back to inactive state,
179  *	// make sure all active memory has been freed
180  *	qdf_mem_check_for_leaks();
181  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
182  *
183  *	...
184  *
185  *	// also, before program exit, make sure init time memory is freed
186  *	qdf_mem_check_for_leaks();
187  *	exit();
188  *
189  * Return: None
190  */
191 void qdf_mem_check_for_leaks(void);
192 
193 /**
194  * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
195  * @osdev: OS device handle
196  * @dev: Pointer to device handle
197  * @size: Size to be allocated
198  * @paddr: Physical address
199  * @func: Function name of the call site
200  * @line: Line number of the call site
201  * @caller: Address of the caller function
202  *
203  * Return: pointer to the allocated memory, or NULL if the allocation fails
204  */
205 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
206 				     qdf_size_t size, qdf_dma_addr_t *paddr,
207 				     const char *func, uint32_t line,
208 				     void *caller);
209 
210 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
211 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
212 				       __func__, __LINE__, QDF_RET_IP)
213 
214 /**
215  * qdf_mem_free_consistent_debug() - free consistent qdf memory
216  * @osdev: OS device handle
217  * @size: Size of the memory to be freed
218  * @vaddr: virtual address
219  * @paddr: Physical address
220  * @memctx: Pointer to DMA context
221  * @func: Function name of the call site
222  * @line: Line number of the call site
223  *
224  * Return: none
225  */
226 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
227 				   qdf_size_t size, void *vaddr,
228 				   qdf_dma_addr_t paddr,
229 				   qdf_dma_context_t memctx,
230 				   const char *func, uint32_t line);
231 
232 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
233 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
234 				  __func__, __LINE__)
235 #else
236 static inline bool qdf_mem_debug_config_get(void)
237 {
238 	return false;
239 }
240 
241 /**
242  * qdf_mem_malloc() - allocate QDF memory
243  * @size: Number of bytes of memory to allocate.
244  *
245  * This function will dynamically allocate the specified number of bytes of
246  * memory.
247  *
248  * Return:
249  * Upon successful allocation, returns a non-NULL pointer to the allocated
250  * memory.  If this function is unable to allocate the amount of memory
251  * specified (for any reason) it returns NULL.
252  */
253 #define qdf_mem_malloc(size) \
254 	__qdf_mem_malloc(size, __func__, __LINE__)
255 
256 #define qdf_mem_malloc_fl(size, func, line) \
257 	__qdf_mem_malloc(size, func, line)
258 
259 /**
260  * qdf_mem_malloc_atomic() - allocate QDF memory atomically
261  * @size: Number of bytes of memory to allocate.
262  *
263  * This function will dynamically allocate the specified number of bytes of
264  * memory without sleeping, so it is suitable for use in atomic context.
265  *
266  * Return:
267  * Upon successful allocation, returns a non-NULL pointer to the allocated
268  * memory.  If this function is unable to allocate the amount of memory
269  * specified (for any reason) it returns NULL.
270  */
271 #define qdf_mem_malloc_atomic(size) \
272 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
273 
274 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
275 			       const char *func,
276 			       uint32_t line);
277 
278 #define qdf_mem_free(ptr) \
279 	__qdf_mem_free(ptr)
280 
281 static inline void qdf_mem_check_for_leaks(void) { }
282 
283 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
284 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
285 
286 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
287 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
288 
289 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
290 			       struct qdf_mem_multi_page_t *pages,
291 			       size_t element_size, uint16_t element_num,
292 			       qdf_dma_context_t memctxt, bool cacheable);
293 
294 void qdf_mem_multi_pages_free(qdf_device_t osdev,
295 			      struct qdf_mem_multi_page_t *pages,
296 			      qdf_dma_context_t memctxt, bool cacheable);
297 
298 #endif /* MEMORY_DEBUG */
299 
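/*
 * Usage sketch (illustrative): allocating and freeing a DMA-coherent ring.
 * The ring_size value and the vaddr/paddr variables are hypothetical, and
 * memctx would normally come from the owning descriptor via
 * qdf_get_dma_mem_context().
 *
 *	void *vaddr;
 *	qdf_dma_addr_t paddr;
 *	uint32_t ring_size = 4096;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_size, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... program paddr into the hardware, access the ring through vaddr ...
 *
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size, vaddr, paddr,
 *				memctx);
 */
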
300 /**
301  * qdf_aligned_malloc() - allocates aligned QDF memory.
302  * @size: Pointer to the size to be allocated (may be updated on re-allocation).
303  * @vaddr_unaligned: Unaligned virtual address.
304  * @paddr_unaligned: Unaligned physical address.
305  * @paddr_aligned: Aligned physical address.
306  * @align: Base address alignment.
307  * @func: Function name of the call site.
308  * @line: Line number of the call site.
309  *
310  * This function will dynamically allocate the specified number of bytes of
311  * memory. Checks if the allocated base address is aligned to @align.
312  * If not, it frees the allocated memory, adds @align to the allocation size
313  * and re-allocates the memory.
314  *
315  * Return:
316  * Upon successful allocation, returns an aligned base address of the allocated
317  * memory.  If this function is unable to allocate the amount of memory
318  * specified (for any reason) it returns NULL.
319  */
320 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
321 			   paddr_aligned, align) \
322 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
323 			   paddr_aligned, align, __func__, __LINE__)
324 
325 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
326 			    qdf_dma_addr_t *paddr_unaligned,
327 			    qdf_dma_addr_t *paddr_aligned,
328 			    uint32_t align,
329 			    const char *func, uint32_t line);
330 
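/*
 * Usage sketch (illustrative): requesting memory whose base address is
 * aligned to an 8-byte boundary. The variable names are hypothetical, and
 * the return value is taken to be the aligned virtual base address as
 * described above; on return, alloc_size may have been increased to cover
 * the extra alignment padding.
 *
 *	uint32_t alloc_size = ring_bytes;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *vaddr_aligned;
 *
 *	vaddr_aligned = qdf_aligned_malloc(&alloc_size, &vaddr_unaligned,
 *					   &paddr_unaligned, &paddr_aligned, 8);
 *	if (!vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 */
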
331 /**
332  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
333  * @osdev: OS device handle
334  * @size: Pointer to the size to be allocated.
335  * @vaddr_unaligned: Unaligned virtual address.
336  * @paddr_unaligned: Unaligned physical address.
337  * @paddr_aligned: Aligned physical address.
338  * @align: Base address alignment.
339  * @func: Function name of the call site.
340  * @line: Line number of the call site.
341  *
342  * Return: pointer to the allocated memory, or NULL if the allocation fails.
343  */
344 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
345 					 paddr_unaligned, paddr_aligned, \
346 					 align) \
347 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
348 					    paddr_unaligned, paddr_aligned, \
349 					    align, __func__, __LINE__)
350 
351 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
352 					  void **vaddr_unaligned,
353 					  qdf_dma_addr_t *paddr_unaligned,
354 					  qdf_dma_addr_t *paddr_aligned,
355 					  uint32_t align, const char *func,
356 					  uint32_t line);
357 
358 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
359 
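/**
 * qdf_mem_set_io() - set (fill) I/O memory with a specified byte value
 * @ptr: pointer to the I/O memory to be set
 * @num_bytes: number of bytes to set
 * @value: byte value to set
 *
 * Return: None
 */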
360 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
361 
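/**
 * qdf_mem_copy_toio() - copy memory into an I/O region
 * @dst_addr: pointer to the destination I/O memory
 * @src_addr: pointer to the source memory
 * @num_bytes: number of bytes to copy
 *
 * Return: None
 */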
362 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
363 					   uint32_t num_bytes);
364 
365 /**
366  * qdf_mem_set() - set (fill) memory with a specified byte value.
367  * @ptr: Pointer to memory that will be set
368  * @num_bytes: Number of bytes to be set
369  * @value: Byte value to set in memory
370  *
371  * WARNING: the @num_bytes and @value parameters are swapped relative to the
372  * standard C function memset(); please take care when calling this function!
373  *
374  * Return: None
375  */
376 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
377 
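/*
 * Example of the argument-order difference called out above (illustrative,
 * with a hypothetical buffer): both calls below fill buf with 0xff.
 *
 *	uint8_t buf[32];
 *
 *	memset(buf, 0xff, sizeof(buf));          (standard C argument order)
 *	qdf_mem_set(buf, sizeof(buf), 0xff);     (QDF order: size, then value)
 */
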
378 /**
379  * qdf_mem_zero() - zero out memory
380  * @ptr: pointer to memory that will be set to zero
381  * @num_bytes: number of bytes to zero
382  *
383  * This function sets the memory location to all zeros, essentially clearing
384  * the memory.
385  *
386  * Return: None
387  */
388 static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
389 {
390 	qdf_mem_set(ptr, num_bytes, 0);
391 }
392 
393 /**
394  * qdf_mem_copy() - copy memory
395  * @dst_addr: Pointer to destination memory location (to copy to)
396  * @src_addr: Pointer to source memory location (to copy from)
397  * @num_bytes: Number of bytes to copy.
398  *
399  * Copy host memory from one location to another, similar to memcpy in
400  * standard C.  Note this function does not specifically handle overlapping
401  * source and destination memory locations.  Calling this function with
402  * overlapping source and destination memory locations produces
403  * unpredictable results.  Use qdf_mem_move() if the source and destination
404  * memory locations overlap (or could overlap).
405  *
406  * Return: none
407  */
408 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
409 
410 /**
411  * qdf_mem_move() - move memory
412  * @dst_addr: pointer to destination memory location (to move to)
413  * @src_addr: pointer to source memory location (to move from)
414  * @num_bytes: number of bytes to move.
415  *
416  * Move host memory from one location to another, similar to memmove in
417  * standard C.  Note this function *does* handle overlapping
418  * source and destination memory locations.
419  *
420  * Return: None
421  */
422 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
423 
424 /**
425  * qdf_mem_cmp() - memory compare
426  * @left: pointer to one location in memory to compare
427  * @right: pointer to second location in memory to compare
428  * @size: the number of bytes to compare
429  *
430  * Function to compare two pieces of memory, similar to memcmp function
431  * in standard C.
432  *
433  * Return:
434  *	0 -- equal
435  *	< 0 -- *left is less than *right
436  *	> 0 -- *left is greater than *right
437  */
438 int qdf_mem_cmp(const void *left, const void *right, size_t size);
439 
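/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 * @dst_addr: pointer to the destination Ethernet address (6 bytes)
 * @src_addr: pointer to the source Ethernet address (6 bytes)
 *
 * Return: None
 */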
440 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
441 
442 /**
443  * qdf_mem_map_nbytes_single() - Map memory for DMA
444  * @osdev: pointer to OS device context
445  * @buf: pointer to memory to be dma mapped
446  * @dir: DMA map direction
447  * @nbytes: number of bytes to be mapped.
448  * @phy_addr: pointer to receive the physical address.
449  *
450  * Return: success/failure
451  */
452 static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
453 						 qdf_dma_dir_t dir, int nbytes,
454 						 qdf_dma_addr_t *phy_addr)
455 {
456 #if defined(HIF_PCI) || defined(HIF_IPCI)
457 	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
458 #else
459 	return 0;
460 #endif
461 }
462 
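/**
 * qdf_mem_dma_cache_sync() - synchronize the data cache for a DMA buffer
 * @osdev: OS device handle
 * @buf: DMA (bus) address of the buffer
 * @dir: DMA direction
 * @nbytes: number of bytes to synchronize
 *
 * Return: None
 */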
463 static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
464 					  qdf_dma_addr_t buf,
465 					  qdf_dma_dir_t dir,
466 					  int nbytes)
467 {
468 	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
469 }
470 
471 /**
472  * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
473  * @osdev: pointer to OS device context
474  * @phy_addr: physical address of memory to be dma unmapped
475  * @dir: DMA unmap direction
476  * @nbytes: number of bytes to be unmapped.
477  *
478  * Return: none
479  */
480 static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
481 					       qdf_dma_addr_t phy_addr,
482 					       qdf_dma_dir_t dir,
483 					       int nbytes)
484 {
485 #if defined(HIF_PCI) || defined(HIF_IPCI)
486 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
487 #endif
488 }
489 
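/*
 * Usage sketch (illustrative): DMA-mapping a driver-owned buffer before
 * handing it to the device, then unmapping it afterwards. The buf/buf_len
 * variables are hypothetical, and treating a non-zero return from
 * qdf_mem_map_nbytes_single() as failure is an assumption about its
 * success/failure convention.
 *
 *	qdf_dma_addr_t phy_addr;
 *
 *	if (qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_TO_DEVICE,
 *				      buf_len, &phy_addr))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	... hand phy_addr to the device ...
 *
 *	qdf_mem_unmap_nbytes_single(osdev, phy_addr, QDF_DMA_TO_DEVICE,
 *				    buf_len);
 */
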
490 /**
491  * qdf_mempool_init() - Create and initialize a memory pool
492  * @osdev: platform device object
493  * @pool_addr: address of the pool created
494  * @elem_cnt: no. of elements in pool
495  * @elem_size: size of each pool element in bytes
496  * @flags: flags
497  * Return: 0 on success, non-zero on failure (the pool handle is returned via @pool_addr)
498  */
499 static inline int qdf_mempool_init(qdf_device_t osdev,
500 				   qdf_mempool_t *pool_addr, int elem_cnt,
501 				   size_t elem_size, uint32_t flags)
502 {
503 	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
504 				  flags);
505 }
506 
507 /**
508  * qdf_mempool_destroy() - Destroy a memory pool
509  * @osdev: platform device object
510  * @pool: Handle to the memory pool
511  * Return: none
512  */
513 static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
514 {
515 	__qdf_mempool_destroy(osdev, pool);
516 }
517 
518 /**
519  * qdf_mempool_alloc() - Allocate an element from the memory pool
520  * @osdev: platform device object
521  * @pool: Handle to the memory pool
522  * Return: Pointer to the allocated element or NULL if the pool is empty
523  */
524 static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
525 {
526 	return (void *)__qdf_mempool_alloc(osdev, pool);
527 }
528 
529 /**
530  * qdf_mempool_free() - Free a memory pool element
531  * @osdev: Platform device object
532  * @pool: Handle to memory pool
533  * @buf: Element to be freed
534  * Return: none
535  */
536 static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
537 				    void *buf)
538 {
539 	__qdf_mempool_free(osdev, pool, buf);
540 }
541 
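/*
 * Usage sketch (illustrative): creating a pool of fixed-size elements,
 * taking one element, and tearing the pool down. The struct my_elem type
 * and the element count are hypothetical, and treating a non-zero return
 * from qdf_mempool_init() as failure is an assumption.
 *
 *	qdf_mempool_t pool;
 *	struct my_elem *elem;
 *
 *	if (qdf_mempool_init(osdev, &pool, 128, sizeof(struct my_elem), 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	elem = qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		... use elem ...
 *		qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	qdf_mempool_destroy(osdev, pool);
 */

/**
 * qdf_mem_dma_sync_single_for_device() - sync a DMA buffer for device access
 * @osdev: OS device handle
 * @bus_addr: DMA (bus) address of the buffer
 * @size: size of the buffer in bytes
 * @direction: DMA direction
 *
 * Hand the buffer back to the device after CPU access, analogous to the
 * Linux dma_sync_single_for_device() API.
 *
 * Return: None
 */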
542 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
543 					qdf_dma_addr_t bus_addr,
544 					qdf_size_t size,
545 					__dma_data_direction direction);
546 
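/**
 * qdf_mem_dma_sync_single_for_cpu() - sync a DMA buffer for CPU access
 * @osdev: OS device handle
 * @bus_addr: DMA (bus) address of the buffer
 * @size: size of the buffer in bytes
 * @direction: DMA direction
 *
 * Return: None
 */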
547 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
548 					qdf_dma_addr_t bus_addr,
549 					qdf_size_t size,
550 					__dma_data_direction direction);
551 
552 int qdf_mem_multi_page_link(qdf_device_t osdev,
553 		struct qdf_mem_multi_page_t *pages,
554 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
555 
556 #ifdef WLAN_DEBUGFS
557 
558 /**
559  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
560  * @size: number of bytes to increment by
561  *
562  * Return: None
563  */
564 void qdf_mem_kmalloc_inc(qdf_size_t size);
565 
566 /**
567  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
568  * @size: number of bytes to decrement by
569  *
570  * Return: None
571  */
572 void qdf_mem_kmalloc_dec(qdf_size_t size);
573 
574 #else
575 
576 static inline void qdf_mem_kmalloc_inc(qdf_size_t size) { }
577 static inline void qdf_mem_kmalloc_dec(qdf_size_t size) { }
578 
579 #endif /* WLAN_DEBUGFS */
580 
581 /**
582  * qdf_mem_skb_inc() - increment total skb allocation size
583  * @size: size to be added
584  *
585  * Return: none
586  */
587 void qdf_mem_skb_inc(qdf_size_t size);
588 
589 /**
590  * qdf_mem_skb_dec() - decrement total skb allocation size
591  * @size: size to be decremented
592  *
593  * Return: none
594  */
595 void qdf_mem_skb_dec(qdf_size_t size);
596 
597 /**
598  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
599  * @num: number of entries required in the table
600  *
601  * Allocate mapping table for DMA memory allocation. This is needed for
602  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
603  *
604  * Return: shared memory info storage table pointer
605  */
606 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
607 {
608 	qdf_mem_info_t *mem_info_arr;
609 
610 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
611 	return mem_info_arr;
612 }
613 
614 /**
615  * qdf_update_mem_map_table() - Update DMA memory map info
616  * @osdev: Parent device instance
617  * @mem_info: Pointer to shared memory information
618  * @dma_addr: dma address
619  * @mem_size: memory size allocated
620  *
621  * Store DMA shared memory information
622  *
623  * Return: none
624  */
625 static inline void qdf_update_mem_map_table(qdf_device_t osdev,
626 					    qdf_mem_info_t *mem_info,
627 					    qdf_dma_addr_t dma_addr,
628 					    uint32_t mem_size)
629 {
630 	if (!mem_info) {
631 		qdf_nofl_err("%s: NULL mem_info", __func__);
632 		return;
633 	}
634 
635 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
636 }
637 
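/*
 * Usage sketch (illustrative): building a table of DMA mapping records for
 * buffers that will be shared (e.g. with IPA) while SMMU Stage 1 translation
 * is enabled. The num_buffers loop and the buf_dma_addr[]/buf_size sources
 * are hypothetical.
 *
 *	qdf_mem_info_t *tbl;
 *	uint32_t i;
 *
 *	tbl = qdf_mem_map_table_alloc(num_buffers);
 *	if (!tbl)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	for (i = 0; i < num_buffers; i++)
 *		qdf_update_mem_map_table(osdev, &tbl[i],
 *					 buf_dma_addr[i], buf_size);
 */
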
638 /**
639  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
640  * @osdev: parent device instance
641  *
642  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
643  */
644 static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
645 {
646 	return __qdf_mem_smmu_s1_enabled(osdev);
647 }
648 
649 /**
650  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
651  * @osdev: Parent device instance
652  * @dma_addr: DMA/IOVA address
653  *
654  * Get actual physical address from dma_addr based on SMMU enablement status.
655  * If SMMU Stage 1 translation is enabled, the DMA APIs return an IO virtual
656  * address (IOVA); otherwise they return a physical address. In the former
657  * case, look up the physical address that the SMMU maps to the IOVA.
658  *
659  * Return: dmaable physical address
660  */
661 static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
662 							qdf_dma_addr_t dma_addr)
663 {
664 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
665 }
666 
667 /**
668  * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
669  * @dev: device instance
670  * @sgt: scatter gather table pointer
671  * @cpu_addr: HLOS virtual address
672  * @dma_addr: dma address
673  * @size: allocated memory size
674  *
675  * Return: 0 on success, or an error value on failure
676  */
677 static inline int
678 qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
679 			qdf_dma_addr_t dma_addr, size_t size)
680 {
681 	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
682 }
683 
684 /**
685  * qdf_mem_free_sgtable() - Free a previously allocated sg table
686  * @sgt: the mapped sg table header
687  *
688  * Return: None
689  */
690 static inline void
691 qdf_mem_free_sgtable(struct sg_table *sgt)
692 {
693 	__qdf_os_mem_free_sgtable(sgt);
694 }
695 
696 /**
697  * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
698  * @sgt: scatter gather table pointer
699  *
700  * Return: None
701  */
702 static inline void
703 qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
704 {
705 	__qdf_dma_get_sgtable_dma_addr(sgt);
706 }
707 
708 /**
709  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
710  * @osdev: Parent device instance
711  * @mem_info: Pointer to allocated memory information
712  *
713  * Get dma address based on SMMU enablement status. If SMMU Stage 1
714  * translation is enabled, the DMA APIs return an IO virtual address;
715  * otherwise they return a physical address.
716  *
717  * Return: dma address
718  */
719 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
720 						  qdf_mem_info_t *mem_info)
721 {
722 	return __qdf_mem_get_dma_addr(osdev, mem_info);
723 }
724 
725 /**
726  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
727  * @osdev: Parent device instance
728  * @mem_info: Pointer to allocated memory information
729  *
730  * Based on smmu stage 1 translation enablement, return corresponding dma
731  * address storage pointer.
732  *
733  * Return: dma address storage pointer
734  */
735 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
736 						       qdf_mem_info_t *mem_info)
737 {
738 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
739 }
740 
741 
742 /**
743  * qdf_mem_get_dma_size() - Return DMA memory size
744  * @osdev: parent device instance
745  * @mem_info: Pointer to allocated memory information
746  *
747  * Return: DMA memory size
748  */
749 static inline uint32_t
750 qdf_mem_get_dma_size(qdf_device_t osdev,
751 		       qdf_mem_info_t *mem_info)
752 {
753 	return __qdf_mem_get_dma_size(osdev, mem_info);
754 }
755 
756 /**
757  * qdf_mem_set_dma_size() - Set DMA memory size
758  * @osdev: parent device instance
759  * @mem_info: Pointer to allocated memory information
760  * @mem_size: memory size allocated
761  *
762  * Return: none
763  */
764 static inline void
765 qdf_mem_set_dma_size(qdf_device_t osdev,
766 		       qdf_mem_info_t *mem_info,
767 		       uint32_t mem_size)
768 {
769 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
770 }
771 
772 /**
773  * qdf_mem_get_dma_pa() - Return DMA physical address
774  * @osdev: parent device instance
775  * @mem_info: Pointer to allocated memory information
776  *
777  * Return: DMA physical address
778  */
779 static inline qdf_dma_addr_t
780 qdf_mem_get_dma_pa(qdf_device_t osdev,
781 		     qdf_mem_info_t *mem_info)
782 {
783 	return __qdf_mem_get_dma_pa(osdev, mem_info);
784 }
785 
786 /**
787  * qdf_mem_set_dma_pa() - Set DMA physical address
788  * @osdev: parent device instance
789  * @mem_info: Pointer to allocated memory information
790  * @dma_pa: DMA physical address
791  *
792  * Return: none
793  */
794 static inline void
795 qdf_mem_set_dma_pa(qdf_device_t osdev,
796 		     qdf_mem_info_t *mem_info,
797 		     qdf_dma_addr_t dma_pa)
798 {
799 	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
800 }
801 
802 /**
803  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
804  * @osdev: parent device instance
806  * @size: size to be allocated
807  *
808  * Allocate DMA memory which will be shared with external kernel module. This
809  * information is needed for SMMU mapping.
810  *
811  * Return: pointer to the allocated shared memory structure, or NULL on failure
812  */
813 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
814 
815 /**
816  * qdf_mem_shared_mem_free() - Free shared memory
817  * @osdev: parent device instance
818  * @shared_mem: shared memory information storage
819  *
820  * Free DMA shared memory resource
821  *
822  * Return: None
823  */
824 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
825 					   qdf_shared_mem_t *shared_mem)
826 {
827 	if (!shared_mem) {
828 		qdf_nofl_err("%s: NULL shared mem struct passed",
829 			     __func__);
830 		return;
831 	}
832 
833 	if (shared_mem->vaddr) {
834 		qdf_mem_free_consistent(osdev, osdev->dev,
835 					qdf_mem_get_dma_size(osdev,
836 						&shared_mem->mem_info),
837 					shared_mem->vaddr,
838 					qdf_mem_get_dma_addr(osdev,
839 						&shared_mem->mem_info),
840 					qdf_get_dma_mem_context(shared_mem,
841 								memctx));
842 	}
843 	qdf_mem_free_sgtable(&shared_mem->sgtable);
844 	qdf_mem_free(shared_mem);
845 }
846 
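/*
 * Usage sketch (illustrative): allocating a block of DMA memory to be shared
 * with an external entity (e.g. IPA) and releasing it again. The 4096-byte
 * size is hypothetical.
 *
 *	qdf_shared_mem_t *shared_mem;
 *
 *	shared_mem = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shared_mem)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... export shared_mem->mem_info and shared_mem->sgtable to the peer ...
 *
 *	qdf_mem_shared_mem_free(osdev, shared_mem);
 */
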
847 #endif /* __QDF_MEMORY_H */
848