xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_mem.h (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * QCA driver framework (QDF) memory management APIs
22  */
23 
24 #if !defined(__QDF_MEMORY_H)
25 #define __QDF_MEMORY_H
26 
27 /* Include Files */
28 #include <qdf_types.h>
29 #include <i_qdf_mem.h>
30 #include <i_qdf_trace.h>
31 
32 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
33 
34 /**
35  * qdf_align() - align to the given size.
36  * @a: input that needs to be aligned.
37  * @align_size: boundary on which 'a' has to be aligned.
38  *
39  * Return: aligned value.
40  */
41 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
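
/*
 * Example: a minimal usage sketch of qdf_align(); the 'len' variable is
 * hypothetical. Round a buffer length up to the cache line size:
 *
 *	uint32_t len = 100;
 *
 *	len = qdf_align(len, QDF_CACHE_LINE_SZ);
 *	// len is now a multiple of the cache line size
 */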
42 
43 /**
44  * struct qdf_mem_dma_page_t - Allocated dmaable page
45  * @page_v_addr_start: Page start virtual address
46  * @page_v_addr_end: Page end virtual address
47  * @page_p_addr: Page start physical address
48  */
49 struct qdf_mem_dma_page_t {
50 	char *page_v_addr_start;
51 	char *page_v_addr_end;
52 	qdf_dma_addr_t page_p_addr;
53 };
54 
55 /**
56  * struct qdf_mem_multi_page_t - multiple page allocation information storage
57  * @num_element_per_page: Number of elements in a single page
58  * @num_pages: Number of pages needed for the allocation
59  * @dma_pages: page information storage in case of coherent memory
60  * @cacheable_pages: page information storage in case of cacheable memory
61  */
62 struct qdf_mem_multi_page_t {
63 	uint16_t num_element_per_page;
64 	uint16_t num_pages;
65 	struct qdf_mem_dma_page_t *dma_pages;
66 	void **cacheable_pages;
67 };
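
/*
 * Example: locating element 'i' within a cacheable multi-page allocation.
 * An illustrative sketch only; 'pages' is assumed to have been populated by
 * qdf_mem_multi_pages_alloc() with cacheable = true, and 'element_size' is
 * the size passed to that call.
 *
 *	uint16_t page_idx = i / pages->num_element_per_page;
 *	uint16_t elem_idx = i % pages->num_element_per_page;
 *	void *elem = (uint8_t *)pages->cacheable_pages[page_idx] +
 *		     elem_idx * element_size;
 */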
68 
69 
70 /* Preprocessor definitions and constants */
71 
72 typedef __qdf_mempool_t qdf_mempool_t;
73 
74 /**
75  * qdf_mem_init() - Initialize QDF memory module
76  *
77  * Return: None
78  *
79  */
80 void qdf_mem_init(void);
81 
82 /**
83  * qdf_mem_exit() - Exit QDF memory module
84  *
85  * Return: None
86  *
87  */
88 void qdf_mem_exit(void);
89 
90 #define QDF_MEM_FUNC_NAME_SIZE 48
91 
92 #ifdef MEMORY_DEBUG
93 /**
94  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
95  * @size: Number of bytes of memory to allocate.
96  * @func: Function name of the call site
97  * @line: Line number of the call site
98  * @caller: Address of the caller function
99  * @flag: GFP flag
100  *
101  * This function will dynamically allocate the specified number of bytes of
102  * memory and add it to the qdf tracking list to check for memory leaks and
103  * corruption
104  *
105  * Return: A valid memory location on success, or NULL on failure
106  */
107 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
108 			   void *caller, uint32_t flag);
109 
110 #define qdf_mem_malloc(size) \
111 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
112 
113 #define qdf_mem_malloc_fl(size, func, line) \
114 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
115 
116 #define qdf_mem_malloc_atomic(size) \
117 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
118 /**
119  * qdf_mem_free_debug() - debug version of qdf_mem_free
120  * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: Function name of the call site
 * @line: Line number of the call site
121  *
122  * This function will free the memory pointed to by 'ptr'. It also checks for
123  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
124  *
125  * Return: none
126  */
127 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
128 
129 #define qdf_mem_free(ptr) \
130 	qdf_mem_free_debug(ptr, __func__, __LINE__)
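
/*
 * Example: typical allocate/free usage. In MEMORY_DEBUG builds the macros
 * above transparently record the caller's function name, line number and
 * return address for leak tracking; callers simply use qdf_mem_malloc() and
 * qdf_mem_free(). The 'struct foo' context here is hypothetical.
 *
 *	struct foo *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(ctx);
 */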
131 
132 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
133 				     struct qdf_mem_multi_page_t *pages,
134 				     size_t element_size, uint16_t element_num,
135 				     qdf_dma_context_t memctxt, bool cacheable,
136 				     const char *func, uint32_t line,
137 				     void *caller);
138 
139 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
140 				  memctxt, cacheable) \
141 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
142 					element_num, memctxt, cacheable, \
143 					__func__, __LINE__, QDF_RET_IP)
144 
145 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
146 				    struct qdf_mem_multi_page_t *pages,
147 				    qdf_dma_context_t memctxt, bool cacheable,
148 				    const char *func, uint32_t line);
149 
150 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
151 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
152 				       __func__, __LINE__)
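
/*
 * Example: allocating and releasing a cacheable multi-page descriptor pool.
 * A sketch only; 'osdev', 'memctxt', 'struct my_desc' and the element count
 * are placeholders, and the failure check relies on cacheable_pages
 * remaining NULL when the allocation fails.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_desc),
 *				  1024, memctxt, true);
 *	if (!pages.cacheable_pages)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */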
153 
154 /**
155  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
156  *
157  * Call this to ensure there are no active memory allocations being tracked
158  * against the current debug domain. For example, one should call this function
159  * immediately before a call to qdf_debug_domain_set() as a memory leak
160  * detection mechanism.
161  *
162  * e.g.
163  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
164  *
165  *	...
166  *
167  *	// memory is allocated and freed
168  *
169  *	...
170  *
171  *	// before transitioning back to inactive state,
172  *	// make sure all active memory has been freed
173  *	qdf_mem_check_for_leaks();
174  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
175  *
176  *	...
177  *
178  *	// also, before program exit, make sure init time memory is freed
179  *	qdf_mem_check_for_leaks();
180  *	exit();
181  *
182  * Return: None
183  */
184 void qdf_mem_check_for_leaks(void);
185 
186 /**
187  * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
188  * @osdev: OS device handle
189  * @dev: Pointer to device handle
190  * @size: Size to be allocated
191  * @paddr: Physical address
192  * @func: Function name of the call site
193  * @line: Line number of the call site
194  * @caller: Address of the caller function
195  *
196  * Return: pointer to allocated memory, or NULL if the allocation fails
197  */
198 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
199 				     qdf_size_t size, qdf_dma_addr_t *paddr,
200 				     const char *func, uint32_t line,
201 				     void *caller);
202 
203 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
204 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
205 				       __func__, __LINE__, QDF_RET_IP)
206 
207 /**
208  * qdf_mem_free_consistent_debug() - free consistent qdf memory
209  * @osdev: OS device handle
 * @dev: Pointer to device handle
210  * @size: Size of the memory to be freed
211  * @vaddr: virtual address
212  * @paddr: Physical address
213  * @memctx: Pointer to DMA context
214  * @func: Function name of the call site
215  * @line: Line number of the call site
216  *
217  * Return: none
218  */
219 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
220 				   qdf_size_t size, void *vaddr,
221 				   qdf_dma_addr_t paddr,
222 				   qdf_dma_context_t memctx,
223 				   const char *func, uint32_t line);
224 
225 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
226 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
227 				  __func__, __LINE__)
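
/*
 * Example: DMA-coherent ring allocation and teardown. An illustrative
 * sketch; 'ring', 'osdev' and the ring's memctx member are caller-owned
 * placeholders.
 *
 *	ring->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_size,
 *					       &ring->paddr);
 *	if (!ring->vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size, ring->vaddr,
 *				ring->paddr,
 *				qdf_get_dma_mem_context(ring, memctx));
 */
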
228 #else
229 
230 /**
231  * qdf_mem_malloc() - allocate QDF memory
232  * @size: Number of bytes of memory to allocate.
233  *
234  * This function will dynamically allocate the specified number of bytes of
235  * memory.
236  *
237  * Return:
238  * Upon successful allocation, returns a non-NULL pointer to the allocated
239  * memory.  If this function is unable to allocate the amount of memory
240  * specified (for any reason) it returns NULL.
241  */
242 #define qdf_mem_malloc(size) \
243 	qdf_mem_malloc_fl(size, __func__, __LINE__)
244 
245 void *qdf_mem_malloc_fl(qdf_size_t size, const char *func, uint32_t line);
246 
247 /**
248  * qdf_mem_malloc_atomic() - allocate QDF memory atomically
249  * @size: Number of bytes of memory to allocate.
250  *
251  * This function will dynamically allocate the specified number of bytes of
252  * memory.
253  *
254  * Return:
255  * Upon successful allocation, returns a non-NULL pointer to the allocated
256  * memory.  If this function is unable to allocate the amount of memory
257  * specified (for any reason) it returns NULL.
258  */
259 #define qdf_mem_malloc_atomic(size) \
260 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
261 
262 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
263 			       const char *func,
264 			       uint32_t line);
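
/*
 * Example: allocation from a context that must not sleep, e.g. a tasklet or
 * a section holding a spinlock. A sketch; 'struct my_event' is hypothetical.
 *
 *	struct my_event *evt = qdf_mem_malloc_atomic(sizeof(*evt));
 *
 *	if (!evt)
 *		return;	// atomic allocations fail more readily; handle it
 */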
265 
266 /**
267  * qdf_mem_free() - free QDF memory
268  * @ptr: Pointer to the starting address of the memory to be freed.
269  *
270  * Return: None
271  */
272 void qdf_mem_free(void *ptr);
273 
274 static inline void qdf_mem_check_for_leaks(void) { }
275 
276 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
277 			       qdf_size_t size, qdf_dma_addr_t *paddr);
278 
279 void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
280 			     qdf_size_t size, void *vaddr,
281 			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
282 
283 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
284 			       struct qdf_mem_multi_page_t *pages,
285 			       size_t element_size, uint16_t element_num,
286 			       qdf_dma_context_t memctxt, bool cacheable);
287 
288 void qdf_mem_multi_pages_free(qdf_device_t osdev,
289 			      struct qdf_mem_multi_page_t *pages,
290 			      qdf_dma_context_t memctxt, bool cacheable);
291 
292 #endif /* MEMORY_DEBUG */
293 
294 /**
295  * qdf_aligned_malloc() - allocates aligned QDF memory.
296  * @size: Size to be allocated
297  * @vaddr_unaligned: Unaligned virtual address.
298  * @paddr_unaligned: Unaligned physical address.
299  * @paddr_aligned: Aligned physical address.
300  * @align: Base address alignment.
301  * @func: Function name of the call site.
302  * @line: Line number of the call site.
303  *
304  * This function will dynamically allocate the specified number of bytes of
305  * memory. It checks whether the allocated base address is aligned to @align.
306  * If not, it frees the allocated memory, adds @align to the allocation size
307  * and re-allocates the memory.
308  *
309  * Return:
310  * Upon successful allocate, returns an aligned base address of the allocated
311  * memory.  If this function is unable to allocate the amount of memory
312  * specified (for any reason) it returns NULL.
313  */
314 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
315 			   paddr_aligned, align) \
316 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
317 			   paddr_aligned, align, __func__, __LINE__)
318 
319 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
320 			    qdf_dma_addr_t *paddr_unaligned,
321 			    qdf_dma_addr_t *paddr_aligned,
322 			    uint32_t align,
323 			    const char *func, uint32_t line);
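
/*
 * Example: requesting a ring base address aligned to 8 bytes. A sketch; the
 * 'srng' fields are hypothetical, and the allocation size may be updated by
 * the call if it has to re-allocate with extra padding.
 *
 *	srng->base_vaddr_aligned =
 *		qdf_aligned_malloc(&srng->alloc_size,
 *				   &srng->base_vaddr_unaligned,
 *				   &srng->base_paddr_unaligned,
 *				   &srng->base_paddr_aligned, 8);
 *	if (!srng->base_vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 */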
324 
325 /**
326  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
327  * @osdev: OS device handle
328  * @size: Size to be allocated
329  * @vaddr_unaligned: Unaligned virtual address.
330  * @paddr_unaligned: Unaligned physical address.
331  * @paddr_aligned: Aligned physical address.
332  * @align: Base address alignment.
333  * @func: Function name of the call site.
334  * @line: Line number of the call site.
335  *
336  * Return: pointer to allocated memory, or NULL if the allocation fails.
337  */
338 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
339 					 paddr_unaligned, paddr_aligned, \
340 					 align) \
341 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
342 					    paddr_unaligned, paddr_aligned, \
343 					    align, __func__, __LINE__)
344 
345 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
346 					  void **vaddr_unaligned,
347 					  qdf_dma_addr_t *paddr_unaligned,
348 					  qdf_dma_addr_t *paddr_aligned,
349 					  uint32_t align, const char *func,
350 					  uint32_t line);
351 
352 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
353 
354 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
355 
356 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
357 					   uint32_t num_bytes);
358 
359 /**
360  * qdf_mem_set() - set (fill) memory with a specified byte value.
361  * @ptr: Pointer to memory that will be set
362  * @num_bytes: Number of bytes to be set
363  * @value: Byte value to set in the memory
364  *
365  * WARNING: the parameters @num_bytes and @value are swapped compared with the
366  * standard C function memset(); please ensure correct usage of this function!
367  *
368  * Return: None
369  */
370 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
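
/*
 * Example: note the argument order relative to memset(). Both lines below
 * fill 'buf' (a hypothetical local array) with 0xff:
 *
 *	qdf_mem_set(buf, sizeof(buf), 0xff);	// (ptr, num_bytes, value)
 *	memset(buf, 0xff, sizeof(buf));		// (ptr, value, num_bytes)
 */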
371 
372 /**
373  * qdf_mem_zero() - zero out memory
374  * @ptr: pointer to memory that will be set to zero
375  * @num_bytes: number of bytes to zero
376  *
377  * This function sets the memory location to all zeros, essentially clearing
378  * the memory.
379  *
380  * Return: None
381  */
382 static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
383 {
384 	qdf_mem_set(ptr, num_bytes, 0);
385 }
386 
387 /**
388  * qdf_mem_copy() - copy memory
389  * @dst_addr: Pointer to destination memory location (to copy to)
390  * @src_addr: Pointer to source memory location (to copy from)
391  * @num_bytes: Number of bytes to copy.
392  *
393  * Copy host memory from one location to another, similar to memcpy in
394  * standard C.  Note this function does not specifically handle overlapping
395  * source and destination memory locations.  Calling this function with
396  * overlapping source and destination memory locations will produce
397  * unpredictable results.  Use qdf_mem_move() if the memory locations
398  * for the source and destination are overlapping (or could be overlapping!)
399  *
400  * Return: none
401  */
402 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
403 
404 /**
405  * qdf_mem_move() - move memory
406  * @dst_addr: pointer to destination memory location (to move to)
407  * @src_addr: pointer to source memory location (to move from)
408  * @num_bytes: number of bytes to move.
409  *
410  * Move host memory from one location to another, similar to memmove in
411  * standard C.  Note this function *does* handle overlapping
412  * source and destination memory locations.
413  *
414  * Return: None
415  */
416 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
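
/*
 * Example: shifting the entries of a hypothetical 'ring' array down by one
 * position. Source and destination overlap here, so qdf_mem_move() must be
 * used rather than qdf_mem_copy().
 *
 *	qdf_mem_move(&ring[0], &ring[1],
 *		     (num_entries - 1) * sizeof(ring[0]));
 */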
417 
418 /**
419  * qdf_mem_cmp() - memory compare
420  * @left: pointer to one location in memory to compare
421  * @right: pointer to second location in memory to compare
422  * @size: the number of bytes to compare
423  *
424  * Function to compare two pieces of memory, similar to memcmp function
425  * in standard C.
426  *
427  * Return:
428  *	0 -- equal
429  *	< 0 -- *left is less than *right
430  *	> 0 -- *left is greater than *right
431  */
432 int qdf_mem_cmp(const void *left, const void *right, size_t size);
433 
/**
 * qdf_ether_addr_copy() - copy an Ethernet (MAC) address
 * @dst_addr: Pointer to the destination Ethernet address
 * @src_addr: Pointer to the source Ethernet address
 *
 * Return: none
 */
434 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
435 
436 /**
437  * qdf_mem_map_nbytes_single() - Map memory for DMA
438  * @osdev: pointer to OS device context
439  * @buf: pointer to memory to be dma mapped
440  * @dir: DMA map direction
441  * @nbytes: number of bytes to be mapped.
442  * @phy_addr: pointer to receive the physical address.
443  *
444  * Return: success/failure
445  */
446 static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
447 						 qdf_dma_dir_t dir, int nbytes,
448 						 qdf_dma_addr_t *phy_addr)
449 {
450 #if defined(HIF_PCI)
451 	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
452 #else
453 	return 0;
454 #endif
455 }
456 
457 static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
458 					  qdf_dma_addr_t buf,
459 					  qdf_dma_dir_t dir,
460 					  int nbytes)
461 {
462 	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
463 }
464 
465 /**
466  * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
467  * @osdev: pointer to OS device context
468  * @phy_addr: physical address of memory to be dma unmapped
469  * @dir: DMA unmap direction
470  * @nbytes: number of bytes to be unmapped.
471  *
472  * Return: none
473  */
474 static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
475 					       qdf_dma_addr_t phy_addr,
476 					       qdf_dma_dir_t dir,
477 					       int nbytes)
478 {
479 #if defined(HIF_PCI)
480 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
481 #endif
482 }
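
/*
 * Example: streaming DMA map/unmap of a caller-owned buffer. A sketch with
 * hypothetical 'osdev', 'buf' and 'len', assuming a non-zero return from the
 * map call indicates failure (the non-PCI stub returns 0).
 *
 *	qdf_dma_addr_t paddr;
 *
 *	if (qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_TO_DEVICE,
 *				      len, &paddr))
 *		return QDF_STATUS_E_FAILURE;
 *	// ... hand paddr to the hardware ...
 *	qdf_mem_unmap_nbytes_single(osdev, paddr, QDF_DMA_TO_DEVICE, len);
 */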
483 
484 /**
485  * qdf_mempool_init() - Create and initialize a memory pool
486  * @osdev: platform device object
487  * @pool_addr: address of the pool created
488  * @elem_cnt: no. of elements in pool
489  * @elem_size: size of each pool element in bytes
490  * @flags: flags
491  * Return: 0 on success, non-zero on failure
492  */
493 static inline int qdf_mempool_init(qdf_device_t osdev,
494 				   qdf_mempool_t *pool_addr, int elem_cnt,
495 				   size_t elem_size, uint32_t flags)
496 {
497 	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
498 				  flags);
499 }
500 
501 /**
502  * qdf_mempool_destroy() - Destroy a memory pool
503  * @osdev: platform device object
504  * @pool: Handle to the memory pool
505  * Return: none
506  */
507 static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
508 {
509 	__qdf_mempool_destroy(osdev, pool);
510 }
511 
512 /**
513  * qdf_mempool_alloc() - Allocate an element from the memory pool
514  * @osdev: platform device object
515  * @pool: Handle to the memory pool
516  * Return: Pointer to the allocated element or NULL if the pool is empty
517  */
518 static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
519 {
520 	return (void *)__qdf_mempool_alloc(osdev, pool);
521 }
522 
523 /**
524  * qdf_mempool_free() - Free a memory pool element
525  * @osdev: Platform device object
526  * @pool: Handle to memory pool
527  * @buf: Element to be freed
528  * Return: none
529  */
530 static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
531 				    void *buf)
532 {
533 	__qdf_mempool_free(osdev, pool, buf);
534 }
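
/*
 * Example: lifecycle of a fixed-size element pool. Illustrative only; the
 * element type, count and flags are placeholders, and the init check assumes
 * a return of 0 means success.
 *
 *	qdf_mempool_t pool;
 *	struct my_elem *e;
 *
 *	if (qdf_mempool_init(osdev, &pool, 64, sizeof(struct my_elem), 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	e = qdf_mempool_alloc(osdev, pool);
 *	if (e) {
 *		// use the element, then return it to the pool
 *		qdf_mempool_free(osdev, pool, e);
 *	}
 *
 *	qdf_mempool_destroy(osdev, pool);
 */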
535 
536 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
537 					qdf_dma_addr_t bus_addr,
538 					qdf_size_t size,
539 					__dma_data_direction direction);
540 
541 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
542 					qdf_dma_addr_t bus_addr,
543 					qdf_size_t size,
544 					__dma_data_direction direction);
545 
546 int qdf_mem_multi_page_link(qdf_device_t osdev,
547 		struct qdf_mem_multi_page_t *pages,
548 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
549 
550 #ifdef WLAN_DEBUGFS
551 
552 /**
553  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
554  * @size: number of bytes to increment by
555  *
556  * Return: None
557  */
558 void qdf_mem_kmalloc_inc(qdf_size_t size);
559 
560 /**
561  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
562  * @size: number of bytes to decrement by
563  *
564  * Return: None
565  */
566 void qdf_mem_kmalloc_dec(qdf_size_t size);
567 
568 #else
569 
570 static inline void qdf_mem_kmalloc_inc(qdf_size_t size) { }
571 static inline void qdf_mem_kmalloc_dec(qdf_size_t size) { }
572 
573 #endif /* WLAN_DEBUGFS */
574 
575 /**
576  * qdf_mem_skb_inc() - increment total skb allocation size
577  * @size: size to be added
578  *
579  * Return: none
580  */
581 void qdf_mem_skb_inc(qdf_size_t size);
582 
583 /**
584  * qdf_mem_skb_dec() - decrement total skb allocation size
585  * @size: size to be decremented
586  *
587  * Return: none
588  */
589 void qdf_mem_skb_dec(qdf_size_t size);
590 
591 /**
592  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
593  * @num: number of storage entries required
594  *
595  * Allocate mapping table for DMA memory allocation. This is needed for
596  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
597  *
598  * Return: shared memory info storage table pointer
599  */
600 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
601 {
602 	qdf_mem_info_t *mem_info_arr;
603 
604 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
605 	return mem_info_arr;
606 }
607 
608 /**
609  * qdf_update_mem_map_table() - Update DMA memory map info
610  * @osdev: Parent device instance
611  * @mem_info: Pointer to shared memory information
612  * @dma_addr: dma address
613  * @mem_size: memory size allocated
614  *
615  * Store DMA shared memory information
616  *
617  * Return: none
618  */
619 static inline void qdf_update_mem_map_table(qdf_device_t osdev,
620 					    qdf_mem_info_t *mem_info,
621 					    qdf_dma_addr_t dma_addr,
622 					    uint32_t mem_size)
623 {
624 	if (!mem_info) {
625 		qdf_nofl_err("%s: NULL mem_info", __func__);
626 		return;
627 	}
628 
629 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
630 }
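
/*
 * Example: building a DMA mapping table for IPA-WLAN buffer sharing. A
 * sketch; 'num_buf', 'dma_addr[]' and 'buf_size' are hypothetical inputs.
 *
 *	qdf_mem_info_t *tbl = qdf_mem_map_table_alloc(num_buf);
 *
 *	if (!tbl)
 *		return QDF_STATUS_E_NOMEM;
 *	for (i = 0; i < num_buf; i++)
 *		qdf_update_mem_map_table(osdev, &tbl[i], dma_addr[i],
 *					 buf_size);
 */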
631 
632 /**
633  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
634  * @osdev: parent device instance
635  *
636  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
637  */
638 static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
639 {
640 	return __qdf_mem_smmu_s1_enabled(osdev);
641 }
642 
643 /**
644  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
645  * @osdev: Parent device instance
646  * @dma_addr: DMA/IOVA address
647  *
648  * Get actual physical address from dma_addr based on SMMU enablement status.
649  * IF SMMU Stage 1 tranlation is enabled, DMA APIs return IO virtual address
650  * (IOVA) otherwise returns physical address. So get SMMU physical address
651  * mapping from IOVA.
652  *
653  * Return: dmaable physical address
654  */
655 static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
656 							qdf_dma_addr_t dma_addr)
657 {
658 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
659 }
660 
661 /**
662  * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
663  * @dev: device instance
664  * @sgt: scatter gather table pointer
665  * @cpu_addr: HLOS virtual address
666  * @dma_addr: dma address
667  * @size: allocated memory size
668  *
669  * Return: 0 on success, or a negative error code on failure
670  */
671 static inline int
672 qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
673 			qdf_dma_addr_t dma_addr, size_t size)
674 {
675 	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
676 }
677 
678 /**
679  * qdf_mem_free_sgtable() - Free a previously allocated sg table
680  * @sgt: the mapped sg table header
681  *
682  * Return: None
683  */
684 static inline void
685 qdf_mem_free_sgtable(struct sg_table *sgt)
686 {
687 	__qdf_os_mem_free_sgtable(sgt);
688 }
689 
690 /**
691  * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
692  * @sgt: scatter gather table pointer
693  *
694  * Return: None
695  */
696 static inline void
697 qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
698 {
699 	__qdf_dma_get_sgtable_dma_addr(sgt);
700 }
701 
702 /**
703  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
704  * @osdev: Parent device instance
705  * @mem_info: Pointer to allocated memory information
706  *
707  * Get dma address based on SMMU enablement status. If SMMU Stage 1
708  * translation is enabled, DMA APIs return an IO virtual address; otherwise
709  * they return a physical address.
710  *
711  * Return: dma address
712  */
713 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
714 						  qdf_mem_info_t *mem_info)
715 {
716 	return __qdf_mem_get_dma_addr(osdev, mem_info);
717 }
718 
719 /**
720  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
721  * @osdev: Parent device instance
722  * @mem_info: Pointer to allocated memory information
723  *
724  * Based on smmu stage 1 translation enablement, return corresponding dma
725  * address storage pointer.
726  *
727  * Return: dma address storage pointer
728  */
729 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
730 						       qdf_mem_info_t *mem_info)
731 {
732 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
733 }
734 
735 
736 /**
737  * qdf_mem_get_dma_size() - Return DMA memory size
738  * @osdev: parent device instance
739  * @mem_info: Pointer to allocated memory information
740  *
741  * Return: DMA memory size
742  */
743 static inline uint32_t
744 qdf_mem_get_dma_size(qdf_device_t osdev,
745 		       qdf_mem_info_t *mem_info)
746 {
747 	return __qdf_mem_get_dma_size(osdev, mem_info);
748 }
749 
750 /**
751  * qdf_mem_set_dma_size() - Set DMA memory size
752  * @osdev: parent device instance
753  * @mem_info: Pointer to allocated memory information
754  * @mem_size: memory size allocated
755  *
756  * Return: none
757  */
758 static inline void
759 qdf_mem_set_dma_size(qdf_device_t osdev,
760 		       qdf_mem_info_t *mem_info,
761 		       uint32_t mem_size)
762 {
763 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
764 }
765 
766 /**
767  * qdf_mem_get_dma_pa() - Return DMA physical address
768  * @osdev: parent device instance
769  * @mem_info: Pointer to allocated memory information
770  *
771  * Return: DMA physical address
772  */
773 static inline qdf_dma_addr_t
774 qdf_mem_get_dma_pa(qdf_device_t osdev,
775 		     qdf_mem_info_t *mem_info)
776 {
777 	return __qdf_mem_get_dma_pa(osdev, mem_info);
778 }
779 
780 /**
781  * qdf_mem_set_dma_pa() - Set DMA physical address
782  * @osdev: parent device instance
783  * @mem_info: Pointer to allocated memory information
784  * @dma_pa: DMA physical address
785  *
786  * Return: none
787  */
788 static inline void
789 qdf_mem_set_dma_pa(qdf_device_t osdev,
790 		     qdf_mem_info_t *mem_info,
791 		     qdf_dma_addr_t dma_pa)
792 {
793 	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
794 }
795 
796 /**
797  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
798  * @osdev: parent device instance
800  * @size: size to be allocated
801  *
802  * Allocate DMA memory which will be shared with external kernel module. This
803  * information is needed for SMMU mapping.
804  *
805  * Return: Pointer to the shared memory information on success, NULL on failure
806  */
807 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
808 
809 /**
810  * qdf_mem_shared_mem_free() - Free shared memory
811  * @osdev: parent device instance
812  * @shared_mem: shared memory information storage
813  *
814  * Free DMA shared memory resource
815  *
816  * Return: None
817  */
818 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
819 					   qdf_shared_mem_t *shared_mem)
820 {
821 	if (!shared_mem) {
822 		qdf_nofl_err("%s: NULL shared mem struct passed",
823 			     __func__);
824 		return;
825 	}
826 
827 	if (shared_mem->vaddr) {
828 		qdf_mem_free_consistent(osdev, osdev->dev,
829 					qdf_mem_get_dma_size(osdev,
830 						&shared_mem->mem_info),
831 					shared_mem->vaddr,
832 					qdf_mem_get_dma_addr(osdev,
833 						&shared_mem->mem_info),
834 					qdf_get_dma_mem_context(shared_mem,
835 								memctx));
836 	}
837 	qdf_mem_free_sgtable(&shared_mem->sgtable);
838 	qdf_mem_free(shared_mem);
839 }
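
/*
 * Example: allocating DMA memory to be shared with an external module and
 * releasing it. A sketch; the consumer of 'mem->vaddr' and 'mem->sgtable' is
 * omitted and 'ring_size' is a placeholder.
 *
 *	qdf_shared_mem_t *mem = qdf_mem_shared_mem_alloc(osdev, ring_size);
 *
 *	if (!mem)
 *		return QDF_STATUS_E_NOMEM;
 *	// pass mem->vaddr and qdf_mem_get_dma_addr(osdev, &mem->mem_info)
 *	// to the consumer ...
 *	qdf_mem_shared_mem_free(osdev, mem);
 */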
840 
841 #endif /* __QDF_MEMORY_H */
842