/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * QCA driver framework (QDF) memory management APIs
 */

#if !defined(__QDF_MEMORY_H)
#define __QDF_MEMORY_H

/* Include Files */
#include <qdf_types.h>
#include <i_qdf_mem.h>
#include <i_qdf_trace.h>
#include <qdf_atomic.h>

#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz

/**
 * qdf_align() - align to the given size.
 * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
 *
 * Return: aligned value.
 */
#define qdf_align(a, align_size)   __qdf_align(a, align_size)
#define qdf_page_size __page_size

/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};

/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of elements in a single page
 * @num_pages: Number of pages needed for the allocation
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size of each allocated page
 * @is_mem_prealloc: flag indicating whether the pages are pre-allocated
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
};


/* Preprocessor definitions and constants */

typedef __qdf_mempool_t qdf_mempool_t;

/**
 * qdf_mem_init() - Initialize QDF memory module
 *
 * Return: None
 *
 */
void qdf_mem_init(void);

/**
 * qdf_mem_exit() - Exit QDF memory module
 *
 * Return: None
 *
 */
void qdf_mem_exit(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
#define qdf_untracked_mem_malloc(size) \
	__qdf_untracked_mem_malloc(size, __func__, __LINE__)

#define qdf_untracked_mem_free(ptr) \
	__qdf_untracked_mem_free(ptr)
#endif

#define QDF_MEM_FUNC_NAME_SIZE 48

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
 *
 * Return: value of mem_debug_disabled qdf module argument
 */
bool qdf_mem_debug_config_get(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
 * @str_value: value of the module param
 *
 * This function will set the qdf module param mem_debug_disabled
 *
 * Return: QDF_STATUS_SUCCESS on Success
 */
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
#endif

/**
 * qdf_mem_malloc_atomic_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
				  uint32_t line, void *caller);

/**
 * qdf_mem_malloc_atomic_debug_fl() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc_atomic_debug_fl(qdf_size_t size, const char *func,
				     uint32_t line);

/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 * @flag: GFP flag
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag);

#define qdf_mem_malloc(size) \
	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)

#define qdf_mem_malloc_fl(size, func, line) \
	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)

#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_atomic_debug(size, __func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will free the memory pointed to by 'ptr'. It also checks for
 * memory corruption, underrun, overrun, double free, domain mismatch, etc.
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);

#define qdf_mem_free(ptr) \
	qdf_mem_free_debug(ptr, __func__, __LINE__)
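
/*
 * Illustrative usage sketch (not part of the API): a typical allocate,
 * NULL-check, use, free sequence. The context structure "my_ctx" is a
 * hypothetical placeholder.
 *
 *	struct my_ctx *ctx;
 *
 *	ctx = qdf_mem_malloc(sizeof(*ctx));
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(ctx);
 *
 * In MEMORY_DEBUG builds the macros above additionally record the call-site
 * function name, line number and return address for leak tracking.
 */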

void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
				     struct qdf_mem_multi_page_t *pages,
				     size_t element_size, uint32_t element_num,
				     qdf_dma_context_t memctxt, bool cacheable,
				     const char *func, uint32_t line,
				     void *caller);

#define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
				  memctxt, cacheable) \
	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
					element_num, memctxt, cacheable, \
					__func__, __LINE__, QDF_RET_IP)

void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
				    struct qdf_mem_multi_page_t *pages,
				    qdf_dma_context_t memctxt, bool cacheable,
				    const char *func, uint32_t line);

#define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
				       __func__, __LINE__)
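
/*
 * Illustrative sketch (assumes a valid "osdev"; the element geometry and the
 * DMA context handle "memctxt" are hypothetical). The allocator fills in the
 * qdf_mem_multi_page_t descriptor, and the same descriptor is handed back to
 * the free routine; a zero num_pages after the call is one common failure
 * check.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */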

/**
 * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
 *
 * Call this to ensure there are no active memory allocations being tracked
 * against the current debug domain. For example, one should call this function
 * immediately before a call to qdf_debug_domain_set() as a memory leak
 * detection mechanism.
 *
 * e.g.
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 *	...
 *
 *	// memory is allocated and freed
 *
 *	...
 *
 *	// before transitioning back to inactive state,
 *	// make sure all active memory has been freed
 *	qdf_mem_check_for_leaks();
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 *
 *	...
 *
 *	// also, before program exit, make sure init time memory is freed
 *	qdf_mem_check_for_leaks();
 *	exit();
 *
 * Return: None
 */
void qdf_mem_check_for_leaks(void);

/**
 * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller);

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
				       __func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_consistent_debug() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory to be freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: none
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line);

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
				  __func__, __LINE__)
#else
static inline bool qdf_mem_debug_config_get(void)
{
	return false;
}

static inline
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc(size) \
	__qdf_mem_malloc(size, __func__, __LINE__)

#define qdf_mem_malloc_fl(size, func, line) \
	__qdf_mem_malloc(size, func, line)

/**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)

void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
			       const char *func,
			       uint32_t line);

#define qdf_mem_free(ptr) \
	__qdf_mem_free(ptr)

static inline void qdf_mem_check_for_leaks(void) { }

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)

void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint32_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable);

void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable);

#endif /* MEMORY_DEBUG */
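
/*
 * Illustrative sketch of the consistent (DMA-coherent) alloc/free pair,
 * assuming "osdev" is a valid qdf_device_t; the 4096-byte size and the zero
 * memory context are hypothetical.
 *
 *	void *vaddr;
 *	qdf_dma_addr_t paddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, 4096, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... program paddr into hardware, access the buffer through vaddr ...
 *
 *	qdf_mem_free_consistent(osdev, osdev->dev, 4096, vaddr, paddr, 0);
 */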

/**
 * qdf_mem_malloc_flags() - Get memory allocation flags
 *
 * Return the GFP flag to be used for memory allocation
 * based on the current execution context.
 *
 * Return: the GFP flag for memory allocation
 */
int qdf_mem_malloc_flags(void);

/**
 * qdf_prealloc_disabled_config_get() - Get the user configuration of
 *                                      prealloc_disabled
 *
 * Return: value of prealloc_disabled qdf module argument
 */
bool qdf_prealloc_disabled_config_get(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
 * @str_value: value of the module param
 *
 * This function will set the qdf module param prealloc_disabled
 *
 * Return: QDF_STATUS_SUCCESS on Success
 */
QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
#endif

/**
 * qdf_mem_multi_pages_zero() - zero out each page memory
 * @pages: Multi page information storage
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will zero out each page memory
 *
 * Return: None
 */
void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
			      bool cacheable);

/**
 * qdf_aligned_malloc() - allocates aligned QDF memory.
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 * @func: Function name of the call site.
 * @line: Line number of the call site.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. It checks whether the allocated base address is aligned to @align.
 * If not, it frees the allocated memory, adds @align to the allocation size
 * and re-allocates the memory.
 *
 * Return:
 * Upon successful allocation, returns an aligned base address of the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
			   paddr_aligned, align) \
	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
			   paddr_aligned, align, __func__, __LINE__)

void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line);
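
/*
 * Illustrative sketch (sizes and alignment are hypothetical): the aligned
 * virtual address is returned, while the unaligned virtual/physical addresses
 * and the aligned physical address come back through the out-parameters. The
 * unaligned virtual address is the one that should eventually be released
 * (with qdf_mem_free() in this non-coherent variant).
 *
 *	uint32_t ring_size = 2048;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *vaddr_aligned;
 *
 *	vaddr_aligned = qdf_aligned_malloc(&ring_size, &vaddr_unaligned,
 *					   &paddr_unaligned, &paddr_aligned,
 *					   8);
 *	if (!vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(vaddr_unaligned);
 */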

/**
 * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 * @func: Function name of the call site.
 * @line: Line number of the call site.
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails.
 */
#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
					 paddr_unaligned, paddr_aligned, \
					 align) \
	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
					    paddr_unaligned, paddr_aligned, \
					    align, __func__, __LINE__)

void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
					  void **vaddr_unaligned,
					  qdf_dma_addr_t *paddr_unaligned,
					  qdf_dma_addr_t *paddr_aligned,
					  uint32_t align, const char *func,
					  uint32_t line);

#define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)

void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);

void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
		       uint32_t num_bytes);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * WARNING: the @num_bytes and @value parameters are swapped compared with the
 * standard C function "memset"; please ensure correct usage of this function!
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
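
/*
 * Illustrative sketch of the argument order: unlike memset(dst, value, len),
 * qdf_mem_set() takes the length before the fill byte.
 *
 *	uint8_t buf[32];
 *
 *	qdf_mem_set(buf, sizeof(buf), 0xff);	// fill all 32 bytes with 0xff
 *	qdf_mem_zero(buf, sizeof(buf));		// equivalent to a 0x00 fill
 */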

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C.  Note this function does not specifically handle overlapping
 * source and destination memory locations.  Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results.  Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C.  Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_cmp() - memory compare
 * @left: pointer to one location in memory to compare
 * @right: pointer to second location in memory to compare
 * @size: the number of bytes to compare
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 *	0 -- equal
 *	< 0 -- *left is less than *right
 *	> 0 -- *left is bigger than *right
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size);

void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);

/**
 * qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	return 0;
#endif
}

static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}

/**
 * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}
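
/*
 * Illustrative sketch (assumes a valid "osdev", a CPU-owned buffer "buf" of
 * "len" bytes, and treats a non-zero return from the map helper as failure):
 * map the buffer for device access, hand the physical address to hardware,
 * then unmap once the transfer is complete.
 *
 *	qdf_dma_addr_t phy_addr;
 *
 *	if (qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_TO_DEVICE,
 *				      len, &phy_addr))
 *		return QDF_STATUS_E_FAILURE;
 *	... start the DMA using phy_addr ...
 *	qdf_mem_unmap_nbytes_single(osdev, phy_addr, QDF_DMA_TO_DEVICE, len);
 */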

/**
 * qdf_mempool_init() - Create and initialize a memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, or a negative error value on failure
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}

/**
 * qdf_mempool_destroy() - Destroy a memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}

/**
 * qdf_mempool_alloc() - Allocate an element from a memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}

/**
 * qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}
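
/*
 * Illustrative mempool lifecycle sketch (the element type "struct my_elem"
 * and the pool geometry are hypothetical): create the pool once, carve
 * elements out of it on the fast path, and destroy it on teardown.
 *
 *	qdf_mempool_t pool;
 *	struct my_elem *elem;
 *
 *	if (qdf_mempool_init(osdev, &pool, 128, sizeof(*elem), 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	elem = qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		...
 *		qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	qdf_mempool_destroy(osdev, pool);
 */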

/**
 * qdf_kmem_cache_create() - OS abstraction for cache creation
 * @cache_name: Cache name
 * @size: Size of the object to be created
 *
 * Return: Cache address on successful creation, else NULL
 */
static inline qdf_kmem_cache_t
qdf_kmem_cache_create(const char *cache_name,
		      qdf_size_t size)
{
	return __qdf_kmem_cache_create(cache_name, size);
}

/**
 * qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: Cache pointer
 *
 * Return: void
 */
static inline void qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
{
	__qdf_kmem_cache_destroy(cache);
}

/**
 * qdf_kmem_cache_alloc() - Function to allocate an object from a cache
 * @cache: Cache address
 *
 * Return: Object from cache
 */
static inline void *qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
{
	return __qdf_kmem_cache_alloc(cache);
}

/**
 * qdf_kmem_cache_free() - Function to free a cache object
 * @cache: Cache address
 * @node: Object to be returned to the cache
 *
 * Return: void
 */
static inline void qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
	__qdf_kmem_cache_free(cache, node);
}
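
/*
 * Illustrative kmem cache sketch (the node type "struct my_node" and the
 * cache name are hypothetical): a dedicated cache suits objects of one fixed
 * size that are allocated and freed repeatedly.
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = qdf_kmem_cache_create("my_node_cache", sizeof(*node));
 *	if (!cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	node = qdf_kmem_cache_alloc(cache);
 *	if (node) {
 *		...
 *		qdf_kmem_cache_free(cache, node);
 *	}
 *
 *	qdf_kmem_cache_destroy(cache);
 */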

void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);

void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);

int qdf_mem_multi_page_link(qdf_device_t osdev,
		struct qdf_mem_multi_page_t *pages,
		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);

/**
 * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
 * @size: number of bytes to increment by
 *
 * Return: None
 */
void qdf_mem_kmalloc_inc(qdf_size_t size);

/**
 * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
 * @size: number of bytes to decrement by
 *
 * Return: None
 */
void qdf_mem_kmalloc_dec(qdf_size_t size);

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * qdf_mem_skb_inc() - increment total skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_inc(qdf_size_t size);

/**
 * qdf_mem_skb_dec() - decrement total skb allocation size
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_dec(qdf_size_t size);

/**
 * qdf_mem_skb_total_inc() - increment total skb allocation size
 * in host driver in both debug and perf builds
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_total_inc(qdf_size_t size);

/**
 * qdf_mem_skb_total_dec() - decrement total skb allocation size
 * in the host driver in debug and perf flavors
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_total_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_dec(void);

/**
 * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_dec(void);
#else

static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

/**
 * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of required storage
 *
 * Allocate mapping table for DMA memory allocation. This is needed for
 * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
 *
 * Return: shared memory info storage table pointer
 */
static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
{
	qdf_mem_info_t *mem_info_arr;

	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
	return mem_info_arr;
}

#ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size);

/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Get the actual physical address from dma_addr based on SMMU enablement
 * status. If SMMU Stage 1 translation is enabled, the DMA APIs return an IO
 * virtual address (IOVA); otherwise they return a physical address. So get
 * the SMMU physical address mapping from the IOVA.
 *
 * Return: dmaable physical address
 */
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr);
#else
static inline
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}

static inline
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}
#endif

/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 is enabled, false if smmu s1 is bypassed
 */
static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return __qdf_mem_smmu_s1_enabled(osdev);
}

/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Return: 0 on success, else failure
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			qdf_dma_addr_t dma_addr, size_t size)
{
	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
	__qdf_os_mem_free_sgtable(sgt);
}

/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	__qdf_dma_get_sgtable_dma_addr(sgt);
}

/**
 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return IO virtual address otherwise
 * returns physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
						  qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr(osdev, mem_info);
}

/**
 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement, return corresponding dma
 * address storage pointer.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
						       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
}


/**
 * qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_size(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
}

/**
 * qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_pa(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}

/**
 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
 * @osdev: parent device instance
 * @size: size to be allocated
 *
 * Allocate DMA memory which will be shared with external kernel module. This
 * information is needed for SMMU mapping.
 *
 * Return: Pointer to allocated DMA memory on success, NULL on failure
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * qdf_tx_desc_pool_free_bufs() - Go through elems and call the registered cb
 * @ctxt: Context to be passed to the cb
 * @pages: Multi page information storage
 * @elem_size: Each element size
 * @elem_count: Total number of elements in the pool.
 * @cacheable: Coherent memory or cacheable memory
 * @cb: Callback to free the elements
 * @elem_list: elem list for delayed free
 *
 * Return: 0 on Success, or Error code
 */
int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
			       uint32_t elem_size, uint32_t elem_count,
			       uint8_t cacheable, qdf_mem_release_cb cb,
			       void *elem_list);
#endif

/**
 * qdf_mem_shared_mem_free() - Free shared memory
 * @osdev: parent device instance
 * @shared_mem: shared memory information storage
 *
 * Free DMA shared memory resource
 *
 * Return: None
 */
static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
					   qdf_shared_mem_t *shared_mem)
{
	if (!shared_mem) {
		qdf_nofl_err("%s: NULL shared mem struct passed",
			     __func__);
		return;
	}

	if (shared_mem->vaddr) {
		qdf_mem_free_consistent(osdev, osdev->dev,
					qdf_mem_get_dma_size(osdev,
						&shared_mem->mem_info),
					shared_mem->vaddr,
					qdf_mem_get_dma_addr(osdev,
						&shared_mem->mem_info),
					qdf_get_dma_mem_context(shared_mem,
								memctx));
	}
	qdf_mem_free_sgtable(&shared_mem->sgtable);
	qdf_mem_free(shared_mem);
}
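
/*
 * Illustrative sketch (the 4096-byte size is hypothetical): allocate DMA
 * memory to be shared with an external entity such as IPA, export the
 * addresses it carries, and release everything with the helper above, which
 * also frees the scatter-gather table and the bookkeeping structure.
 *
 *	qdf_shared_mem_t *shared_mem;
 *
 *	shared_mem = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shared_mem)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... share shared_mem->vaddr / shared_mem->mem_info with the peer ...
 *
 *	qdf_mem_shared_mem_free(osdev, shared_mem);
 */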

/**
 * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
 * host driver
 *
 * Return: Total DMA memory allocated
 */
int32_t qdf_dma_mem_stats_read(void);

/**
 * qdf_heap_mem_stats_read() - Return the heap memory allocated
 * in host driver
 *
 * Return: Total heap memory allocated
 */
int32_t qdf_heap_mem_stats_read(void);

/**
 * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
 * host driver
 *
 * Return: Total SKB memory allocated
 */
int32_t qdf_skb_mem_stats_read(void);

/**
 * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
 * in the host driver tracked in both debug and perf builds
 *
 * Return: Total SKB memory allocated
 */
int32_t qdf_skb_total_mem_stats_read(void);

/**
 * qdf_skb_max_mem_stats_read() - Return the max SKB memory
 * allocated in host driver. This is the high watermark for the
 * total SKB allocated in the host driver
 *
 * Return: Max total SKB memory allocated
 */
int32_t qdf_skb_max_mem_stats_read(void);

/**
 * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
 * which are waiting on Tx completions
 *
 * Return: Outstanding Tx desc count
 */
int32_t qdf_mem_tx_desc_cnt_read(void);

/**
 * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
 * descs which are waiting on Tx completions. This is the high
 * watermark for the pending desc count
 *
 * Return: Max outstanding Tx desc count
 */
int32_t qdf_mem_tx_desc_max_read(void);

/**
 * qdf_mem_stats_init() - Initialize the qdf memstats fields on
 * creating the sysfs node
 *
 * Return: None
 */
void qdf_mem_stats_init(void);

/**
 * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
 * allocated for Tx data path
 *
 * Return: Tx SKB memory allocated
 */
int32_t qdf_dp_tx_skb_mem_stats_read(void);

/**
 * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
 * allocated for Rx data path
 *
 * Return: Rx SKB memory allocated
 */
int32_t qdf_dp_rx_skb_mem_stats_read(void);

/**
 * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
 * watermark for the SKB memory allocated for Tx data path
 *
 * Return: Max Tx SKB memory allocated
 */
int32_t qdf_dp_tx_skb_max_mem_stats_read(void);

/**
 * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
 * watermark for the SKB memory allocated for Rx data path
 *
 * Return: Max Rx SKB memory allocated
 */
int32_t qdf_dp_rx_skb_max_mem_stats_read(void);

/**
 * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
 * allocated in the Tx data path by the host driver or
 * buffers coming from the n/w stack
 *
 * Return: Number of DP Tx buffers allocated
 */
int32_t qdf_mem_dp_tx_skb_cnt_read(void);

/**
 * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
 * buffers allocated in the Tx data path
 *
 * Return: Max number of DP Tx buffers allocated
 */
int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);

/**
 * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
 * allocated in the Rx data path
 *
 * Return: Number of DP Rx buffers allocated
 */
int32_t qdf_mem_dp_rx_skb_cnt_read(void);

/**
 * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
 * buffers allocated in the Rx data path
 *
 * Return: Max number of DP Rx buffers allocated
 */
int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);

/**
 * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
 * count and the high watermark for pending tx desc count
 *
 * @pending_tx_descs: outstanding Tx desc count
 * @tx_descs_max: high watermark for outstanding Tx desc count
 *
 * Return: None
 */
void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
				int32_t tx_descs_max);

/**
 * qdf_mem_vfree() - Free the virtual memory pointed to by ptr
 * @ptr: Pointer to the starting address of the memory to
 * be freed.
 *
 * Return: None
 */
#define qdf_mem_vfree(ptr)   __qdf_mem_vfree(ptr)

/**
 * qdf_mem_valloc() - Allocate virtual memory for the given
 * size
 * @size: Number of bytes of memory to be allocated
 *
 * Return: Pointer to the starting address of the allocated virtual memory
 */
#define qdf_mem_valloc(size) __qdf_mem_valloc(size, __func__, __LINE__)
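
/*
 * Illustrative sketch (the 1 MB size is hypothetical): qdf_mem_valloc() and
 * qdf_mem_vfree() wrap the OS virtual allocator and suit large, CPU-only
 * buffers that need not be physically contiguous.
 *
 *	void *tbl = qdf_mem_valloc(1024 * 1024);
 *
 *	if (!tbl)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_vfree(tbl);
 */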

#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/*
 * typedef qdf_iommu_domain_t: Platform independent iommu domain
 * abstraction
 */
typedef __qdf_iommu_domain_t qdf_iommu_domain_t;

/**
 * qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 on success, else errno
 */
int
qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
			  enum qdf_iommu_attr attr, void *data);
#endif
#endif /* __QDF_MEMORY_H */