xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_mem.h (revision 6f3a375902d676398fbb5b8710604e6236bff43f)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * QCA driver framework (QDF) memory management APIs
23  */
24 
25 #if !defined(__QDF_MEMORY_H)
26 #define __QDF_MEMORY_H
27 
28 /* Include Files */
29 #include <qdf_types.h>
30 #include <i_qdf_mem.h>
31 #include <i_qdf_trace.h>
32 #include <qdf_atomic.h>
33 
34 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
35 
36 /**
37  * qdf_align() - align to the given size.
38  * @a: input that needs to be aligned.
39  * @align_size: boundary on which 'a' has to be aligned.
40  *
41  * Return: aligned value.
42  */
43 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
44 #define qdf_page_size __page_size
45 
/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 *
 * Describes one DMA-able page: the kernel virtual address range it
 * occupies and the bus/physical address used for device access.
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};
57 
/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of element in single page
 * @num_pages: Number of allocation needed pages
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: page size
 * @is_mem_prealloc: flag for multiple pages pre-alloc or not; this member
 *                   exists only when DP_MEM_PRE_ALLOC is defined
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	/* exactly one of dma_pages / cacheable_pages is used, selected by
	 * the 'cacheable' flag passed at allocation time
	 */
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
};
77 
78 
79 /* Preprocessor definitions and constants */
80 
81 typedef __qdf_mempool_t qdf_mempool_t;
82 
83 /**
84  * qdf_mem_init() - Initialize QDF memory module
85  *
86  * Return: None
87  *
88  */
89 void qdf_mem_init(void);
90 
91 /**
92  * qdf_mem_exit() - Exit QDF memory module
93  *
94  * Return: None
95  *
96  */
97 void qdf_mem_exit(void);
98 
99 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
100 #define qdf_untracked_mem_malloc(size) \
101 	__qdf_untracked_mem_malloc(size, __func__, __LINE__)
102 
103 #define qdf_untracked_mem_free(ptr) \
104 	__qdf_untracked_mem_free(ptr)
105 #endif
106 
107 #define QDF_MEM_FUNC_NAME_SIZE 48
108 
109 #ifdef MEMORY_DEBUG
110 /**
111  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
112  *
113  * Return: value of mem_debug_disabled qdf module argument
114  */
115 bool qdf_mem_debug_config_get(void);
116 
117 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
118 /**
119  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
120  * @str_value: value of the module param
121  *
122  * This function will set qdf module param mem_debug_disabled
123  *
124  * Return: QDF_STATUS_SUCCESS on Success
125  */
126 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
127 #endif
128 
129 /**
130  * qdf_mem_malloc_atomic_debug() - debug version of QDF memory allocation API
131  * @size: Number of bytes of memory to allocate.
132  * @func: Function name of the call site
133  * @line: Line number of the call site
134  * @caller: Address of the caller function
135  *
136  * This function will dynamically allocate the specified number of bytes of
137  * memory and add it to the qdf tracking list to check for memory leaks and
138  * corruptions
139  *
140  * Return: A valid memory location on success, or NULL on failure
141  */
142 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
143 				  uint32_t line, void *caller);
144 
145 /**
 * qdf_mem_malloc_atomic_debug_fl() - allocate QDF memory atomically
147  * @size: Number of bytes of memory to allocate.
148  * @func: Function name of the call site
149  * @line: Line number of the call site
150  *
151  * This function will dynamically allocate the specified number of bytes of
152  * memory.
153  *
154  * Return:
155  * Upon successful allocate, returns a non-NULL pointer to the allocated
156  * memory.  If this function is unable to allocate the amount of memory
157  * specified (for any reason) it returns NULL.
158  */
159 void *qdf_mem_malloc_atomic_debug_fl(qdf_size_t size, const char *func,
160 				     uint32_t line);
161 
162 /**
163  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
164  * @size: Number of bytes of memory to allocate.
165  * @func: Function name of the call site
166  * @line: Line number of the call site
167  * @caller: Address of the caller function
168  * @flag: GFP flag
169  *
170  * This function will dynamically allocate the specified number of bytes of
171  * memory and add it to the qdf tracking list to check for memory leaks and
172  * corruptions
173  *
174  * Return: A valid memory location on success, or NULL on failure
175  */
176 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
177 			   void *caller, uint32_t flag);
178 
179 #define qdf_mem_malloc(size) \
180 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
181 
182 #define qdf_mem_malloc_fl(size, func, line) \
183 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
184 
185 #define qdf_mem_malloc_atomic(size) \
186 	qdf_mem_malloc_atomic_debug(size, __func__, __LINE__, QDF_RET_IP)
187 
188 /**
 * qdf_mem_free() - free allocated memory
190  * @ptr: Pointer to the starting address of the memory to be freed.
191  *
192  * This function will free the memory pointed to by 'ptr'. It also checks for
193  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
194  *
195  * Return: none
196  */
197 #define qdf_mem_free(ptr) \
198 	qdf_mem_free_debug(ptr, __func__, __LINE__)
199 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
200 
201 /**
202  * qdf_mem_multi_pages_alloc_debug() - Debug version of
203  * qdf_mem_multi_pages_alloc
204  * @osdev: OS device handle pointer
205  * @pages: Multi page information storage
206  * @element_size: Each element size
207  * @element_num: Total number of elements should be allocated
208  * @memctxt: Memory context
209  * @cacheable: Coherent memory or cacheable memory
210  * @func: Caller of this allocator
211  * @line: Line number of the caller
212  * @caller: Return address of the caller
213  *
214  * This function will allocate large size of memory over multiple pages.
215  * Large size of contiguous memory allocation will fail frequently, then
216  * instead of allocate large memory by one shot, allocate through multiple, non
217  * contiguous memory and combine pages when actual usage
218  *
219  * Return: None
220  */
221 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
222 				     struct qdf_mem_multi_page_t *pages,
223 				     size_t element_size, uint32_t element_num,
224 				     qdf_dma_context_t memctxt, bool cacheable,
225 				     const char *func, uint32_t line,
226 				     void *caller);
227 
228 /**
229  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
230  * @osdev: OS device handle pointer
231  * @pages: Multi page information storage
232  * @element_size: Each element size
233  * @element_num: Total number of elements should be allocated
234  * @memctxt: Memory context
235  * @cacheable: Coherent memory or cacheable memory
236  *
237  * This function will allocate large size of memory over multiple pages.
238  * Large size of contiguous memory allocation will fail frequently, then
239  * instead of allocate large memory by one shot, allocate through multiple, non
240  * contiguous memory and combine pages when actual usage
241  *
242  * Return: None
243  */
244 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
245 				  memctxt, cacheable) \
246 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
247 					element_num, memctxt, cacheable, \
248 					__func__, __LINE__, QDF_RET_IP)
249 
250 /**
251  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
252  * @osdev: OS device handle pointer
253  * @pages: Multi page information storage
254  * @memctxt: Memory context
255  * @cacheable: Coherent memory or cacheable memory
256  * @func: Caller of this allocator
257  * @line: Line number of the caller
258  *
259  * This function will free large size of memory over multiple pages.
260  *
261  * Return: None
262  */
263 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
264 				    struct qdf_mem_multi_page_t *pages,
265 				    qdf_dma_context_t memctxt, bool cacheable,
266 				    const char *func, uint32_t line);
267 
268 /**
269  * qdf_mem_multi_pages_free() - free large size of kernel memory
270  * @osdev: OS device handle pointer
271  * @pages: Multi page information storage
272  * @memctxt: Memory context
273  * @cacheable: Coherent memory or cacheable memory
274  *
275  * This function will free large size of memory over multiple pages.
276  *
277  * Return: None
278  */
279 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
280 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
281 				       __func__, __LINE__)
282 
283 /**
284  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
285  *
286  * Call this to ensure there are no active memory allocations being tracked
287  * against the current debug domain. For example, one should call this function
288  * immediately before a call to qdf_debug_domain_set() as a memory leak
289  * detection mechanism.
290  *
291  * e.g.
292  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
293  *
294  *	...
295  *
296  *	// memory is allocated and freed
297  *
298  *	...
299  *
300  *	// before transitioning back to inactive state,
301  *	// make sure all active memory has been freed
302  *	qdf_mem_check_for_leaks();
303  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
304  *
305  *	...
306  *
307  *	// also, before program exit, make sure init time memory is freed
308  *	qdf_mem_check_for_leaks();
309  *	exit();
310  *
311  * Return: None
312  */
313 void qdf_mem_check_for_leaks(void);
314 
315 /**
316  * qdf_mem_alloc_consistent() - allocates consistent qdf memory
317  * @osdev: OS device handle
318  * @dev: Pointer to device handle
319  * @size: Size to be allocated
320  * @paddr: Physical address
321  *
322  * Return: pointer of allocated memory or null if memory alloc fails
323  */
324 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
325 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
326 				       __func__, __LINE__, QDF_RET_IP)
327 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
328 				     qdf_size_t size, qdf_dma_addr_t *paddr,
329 				     const char *func, uint32_t line,
330 				     void *caller);
331 
332 /**
333  * qdf_mem_free_consistent() - free consistent qdf memory
334  * @osdev: OS device handle
335  * @dev: OS device
336  * @size: Size to be allocated
337  * @vaddr: virtual address
338  * @paddr: Physical address
339  * @memctx: Pointer to DMA context
340  *
341  * Return: none
342  */
343 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
344 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
345 				  __func__, __LINE__)
346 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
347 				   qdf_size_t size, void *vaddr,
348 				   qdf_dma_addr_t paddr,
349 				   qdf_dma_context_t memctx,
350 				   const char *func, uint32_t line);
351 
352 #else
/* MEMORY_DEBUG disabled: report that memory debugging is off */
static inline bool qdf_mem_debug_config_get(void)
{
	return false;
}
357 
/* MEMORY_DEBUG disabled: accept the module param value and do nothing */
static inline
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
{
	return QDF_STATUS_SUCCESS;
}
363 
364 /**
 * qdf_mem_malloc() - allocate QDF memory
366  * @size: Number of bytes of memory to allocate.
367  *
368  * This function will dynamically allocate the specified number of bytes of
369  * memory.
370  *
371  * Return:
372  * Upon successful allocate, returns a non-NULL pointer to the allocated
373  * memory.  If this function is unable to allocate the amount of memory
374  * specified (for any reason) it returns NULL.
375  */
376 #define qdf_mem_malloc(size) \
377 	__qdf_mem_malloc(size, __func__, __LINE__)
378 
379 #define qdf_mem_malloc_fl(size, func, line) \
380 	__qdf_mem_malloc(size, func, line)
381 
382 /**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
384  * @size: Number of bytes of memory to allocate.
385  *
386  * This function will dynamically allocate the specified number of bytes of
387  * memory.
388  *
389  * Return:
390  * Upon successful allocate, returns a non-NULL pointer to the allocated
391  * memory.  If this function is unable to allocate the amount of memory
392  * specified (for any reason) it returns NULL.
393  */
394 #define qdf_mem_malloc_atomic(size) \
395 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
396 
397 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
398 			       const char *func,
399 			       uint32_t line);
400 
401 #define qdf_mem_free(ptr) \
402 	__qdf_mem_free(ptr)
403 
/* MEMORY_DEBUG disabled: leak tracking is not compiled in, so no-op */
static inline void qdf_mem_check_for_leaks(void) { }
405 
406 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
407 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
408 
409 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
410 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
411 
412 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
413 			       struct qdf_mem_multi_page_t *pages,
414 			       size_t element_size, uint32_t element_num,
415 			       qdf_dma_context_t memctxt, bool cacheable);
416 
417 void qdf_mem_multi_pages_free(qdf_device_t osdev,
418 			      struct qdf_mem_multi_page_t *pages,
419 			      qdf_dma_context_t memctxt, bool cacheable);
420 
421 #endif /* MEMORY_DEBUG */
422 
423 /**
 * qdf_mem_malloc_flags() - get memory allocation flags
 *
 * Return the flag to be used for memory allocation
 * based on the context
 *
 * Return: Based on the context, returns the GFP flag
 * for memory allocation
431  */
432 int qdf_mem_malloc_flags(void);
433 
434 /**
435  * qdf_prealloc_disabled_config_get() - Get the user configuration of
436  *                                      prealloc_disabled
437  *
438  * Return: value of prealloc_disabled qdf module argument
439  */
440 bool qdf_prealloc_disabled_config_get(void);
441 
442 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
443 /**
444  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
445  * @str_value: value of the module param
446  *
447  * This function will set qdf module param prealloc_disabled
448  *
449  * Return: QDF_STATUS_SUCCESS on Success
450  */
451 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
452 #endif
453 
454 /**
455  * qdf_mem_multi_pages_zero() - zero out each page memory
456  * @pages: Multi page information storage
457  * @cacheable: Coherent memory or cacheable memory
458  *
459  * This function will zero out each page memory
460  *
461  * Return: None
462  */
463 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
464 			      bool cacheable);
465 
466 /**
467  * qdf_aligned_malloc() - allocates aligned QDF memory.
468  * @size: Size to be allocated
469  * @vaddr_unaligned: Unaligned virtual address.
470  * @paddr_unaligned: Unaligned physical address.
471  * @paddr_aligned: Aligned physical address.
472  * @align: Base address alignment.
473  *
474  * This function will dynamically allocate the specified number of bytes of
475  * memory. Checks if the allocated base address is aligned with base_align.
476  * If not, it frees the allocated memory, adds base_align to alloc size and
477  * re-allocates the memory.
478  *
479  * Return:
480  * Upon successful allocate, returns an aligned base address of the allocated
481  * memory.  If this function is unable to allocate the amount of memory
482  * specified (for any reason) it returns NULL.
483  */
484 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
485 			   paddr_aligned, align) \
486 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
487 			   paddr_aligned, align, __func__, __LINE__)
488 
489 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
490 			    qdf_dma_addr_t *paddr_unaligned,
491 			    qdf_dma_addr_t *paddr_aligned,
492 			    uint32_t align,
493 			    const char *func, uint32_t line);
494 
495 /**
496  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
497  * @osdev: OS device handle
498  * @size: Size to be allocated
499  * @vaddr_unaligned: Unaligned virtual address.
500  * @paddr_unaligned: Unaligned physical address.
501  * @paddr_aligned: Aligned physical address.
502  * @align: Base address alignment.
503  *
504  * Return: pointer of allocated memory or null if memory alloc fails.
505  */
506 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
507 					 paddr_unaligned, paddr_aligned, \
508 					 align) \
509 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
510 					    paddr_unaligned, paddr_aligned, \
511 					    align, __func__, __LINE__)
512 
513 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
514 					  void **vaddr_unaligned,
515 					  qdf_dma_addr_t *paddr_unaligned,
516 					  qdf_dma_addr_t *paddr_aligned,
517 					  uint32_t align, const char *func,
518 					  uint32_t line);
519 
520 /**
521  * qdf_mem_virt_to_phys() - Convert virtual address to physical
522  * @vaddr: virtual address
523  *
524  * Return: physical address
525  */
526 #define qdf_mem_virt_to_phys(vaddr) __qdf_mem_virt_to_phys(vaddr)
527 
528 /**
529  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
533  *
534  * Return: None
535  */
536 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
537 
538 /**
539  * qdf_mem_copy_toio() - copy memory
540  * @dst_addr: Pointer to destination memory location (to copy to)
541  * @src_addr: Pointer to source memory location (to copy from)
542  * @num_bytes: Number of bytes to copy.
543  *
544  * Return: none
545  */
546 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
547 					   uint32_t num_bytes);
548 
549 /**
550  * qdf_mem_set() - set (fill) memory with a specified byte value.
551  * @ptr: Pointer to memory that will be set
552  * @num_bytes: Number of bytes to be set
553  * @value: Byte set in memory
554  *
555  * WARNING: parameter @num_bytes and @value are swapped comparing with
556  * standard C function "memset", please ensure correct usage of this function!
557  *
558  * Return: None
559  */
560 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
561 
/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}
576 
577 /**
578  * qdf_mem_copy() - copy memory
579  * @dst_addr: Pointer to destination memory location (to copy to)
580  * @src_addr: Pointer to source memory location (to copy from)
581  * @num_bytes: Number of bytes to copy.
582  *
583  * Copy host memory from one location to another, similar to memcpy in
584  * standard C.  Note this function does not specifically handle overlapping
585  * source and destination memory locations.  Calling this function with
586  * overlapping source and destination memory locations will result in
587  * unpredictable results.  Use qdf_mem_move() if the memory locations
588  * for the source and destination are overlapping (or could be overlapping!)
589  *
590  * Return: none
591  */
592 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
593 
594 /**
595  * qdf_mem_move() - move memory
596  * @dst_addr: pointer to destination memory location (to move to)
597  * @src_addr: pointer to source memory location (to move from)
598  * @num_bytes: number of bytes to move.
599  *
600  * Move host memory from one location to another, similar to memmove in
601  * standard C.  Note this function *does* handle overlapping
602  * source and destination memory locations.
603  *
604  * Return: None
605  */
606 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
607 
608 /**
609  * qdf_mem_cmp() - memory compare
610  * @left: pointer to one location in memory to compare
611  * @right: pointer to second location in memory to compare
612  * @size: the number of bytes to compare
613  *
614  * Function to compare two pieces of memory, similar to memcmp function
615  * in standard C.
616  *
617  * Return:
618  *	0 -- equal
619  *	< 0 -- *memory1 is less than *memory2
620  *	> 0 -- *memory1 is bigger than *memory2
621  */
622 int qdf_mem_cmp(const void *left, const void *right, size_t size);
623 
624 /**
625  * qdf_ether_addr_copy() - copy an Ethernet address
626  * @dst_addr: A six-byte array Ethernet address destination
627  * @src_addr: A six-byte array Ethernet address source
628  *
629  * Please note: dst & src must both be aligned to u16.
630  *
631  * Return: none
632  */
633 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
634 
/**
 * qdf_mem_map_nbytes_single - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	/* no PCI/IPCI bus compiled in: no DMA mapping to perform */
	return 0;
#endif
}
655 
/**
 * qdf_mem_dma_cache_sync() - synchronize CPU cache for a DMA buffer
 * @osdev: OS device handle
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Thin wrapper over the platform __qdf_mem_dma_cache_sync() implementation.
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}
663 
/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
	/* no-op when neither PCI nor IPCI bus support is compiled in */
}
682 
/**
 * qdf_mempool_init - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * The created pool handle is returned through @pool_addr.
 *
 * Return: 0 on success, non-zero on failure
 * NOTE(review): exact error convention of __qdf_mempool_init to be
 * confirmed against the platform implementation
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}
699 
/**
 * qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: handle to the memory pool to destroy
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}
711 
712 /**
713  * qdf_mempool_alloc() - Allocate an element memory pool
714  * @osdev: platform device object
715  * @pool: to memory pool
716  *
717  * Return: Pointer to the allocated element or NULL if the pool is empty
718  */
719 static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
720 {
721 	return (void *)__qdf_mempool_alloc(osdev, pool);
722 }
723 
/**
 * qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed; presumably must have been obtained via
 *       qdf_mempool_alloc() on the same @pool — verify at call sites
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}
737 
738 /**
739  * qdf_kmem_cache_create() - OS abstraction for cache creation
740  * @cache_name: Cache name
741  * @size: Size of the object to be created
742  *
743  * Return: Cache address on successful creation, else NULL
744  */
745 static inline qdf_kmem_cache_t
746 qdf_kmem_cache_create(const char *cache_name,
747 		      qdf_size_t size)
748 {
749 	return __qdf_kmem_cache_create(cache_name, size);
750 }
751 
/**
 * qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: Cache pointer; presumably all objects must have been returned
 *         to the cache first — verify at call sites
 *
 * Return: void
 */
static inline void qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
{
	__qdf_kmem_cache_destroy(cache);
}
762 
/**
 * qdf_kmem_cache_alloc() - Function to allocate an object from a cache
 * @cache: Cache address
 *
 * Return: Object from cache
 *
 */
static inline void *qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
{
	return __qdf_kmem_cache_alloc(cache);
}
774 
/**
 * qdf_kmem_cache_free() - Function to free cache object
 * @cache: Cache address
 * @node: Object to be returned to cache; presumably allocated from the
 *        same @cache via qdf_kmem_cache_alloc() — verify at call sites
 *
 * Return: void
 */
static inline void qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
	__qdf_kmem_cache_free(cache, node);
}
786 
787 /**
788  * qdf_mem_dma_sync_single_for_device() - assign memory to device
789  * @osdev: OS device handle
790  * @bus_addr: dma address to give to the device
791  * @size: Size of the memory block
792  * @direction: direction data will be DMAed
793  *
794  * Assign memory to the remote device.
795  * The cache lines are flushed to ram or invalidated as needed.
796  *
797  * Return: none
798  */
799 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
800 					qdf_dma_addr_t bus_addr,
801 					qdf_size_t size,
802 					__dma_data_direction direction);
803 
804 /**
805  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
806  * @osdev: OS device handle
807  * @bus_addr: dma address to give to the cpu
808  * @size: Size of the memory block
809  * @direction: direction data will be DMAed
810  *
811  * Assign memory to the CPU.
812  *
813  * Return: none
814  */
815 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
816 					qdf_dma_addr_t bus_addr,
817 					qdf_size_t size,
818 					__dma_data_direction direction);
819 
820 /**
821  * qdf_mem_multi_page_link() - Make links for multi page elements
822  * @osdev: OS device handle pointer
823  * @pages: Multi page information storage
824  * @elem_size: Single element size
825  * @elem_count: elements count should be linked
826  * @cacheable: Coherent memory or cacheable memory
827  *
828  * This function will make links for multi page allocated structure
829  *
830  * Return: 0 success
831  */
832 int qdf_mem_multi_page_link(qdf_device_t osdev,
833 			    struct qdf_mem_multi_page_t *pages,
834 			    uint32_t elem_size, uint32_t elem_count,
835 			    uint8_t cacheable);
836 
837 /**
838  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
839  * @size: number of bytes to increment by
840  *
841  * Return: None
842  */
843 void qdf_mem_kmalloc_inc(qdf_size_t size);
844 
845 /**
846  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
847  * @size: number of bytes to decrement by
848  *
849  * Return: None
850  */
851 void qdf_mem_kmalloc_dec(qdf_size_t size);
852 
853 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
854 /**
855  * qdf_mem_skb_inc() - increment total skb allocation size
856  * @size: size to be added
857  *
858  * Return: none
859  */
860 void qdf_mem_skb_inc(qdf_size_t size);
861 
862 /**
863  * qdf_mem_skb_dec() - decrement total skb allocation size
864  * @size: size to be decremented
865  *
866  * Return: none
867  */
868 void qdf_mem_skb_dec(qdf_size_t size);
869 
870 /**
871  * qdf_mem_skb_total_inc() - increment total skb allocation size
872  * in host driver in both debug and perf builds
873  * @size: size to be added
874  *
875  * Return: none
876  */
877 void qdf_mem_skb_total_inc(qdf_size_t size);
878 
879 /**
880  * qdf_mem_skb_total_dec() - decrement total skb allocation size
881  * in the host driver in debug and perf flavors
882  * @size: size to be decremented
883  *
884  * Return: none
885  */
886 void qdf_mem_skb_total_dec(qdf_size_t size);
887 
888 /**
889  * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
890  * @size: size to be added
891  *
892  * Return: none
893  */
894 void qdf_mem_dp_tx_skb_inc(qdf_size_t size);
895 
896 /**
897  * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
898  * @size: size to be decreased
899  *
900  * Return: none
901  */
902 void qdf_mem_dp_tx_skb_dec(qdf_size_t size);
903 
904 /**
905  * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
906  * @size: size to be added
907  *
908  * Return: none
909  */
910 void qdf_mem_dp_rx_skb_inc(qdf_size_t size);
911 
912 /**
913  * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
914  * @size: size to be decreased
915  *
916  * Return: none
917  */
918 void qdf_mem_dp_rx_skb_dec(qdf_size_t size);
919 
920 /**
921  * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
922  *
923  * Return: none
924  */
925 void qdf_mem_dp_tx_skb_cnt_inc(void);
926 
927 /**
928  * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
929  *
930  * Return: none
931  */
932 void qdf_mem_dp_tx_skb_cnt_dec(void);
933 
934 /**
935  * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
936  *
937  * Return: none
938  */
939 void qdf_mem_dp_rx_skb_cnt_inc(void);
940 
941 /**
942  * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
943  *
944  * Return: none
945  */
946 void qdf_mem_dp_rx_skb_cnt_dec(void);
947 #else
948 
/*
 * CONFIG_WLAN_SYSFS_MEM_STATS is not set: the skb/DP memory accounting
 * helpers below compile away to no-ops so callers need no #ifdefs.
 */
static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
{
}
996 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
997 
998 /**
999  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
1000  * @num: number of required storage
1001  *
1002  * Allocate mapping table for DMA memory allocation. This is needed for
1003  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
1004  *
1005  * Return: shared memory info storage table pointer
1006  */
1007 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
1008 {
1009 	qdf_mem_info_t *mem_info_arr;
1010 
1011 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
1012 	return mem_info_arr;
1013 }
1014 
1015 #ifdef ENHANCED_OS_ABSTRACTION
1016 /**
1017  * qdf_update_mem_map_table() - Update DMA memory map info
1018  * @osdev: Parent device instance
1019  * @mem_info: Pointer to shared memory information
1020  * @dma_addr: dma address
1021  * @mem_size: memory size allocated
1022  *
1023  * Store DMA shared memory information
1024  *
1025  * Return: none
1026  */
1027 void qdf_update_mem_map_table(qdf_device_t osdev,
1028 			      qdf_mem_info_t *mem_info,
1029 			      qdf_dma_addr_t dma_addr,
1030 			      uint32_t mem_size);
1031 
1032 /**
1033  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
1034  * @osdev: Parent device instance
1035  * @dma_addr: DMA/IOVA address
1036  *
1037  * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
1039  * (IOVA) otherwise returns physical address. So get SMMU physical address
1040  * mapping from IOVA.
1041  *
1042  * Return: dmaable physical address
1043  */
1044 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
1045 					  qdf_dma_addr_t dma_addr);
1046 #else
1047 static inline
1048 void qdf_update_mem_map_table(qdf_device_t osdev,
1049 			      qdf_mem_info_t *mem_info,
1050 			      qdf_dma_addr_t dma_addr,
1051 			      uint32_t mem_size)
1052 {
1053 	if (!mem_info) {
1054 		qdf_nofl_err("%s: NULL mem_info", __func__);
1055 		return;
1056 	}
1057 
1058 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
1059 }
1060 
1061 static inline
1062 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
1063 					  qdf_dma_addr_t dma_addr)
1064 {
1065 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1066 }
1067 #endif
1068 
1069 /**
1070  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
1071  * @osdev: parent device instance
1072  *
1073  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
1074  */
1075 static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
1076 {
1077 	return __qdf_mem_smmu_s1_enabled(osdev);
1078 }
1079 
/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Thin wrapper over the platform __qdf_os_mem_dma_get_sgtable().
 *
 * Return: 0 on success, else an error value. NOTE(review): earlier doc
 * said "physical address", but the return type is int, which matches a
 * status code — confirm against the platform implementation.
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			qdf_dma_addr_t dma_addr, size_t size)
{
	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
1096 
/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Forwards to the platform-specific __qdf_os_mem_free_sgtable().
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
	__qdf_os_mem_free_sgtable(sgt);
}
1108 
/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Forwards to the platform-specific __qdf_dma_get_sgtable_dma_addr();
 * modifies @sgt in place.
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	__qdf_dma_get_sgtable_dma_addr(sgt);
}
1120 
1121 /**
1122  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
1123  * @osdev: Parent device instance
1124  * @mem_info: Pointer to allocated memory information
1125  *
1126  * Get dma address based on SMMU enablement status. If SMMU Stage 1
1127  * translation is enabled, DMA APIs return IO virtual address otherwise
1128  * returns physical address.
1129  *
1130  * Return: dma address
1131  */
1132 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
1133 						  qdf_mem_info_t *mem_info)
1134 {
1135 	return __qdf_mem_get_dma_addr(osdev, mem_info);
1136 }
1137 
1138 /**
1139  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
1140  * @osdev: Parent device instance
1141  * @mem_info: Pointer to allocated memory information
1142  *
1143  * Based on smmu stage 1 translation enablement, return corresponding dma
1144  * address storage pointer.
1145  *
1146  * Return: dma address storage pointer
1147  */
1148 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
1149 						       qdf_mem_info_t *mem_info)
1150 {
1151 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
1152 }
1153 
1154 
1155 /**
1156  * qdf_mem_get_dma_size() - Return DMA memory size
1157  * @osdev: parent device instance
1158  * @mem_info: Pointer to allocated memory information
1159  *
1160  * Return: DMA memory size
1161  */
1162 static inline uint32_t
1163 qdf_mem_get_dma_size(qdf_device_t osdev,
1164 		       qdf_mem_info_t *mem_info)
1165 {
1166 	return __qdf_mem_get_dma_size(osdev, mem_info);
1167 }
1168 
/**
 * qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Stores @mem_size into @mem_info via the platform helper
 * __qdf_mem_set_dma_size().
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
}
1184 
1185 /**
1186  * qdf_mem_get_dma_pa() - Return DMA physical address
1187  * @osdev: parent device instance
1188  * @mem_info: Pointer to allocated memory information
1189  *
1190  * Return: DMA physical address
1191  */
1192 static inline qdf_dma_addr_t
1193 qdf_mem_get_dma_pa(qdf_device_t osdev,
1194 		     qdf_mem_info_t *mem_info)
1195 {
1196 	return __qdf_mem_get_dma_pa(osdev, mem_info);
1197 }
1198 
/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Stores @dma_pa into @mem_info via the platform helper
 * __qdf_mem_set_dma_pa().
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}
1214 
1215 /**
1216  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
1217  * @osdev: parent device instance
1218  * @size: size to be allocated
1219  *
1220  * Allocate DMA memory which will be shared with external kernel module. This
1221  * information is needed for SMMU mapping.
1222  *
1223  * Return: Pointer to allocated DMA memory on success, NULL on failure
1224  */
1225 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
1226 
1227 #ifdef DP_UMAC_HW_RESET_SUPPORT
1228 /**
 * qdf_tx_desc_pool_free_bufs() - Go through elems and call the registered cb
1230  * @ctxt: Context to be passed to the cb
1231  * @pages: Multi page information storage
1232  * @elem_size: Each element size
1233  * @elem_count: Total number of elements in the pool.
1234  * @cacheable: Coherent memory or cacheable memory
1235  * @cb: Callback to free the elements
1236  * @elem_list: elem list for delayed free
1237  *
 * Return: 0 on success, or error code
1239  */
1240 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
1241 			       uint32_t elem_size, uint32_t elem_count,
1242 			       uint8_t cacheable, qdf_mem_release_cb cb,
1243 			       void *elem_list);
1244 #endif
1245 
1246 /**
1247  * qdf_mem_shared_mem_free() - Free shared memory
1248  * @osdev: parent device instance
1249  * @shared_mem: shared memory information storage
1250  *
1251  * Free DMA shared memory resource
1252  *
1253  * Return: None
1254  */
1255 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
1256 					   qdf_shared_mem_t *shared_mem)
1257 {
1258 	if (!shared_mem) {
1259 		qdf_nofl_err("%s: NULL shared mem struct passed",
1260 			     __func__);
1261 		return;
1262 	}
1263 
1264 	if (shared_mem->vaddr) {
1265 		qdf_mem_free_consistent(osdev, osdev->dev,
1266 					qdf_mem_get_dma_size(osdev,
1267 						&shared_mem->mem_info),
1268 					shared_mem->vaddr,
1269 					qdf_mem_get_dma_addr(osdev,
1270 						&shared_mem->mem_info),
1271 					qdf_get_dma_mem_context(shared_mem,
1272 								memctx));
1273 	}
1274 	qdf_mem_free_sgtable(&shared_mem->sgtable);
1275 	qdf_mem_free(shared_mem);
1276 }
1277 
1278 /**
1279  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
1280  * host driver
1281  *
1282  * Return: Total DMA memory allocated
1283  */
1284 int32_t qdf_dma_mem_stats_read(void);
1285 
1286 /**
1287  * qdf_heap_mem_stats_read() - Return the heap memory allocated
1288  * in host driver
1289  *
1290  * Return: Total heap memory allocated
1291  */
1292 int32_t qdf_heap_mem_stats_read(void);
1293 
1294 /**
1295  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1296  * host driver
1297  *
1298  * Return: Total SKB memory allocated
1299  */
1300 int32_t qdf_skb_mem_stats_read(void);
1301 
1302 /**
1303  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1304  * in the host driver tracked in both debug and perf builds
1305  *
1306  * Return: Total SKB memory allocated
1307  */
1308 int32_t qdf_skb_total_mem_stats_read(void);
1309 
1310 /**
1311  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1312  * allocated in host driver. This is the high watermark for the
1313  * total SKB allocated in the host driver
1314  *
 * Return: Max SKB memory allocated
1316  */
1317 int32_t qdf_skb_max_mem_stats_read(void);
1318 
1319 /**
1320  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1321  * which are waiting on Tx completions
1322  *
1323  * Return: Outstanding Tx desc count
1324  */
1325 int32_t qdf_mem_tx_desc_cnt_read(void);
1326 
1327 /**
1328  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1329  * descs which are waiting on Tx completions. This is the high
1330  * watermark for the pending desc count
1331  *
1332  * Return: Max outstanding Tx desc count
1333  */
1334 int32_t qdf_mem_tx_desc_max_read(void);
1335 
1336 /**
1337  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1338  * creating the sysfs node
1339  *
1340  * Return: None
1341  */
1342 void qdf_mem_stats_init(void);
1343 
1344 /**
1345  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1346  * allocated for Tx data path
1347  *
1348  * Return: Tx SKB memory allocated
1349  */
1350 int32_t qdf_dp_tx_skb_mem_stats_read(void);
1351 
1352 /**
1353  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1354  * allocated for Rx data path
1355  *
1356  * Return: Rx SKB memory allocated
1357  */
1358 int32_t qdf_dp_rx_skb_mem_stats_read(void);
1359 
1360 /**
1361  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1362  * watermark for the SKB memory allocated for Tx data path
1363  *
1364  * Return: Max Tx SKB memory allocated
1365  */
1366 int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1367 
1368 /**
1369  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1370  * watermark for the SKB memory allocated for Rx data path
1371  *
1372  * Return: Max Rx SKB memory allocated
1373  */
1374 int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1375 
1376 /**
1377  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1378  * allocated in the Tx data path by the host driver or
1379  * buffers coming from the n/w stack
1380  *
1381  * Return: Number of DP Tx buffers allocated
1382  */
1383 int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1384 
1385 /**
1386  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1387  * buffers allocated in the Tx data path
1388  *
1389  * Return: Max number of DP Tx buffers allocated
1390  */
1391 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1392 
1393 /**
1394  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1395  * allocated in the Rx data path
1396  *
1397  * Return: Number of DP Rx buffers allocated
1398  */
1399 int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1400 
1401 /**
1402  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1403  * buffers allocated in the Rx data path
1404  *
1405  * Return: Max number of DP Rx buffers allocated
1406  */
1407 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1408 
1409 /**
1410  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1411  * count and the high watermark for pending tx desc count
1412  *
1413  * @pending_tx_descs: outstanding Tx desc count
1414  * @tx_descs_max: high watermark for outstanding Tx desc count
1415  *
1416  * Return: None
1417  */
1418 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1419 				int32_t tx_descs_max);
1420 
1421 /**
1422  * qdf_mem_vfree() - Free the virtual memory pointed to by ptr
1423  * @ptr: Pointer to the starting address of the memory to
1424  * be freed.
1425  *
1426  * Return: None
1427  */
1428 #define qdf_mem_vfree(ptr)   __qdf_mem_vfree(ptr)
1429 
1430 /**
1431  * qdf_mem_valloc() - Allocate virtual memory for the given
1432  * size
1433  * @size: Number of bytes of memory to be allocated
1434  *
1435  * Return: Pointer to the starting address of the allocated virtual memory
1436  */
1437 #define qdf_mem_valloc(size) __qdf_mem_valloc(size, __func__, __LINE__)
1438 
1439 #ifdef ENABLE_VALLOC_REPLACE_MALLOC
1440 /**
1441  * qdf_mem_common_alloc() - Common function to allocate memory for the
1442  * given size, allocation method decided by ENABLE_VALLOC_REPLACE_MALLOC
1443  * @size: Number of bytes of memory to be allocated
1444  *
1445  * Return: Pointer to the starting address of the allocated memory
1446  */
1447 #define qdf_mem_common_alloc(size) qdf_mem_valloc(size)
1448 
1449 /**
1450  * qdf_mem_common_free() - Common function to free the memory pointed
1451  * to by ptr, memory free method decided by ENABLE_VALLOC_REPLACE_MALLOC
1452  * @ptr: Pointer to the starting address of the memory to
1453  * be freed.
1454  *
1455  * Return: None
1456  */
1457 #define qdf_mem_common_free(ptr) qdf_mem_vfree(ptr)
1458 #else
1459 #define qdf_mem_common_alloc(size) qdf_mem_malloc(size)
1460 #define qdf_mem_common_free(ptr) qdf_mem_free(ptr)
1461 #endif
1462 
1463 /**
1464  * qdf_ioremap() - map bus memory into cpu space
1465  * @HOST_CE_ADDRESS: bus address of the memory
1466  * @HOST_CE_SIZE: memory size to map
1467  */
1468 #define qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
1469 			__qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
1470 
1471 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
1472 /*
1473  * typedef qdf_iommu_domain_t: Platform independent iommu domain
1474  * abstraction
1475  */
1476 typedef __qdf_iommu_domain_t qdf_iommu_domain_t;
1477 
1478 /**
1479  * qdf_iommu_domain_get_attr() - API to get iommu domain attributes
1480  * @domain: iommu domain
1481  * @attr: iommu attribute
1482  * @data: data pointer
1483  *
1484  * Return: 0 on success, else errno
1485  */
1486 int
1487 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
1488 			  enum qdf_iommu_attr attr, void *data);
1489 #endif
1490 #endif /* __QDF_MEMORY_H */
1491