xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_mem.h (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * QCA driver framework (QDF) memory management APIs
23  */
24 
25 #if !defined(__QDF_MEMORY_H)
26 #define __QDF_MEMORY_H
27 
28 /* Include Files */
29 #include <qdf_types.h>
30 #include <i_qdf_mem.h>
31 #include <i_qdf_trace.h>
32 #include <qdf_atomic.h>
33 
34 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
35 
36 /**
37  * qdf_align() - align to the given size.
38  * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
40  *
41  * Return: aligned value.
42  */
43 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
44 #define qdf_page_size __page_size
45 
/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 *
 * Describes one DMA-able page: the kernel virtual address range
 * [page_v_addr_start, page_v_addr_end] and the DMA/physical address of
 * the page start.
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};
57 
/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of element in single page
 * @num_pages: Number of allocation needed pages
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size in bytes of each allocated page
 * @is_mem_prealloc: flag for multiple pages pre-alloc or not
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
};
76 
77 
78 /* Preprocessor definitions and constants */
79 
80 typedef __qdf_mempool_t qdf_mempool_t;
81 
82 /**
83  * qdf_mem_init() - Initialize QDF memory module
84  *
85  * Return: None
86  *
87  */
88 void qdf_mem_init(void);
89 
90 /**
91  * qdf_mem_exit() - Exit QDF memory module
92  *
93  * Return: None
94  *
95  */
96 void qdf_mem_exit(void);
97 
98 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
99 #define qdf_untracked_mem_malloc(size) \
100 	__qdf_untracked_mem_malloc(size, __func__, __LINE__)
101 
102 #define qdf_untracked_mem_free(ptr) \
103 	__qdf_untracked_mem_free(ptr)
104 #endif
105 
106 #define QDF_MEM_FUNC_NAME_SIZE 48
107 
108 #ifdef MEMORY_DEBUG
109 /**
110  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
111  *
112  * Return: value of mem_debug_disabled qdf module argument
113  */
114 bool qdf_mem_debug_config_get(void);
115 
116 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
117 /**
 * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
119  * @str_value: value of the module param
120  *
121  * This function will set qdf module param mem_debug_disabled
122  *
123  * Return: QDF_STATUS_SUCCESS on Success
124  */
125 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
126 #endif
127 
128 /**
129  * qdf_mem_malloc_atomic_debug() - debug version of QDF memory allocation API
130  * @size: Number of bytes of memory to allocate.
131  * @func: Function name of the call site
132  * @line: Line number of the call site
133  * @caller: Address of the caller function
134  *
 * This function will dynamically allocate the specified number of bytes of
136  * memory and add it to the qdf tracking list to check for memory leaks and
137  * corruptions
138  *
139  * Return: A valid memory location on success, or NULL on failure
140  */
141 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
142 				  uint32_t line, void *caller);
143 
144 /**
145  * qdf_mem_malloc_atomic_debug_fl() - allocation QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
147  *
 * This function will dynamically allocate the specified number of bytes of
149  * memory.
150  *
151  * Return:
152  * Upon successful allocate, returns a non-NULL pointer to the allocated
153  * memory.  If this function is unable to allocate the amount of memory
154  * specified (for any reason) it returns NULL.
155  */
156 void *qdf_mem_malloc_atomic_debug_fl(qdf_size_t size, const char *func,
157 				     uint32_t line);
158 
159 /**
160  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
161  * @size: Number of bytes of memory to allocate.
162  * @func: Function name of the call site
163  * @line: Line number of the call site
164  * @caller: Address of the caller function
165  * @flag: GFP flag
166  *
 * This function will dynamically allocate the specified number of bytes of
168  * memory and add it to the qdf tracking list to check for memory leaks and
169  * corruptions
170  *
171  * Return: A valid memory location on success, or NULL on failure
172  */
173 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
174 			   void *caller, uint32_t flag);
175 
176 #define qdf_mem_malloc(size) \
177 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
178 
179 #define qdf_mem_malloc_fl(size, func, line) \
180 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
181 
182 #define qdf_mem_malloc_atomic(size) \
183 	qdf_mem_malloc_atomic_debug(size, __func__, __LINE__, QDF_RET_IP)
184 
185 /**
186  * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: File name of the call site
 * @line: Line number of the call site
188  *
189  * This function will free the memory pointed to by 'ptr'. It also checks for
190  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
191  *
192  * Return: none
193  */
194 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
195 
196 #define qdf_mem_free(ptr) \
197 	qdf_mem_free_debug(ptr, __func__, __LINE__)
198 
199 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
200 				     struct qdf_mem_multi_page_t *pages,
201 				     size_t element_size, uint32_t element_num,
202 				     qdf_dma_context_t memctxt, bool cacheable,
203 				     const char *func, uint32_t line,
204 				     void *caller);
205 
206 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
207 				  memctxt, cacheable) \
208 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
209 					element_num, memctxt, cacheable, \
210 					__func__, __LINE__, QDF_RET_IP)
211 
212 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
213 				    struct qdf_mem_multi_page_t *pages,
214 				    qdf_dma_context_t memctxt, bool cacheable,
215 				    const char *func, uint32_t line);
216 
217 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
218 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
219 				       __func__, __LINE__)
220 
221 /**
222  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
223  *
224  * Call this to ensure there are no active memory allocations being tracked
225  * against the current debug domain. For example, one should call this function
226  * immediately before a call to qdf_debug_domain_set() as a memory leak
227  * detection mechanism.
228  *
229  * e.g.
230  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
231  *
232  *	...
233  *
234  *	// memory is allocated and freed
235  *
236  *	...
237  *
238  *	// before transitioning back to inactive state,
239  *	// make sure all active memory has been freed
240  *	qdf_mem_check_for_leaks();
241  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
242  *
243  *	...
244  *
245  *	// also, before program exit, make sure init time memory is freed
246  *	qdf_mem_check_for_leaks();
247  *	exit();
248  *
249  * Return: None
250  */
251 void qdf_mem_check_for_leaks(void);
252 
253 /**
254  * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
255  * @osdev: OS device handle
256  * @dev: Pointer to device handle
257  * @size: Size to be allocated
258  * @paddr: Physical address
259  * @func: Function name of the call site
 * @line: line number of the call site
261  * @caller: Address of the caller function
262  *
263  * Return: pointer of allocated memory or null if memory alloc fails
264  */
265 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
266 				     qdf_size_t size, qdf_dma_addr_t *paddr,
267 				     const char *func, uint32_t line,
268 				     void *caller);
269 
270 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
271 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
272 				       __func__, __LINE__, QDF_RET_IP)
273 
274 /**
275  * qdf_mem_free_consistent_debug() - free consistent qdf memory
276  * @osdev: OS device handle
277  * @size: Size to be allocated
278  * @vaddr: virtual address
279  * @paddr: Physical address
280  * @memctx: Pointer to DMA context
281  * @func: Function name of the call site
 * @line: line number of the call site
283  *
284  * Return: none
285  */
286 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
287 				   qdf_size_t size, void *vaddr,
288 				   qdf_dma_addr_t paddr,
289 				   qdf_dma_context_t memctx,
290 				   const char *func, uint32_t line);
291 
292 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
293 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
294 				  __func__, __LINE__)
295 #else
/**
 * qdf_mem_debug_config_get() - Get the user configuration of
 * mem_debug_disabled
 *
 * Return: false — memory debug tracking is compiled out (MEMORY_DEBUG
 * is not defined), so debug can never be reported as enabled here.
 */
static inline bool qdf_mem_debug_config_get(void)
{
	/* No MEMORY_DEBUG support in this build */
	bool mem_debug_disabled = false;

	return mem_debug_disabled;
}
300 
/*
 * Stub for builds without MEMORY_DEBUG: there is no mem_debug_disabled
 * state to configure, so accept and ignore the value.
 */
static inline
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
{
	return QDF_STATUS_SUCCESS;
}
306 
307 /**
308  * qdf_mem_malloc() - allocation QDF memory
309  * @size: Number of bytes of memory to allocate.
310  *
 * This function will dynamically allocate the specified number of bytes of
312  * memory.
313  *
314  * Return:
315  * Upon successful allocate, returns a non-NULL pointer to the allocated
316  * memory.  If this function is unable to allocate the amount of memory
317  * specified (for any reason) it returns NULL.
318  */
319 #define qdf_mem_malloc(size) \
320 	__qdf_mem_malloc(size, __func__, __LINE__)
321 
322 #define qdf_mem_malloc_fl(size, func, line) \
323 	__qdf_mem_malloc(size, func, line)
324 
325 /**
326  * qdf_mem_malloc_atomic() - allocation QDF memory atomically
327  * @size: Number of bytes of memory to allocate.
328  *
 * This function will dynamically allocate the specified number of bytes of
330  * memory.
331  *
332  * Return:
333  * Upon successful allocate, returns a non-NULL pointer to the allocated
334  * memory.  If this function is unable to allocate the amount of memory
335  * specified (for any reason) it returns NULL.
336  */
337 #define qdf_mem_malloc_atomic(size) \
338 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
339 
340 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
341 			       const char *func,
342 			       uint32_t line);
343 
344 #define qdf_mem_free(ptr) \
345 	__qdf_mem_free(ptr)
346 
347 static inline void qdf_mem_check_for_leaks(void) { }
348 
349 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
350 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
351 
352 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
353 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
354 
355 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
356 			       struct qdf_mem_multi_page_t *pages,
357 			       size_t element_size, uint32_t element_num,
358 			       qdf_dma_context_t memctxt, bool cacheable);
359 
360 void qdf_mem_multi_pages_free(qdf_device_t osdev,
361 			      struct qdf_mem_multi_page_t *pages,
362 			      qdf_dma_context_t memctxt, bool cacheable);
363 
364 #endif /* MEMORY_DEBUG */
365 
/**
 * qdf_mem_malloc_flags() - Get mem allocation flags
 *
 * Return the flag to be used for memory allocation
 * based on the context
 *
 * Return: Based on the context, returns the GFP flag
 * for memory allocation
 */
375 int qdf_mem_malloc_flags(void);
376 
377 /**
378  * qdf_prealloc_disabled_config_get() - Get the user configuration of
379  *                                      prealloc_disabled
380  *
381  * Return: value of prealloc_disabled qdf module argument
382  */
383 bool qdf_prealloc_disabled_config_get(void);
384 
385 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
386 /**
387  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
388  * @str_value: value of the module param
389  *
390  * This function will set qdf module param prealloc_disabled
391  *
392  * Return: QDF_STATUS_SUCCESS on Success
393  */
394 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
395 #endif
396 
397 /**
398  * qdf_mem_multi_pages_zero() - zero out each page memory
399  * @pages: Multi page information storage
400  * @cacheable: Coherent memory or cacheable memory
401  *
402  * This function will zero out each page memory
403  *
404  * Return: None
405  */
406 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
407 			      bool cacheable);
408 
409 /**
410  * qdf_aligned_malloc() - allocates aligned QDF memory.
411  * @size: Size to be allocated
412  * @vaddr_unaligned: Unaligned virtual address.
413  * @paddr_unaligned: Unaligned physical address.
414  * @paddr_aligned: Aligned physical address.
415  * @align: Base address alignment.
416  * @func: Function name of the call site.
417  * @line: Line number of the call site.
418  *
419  * This function will dynamically allocate the specified number of bytes of
420  * memory. Checks if the allocated base address is aligned with base_align.
421  * If not, it frees the allocated memory, adds base_align to alloc size and
422  * re-allocates the memory.
423  *
424  * Return:
425  * Upon successful allocate, returns an aligned base address of the allocated
426  * memory.  If this function is unable to allocate the amount of memory
427  * specified (for any reason) it returns NULL.
428  */
429 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
430 			   paddr_aligned, align) \
431 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
432 			   paddr_aligned, align, __func__, __LINE__)
433 
434 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
435 			    qdf_dma_addr_t *paddr_unaligned,
436 			    qdf_dma_addr_t *paddr_aligned,
437 			    uint32_t align,
438 			    const char *func, uint32_t line);
439 
440 /**
441  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
442  * @osdev: OS device handle
443  * @size: Size to be allocated
444  * @vaddr_unaligned: Unaligned virtual address.
445  * @paddr_unaligned: Unaligned physical address.
446  * @paddr_aligned: Aligned physical address.
447  * @align: Base address alignment.
448  * @func: Function name of the call site.
449  * @line: Line number of the call site.
450  *
451  * Return: pointer of allocated memory or null if memory alloc fails.
452  */
453 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
454 					 paddr_unaligned, paddr_aligned, \
455 					 align) \
456 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
457 					    paddr_unaligned, paddr_aligned, \
458 					    align, __func__, __LINE__)
459 
460 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
461 					  void **vaddr_unaligned,
462 					  qdf_dma_addr_t *paddr_unaligned,
463 					  qdf_dma_addr_t *paddr_aligned,
464 					  uint32_t align, const char *func,
465 					  uint32_t line);
466 
467 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
468 
469 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
470 
471 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
472 					   uint32_t num_bytes);
473 
474 /**
475  * qdf_mem_set() - set (fill) memory with a specified byte value.
476  * @ptr: Pointer to memory that will be set
477  * @num_bytes: Number of bytes to be set
478  * @value: Byte set in memory
479  *
480  * WARNING: parameter @num_bytes and @value are swapped comparing with
481  * standard C function "memset", please ensure correct usage of this function!
482  *
483  * Return: None
484  */
485 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
486 
/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}
501 
502 /**
503  * qdf_mem_copy() - copy memory
504  * @dst_addr: Pointer to destination memory location (to copy to)
505  * @src_addr: Pointer to source memory location (to copy from)
506  * @num_bytes: Number of bytes to copy.
507  *
508  * Copy host memory from one location to another, similar to memcpy in
509  * standard C.  Note this function does not specifically handle overlapping
510  * source and destination memory locations.  Calling this function with
511  * overlapping source and destination memory locations will result in
512  * unpredictable results.  Use qdf_mem_move() if the memory locations
513  * for the source and destination are overlapping (or could be overlapping!)
514  *
515  * Return: none
516  */
517 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
518 
519 /**
520  * qdf_mem_move() - move memory
521  * @dst_addr: pointer to destination memory location (to move to)
522  * @src_addr: pointer to source memory location (to move from)
523  * @num_bytes: number of bytes to move.
524  *
525  * Move host memory from one location to another, similar to memmove in
526  * standard C.  Note this function *does* handle overlapping
527  * source and destination memory locations.
 *
529  * Return: None
530  */
531 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
532 
533 /**
534  * qdf_mem_cmp() - memory compare
535  * @left: pointer to one location in memory to compare
536  * @right: pointer to second location in memory to compare
537  * @size: the number of bytes to compare
538  *
539  * Function to compare two pieces of memory, similar to memcmp function
540  * in standard C.
541  *
542  * Return:
543  *	0 -- equal
544  *	< 0 -- *memory1 is less than *memory2
545  *	> 0 -- *memory1 is bigger than *memory2
546  */
547 int qdf_mem_cmp(const void *left, const void *right, size_t size);
548 
549 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
550 
/**
 * qdf_mem_map_nbytes_single - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	/* No DMA mapping needed on non-PCI/IPCI targets */
	return 0;
#endif
}
571 
/**
 * qdf_mem_dma_cache_sync() - synchronize CPU cache for a DMA buffer
 * @osdev: OS device handle
 * @buf: DMA address of the buffer to sync
 * @dir: DMA direction of the transfer
 * @nbytes: number of bytes to sync
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}
579 
/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}
598 
/**
 * qdf_mempool_init - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: int status from __qdf_mempool_init(), presumably 0 on success
 * and non-zero on failure. NOTE(review): previous doc claimed "Handle to
 * memory pool or NULL", but the function returns int and the handle is
 * delivered through @pool_addr — confirm against __qdf_mempool_init().
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}
615 
/**
 * qdf_mempool_destroy - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}
626 
/**
 * qdf_mempool_alloc - Allocate an element memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}
637 
/**
 * qdf_mempool_free - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}
650 
651 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
652 					qdf_dma_addr_t bus_addr,
653 					qdf_size_t size,
654 					__dma_data_direction direction);
655 
656 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
657 					qdf_dma_addr_t bus_addr,
658 					qdf_size_t size,
659 					__dma_data_direction direction);
660 
661 int qdf_mem_multi_page_link(qdf_device_t osdev,
662 		struct qdf_mem_multi_page_t *pages,
663 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
664 
665 /**
666  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
667  * @size: number of bytes to increment by
668  *
669  * Return: None
670  */
671 void qdf_mem_kmalloc_inc(qdf_size_t size);
672 
673 /**
674  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
675  * @size: number of bytes to decrement by
676  *
677  * Return: None
678  */
679 void qdf_mem_kmalloc_dec(qdf_size_t size);
680 
681 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
682 /**
683  * qdf_mem_skb_inc() - increment total skb allocation size
684  * @size: size to be added
685  *
686  * Return: none
687  */
688 void qdf_mem_skb_inc(qdf_size_t size);
689 
690 /**
691  * qdf_mem_skb_dec() - decrement total skb allocation size
692  * @size: size to be decremented
693  *
694  * Return: none
695  */
696 void qdf_mem_skb_dec(qdf_size_t size);
697 
698 /**
699  * qdf_mem_skb_total_inc() - increment total skb allocation size
700  * in host driver in both debug and perf builds
701  * @size: size to be added
702  *
703  * Return: none
704  */
705 void qdf_mem_skb_total_inc(qdf_size_t size);
706 
707 /**
708  * qdf_mem_skb_total_dec() - decrement total skb allocation size
709  * in the host driver in debug and perf flavors
710  * @size: size to be decremented
711  *
712  * Return: none
713  */
714 void qdf_mem_skb_total_dec(qdf_size_t size);
715 
716 /**
717  * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
718  * @size: size to be added
719  *
720  * Return: none
721  */
722 void qdf_mem_dp_tx_skb_inc(qdf_size_t size);
723 
724 /**
725  * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
726  * @size: size to be decreased
727  *
728  * Return: none
729  */
730 void qdf_mem_dp_tx_skb_dec(qdf_size_t size);
731 
732 /**
733  * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
734  * @size: size to be added
735  *
736  * Return: none
737  */
738 void qdf_mem_dp_rx_skb_inc(qdf_size_t size);
739 
740 /**
741  * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
742  * @size: size to be decreased
743  *
744  * Return: none
745  */
746 void qdf_mem_dp_rx_skb_dec(qdf_size_t size);
747 
748 /**
749  * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
750  *
751  * Return: none
752  */
753 void qdf_mem_dp_tx_skb_cnt_inc(void);
754 
755 /**
756  * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
757  *
758  * Return: none
759  */
760 void qdf_mem_dp_tx_skb_cnt_dec(void);
761 
762 /**
763  * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
764  *
765  * Return: none
766  */
767 void qdf_mem_dp_rx_skb_cnt_inc(void);
768 
769 /**
770  * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
771  *
772  * Return: none
773  */
774 void qdf_mem_dp_rx_skb_cnt_dec(void);
775 #else
776 
/*
 * CONFIG_WLAN_SYSFS_MEM_STATS is disabled: all skb/buffer accounting
 * helpers below are intentionally empty no-op stubs.
 */
static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
{
}
824 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
825 
826 /**
827  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
828  * @num: number of required storage
829  *
830  * Allocate mapping table for DMA memory allocation. This is needed for
831  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
832  *
833  * Return: shared memory info storage table pointer
834  */
835 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
836 {
837 	qdf_mem_info_t *mem_info_arr;
838 
839 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
840 	return mem_info_arr;
841 }
842 
843 #ifdef ENHANCED_OS_ABSTRACTION
844 /**
845  * qdf_update_mem_map_table() - Update DMA memory map info
846  * @osdev: Parent device instance
847  * @mem_info: Pointer to shared memory information
848  * @dma_addr: dma address
849  * @mem_size: memory size allocated
850  *
851  * Store DMA shared memory information
852  *
853  * Return: none
854  */
855 void qdf_update_mem_map_table(qdf_device_t osdev,
856 			      qdf_mem_info_t *mem_info,
857 			      qdf_dma_addr_t dma_addr,
858 			      uint32_t mem_size);
859 
860 /**
861  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
862  * @osdev: Parent device instance
863  * @dma_addr: DMA/IOVA address
864  *
865  * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
867  * (IOVA) otherwise returns physical address. So get SMMU physical address
868  * mapping from IOVA.
869  *
870  * Return: dmaable physical address
871  */
872 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
873 					  qdf_dma_addr_t dma_addr);
874 #else
/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}
888 
/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Return: dmaable physical address
 */
static inline
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}
895 #endif
896 
/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return __qdf_mem_smmu_s1_enabled(osdev);
}
907 
/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Return: int status from __qdf_os_mem_dma_get_sgtable(), presumably 0 on
 * success. NOTE(review): previous doc said "physical address", but the
 * return type is int — confirm against the platform implementation.
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			qdf_dma_addr_t dma_addr, size_t size)
{
	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
924 
/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
	__qdf_os_mem_free_sgtable(sgt);
}
936 
/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	__qdf_dma_get_sgtable_dma_addr(sgt);
}
948 
/**
 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return IO virtual address otherwise
 * returns physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
						  qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr(osdev, mem_info);
}
965 
/**
 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement, return corresponding dma
 * address storage pointer.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
						       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
}
981 
982 
/**
 * qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_size(osdev, mem_info);
}
996 
997 /**
998  * qdf_mem_set_dma_size() - Set DMA memory size
999  * @osdev: parent device instance
1000  * @mem_info: Pointer to allocated memory information
1001  * @mem_size: memory size allocated
1002  *
1003  * Return: none
1004  */
1005 static inline void
1006 qdf_mem_set_dma_size(qdf_device_t osdev,
1007 		       qdf_mem_info_t *mem_info,
1008 		       uint32_t mem_size)
1009 {
1010 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
1011 }
1012 
/**
 * qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_pa(osdev, mem_info);
}
1026 
/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}
1042 
/**
 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
 * @osdev: parent device instance
 * @size: size to be allocated
 *
 * Allocate DMA memory which will be shared with external kernel module. This
 * information is needed for SMMU mapping.
 *
 * Return: Pointer to allocated shared memory on success, NULL on failure
 */
1054 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
1055 
1056 /**
1057  * qdf_mem_shared_mem_free() - Free shared memory
1058  * @osdev: parent device instance
1059  * @shared_mem: shared memory information storage
1060  *
1061  * Free DMA shared memory resource
1062  *
1063  * Return: None
1064  */
1065 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
1066 					   qdf_shared_mem_t *shared_mem)
1067 {
1068 	if (!shared_mem) {
1069 		qdf_nofl_err("%s: NULL shared mem struct passed",
1070 			     __func__);
1071 		return;
1072 	}
1073 
1074 	if (shared_mem->vaddr) {
1075 		qdf_mem_free_consistent(osdev, osdev->dev,
1076 					qdf_mem_get_dma_size(osdev,
1077 						&shared_mem->mem_info),
1078 					shared_mem->vaddr,
1079 					qdf_mem_get_dma_addr(osdev,
1080 						&shared_mem->mem_info),
1081 					qdf_get_dma_mem_context(shared_mem,
1082 								memctx));
1083 	}
1084 	qdf_mem_free_sgtable(&shared_mem->sgtable);
1085 	qdf_mem_free(shared_mem);
1086 }
1087 
1088 /**
1089  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
1090  * host driver
1091  *
1092  * Return: Total DMA memory allocated
1093  */
1094 int32_t qdf_dma_mem_stats_read(void);
1095 
1096 /**
1097  * qdf_heap_mem_stats_read() - Return the heap memory allocated
1098  * in host driver
1099  *
1100  * Return: Total heap memory allocated
1101  */
1102 int32_t qdf_heap_mem_stats_read(void);
1103 
1104 /**
1105  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1106  * host driver
1107  *
1108  * Return: Total SKB memory allocated
1109  */
1110 int32_t qdf_skb_mem_stats_read(void);
1111 
1112 /**
1113  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1114  * in the host driver tracked in both debug and perf builds
1115  *
1116  * Return: Total SKB memory allocated
1117  */
1118 int32_t qdf_skb_total_mem_stats_read(void);
1119 
1120 /**
1121  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1122  * allocated in host driver. This is the high watermark for the
1123  * total SKB allocated in the host driver
1124  *
 * Return: Max SKB memory allocated
1126  */
1127 int32_t qdf_skb_max_mem_stats_read(void);
1128 
1129 /**
1130  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1131  * which are waiting on Tx completions
1132  *
1133  * Return: Outstanding Tx desc count
1134  */
1135 int32_t qdf_mem_tx_desc_cnt_read(void);
1136 
1137 /**
1138  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1139  * descs which are waiting on Tx completions. This is the high
1140  * watermark for the pending desc count
1141  *
1142  * Return: Max outstanding Tx desc count
1143  */
1144 int32_t qdf_mem_tx_desc_max_read(void);
1145 
1146 /**
1147  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1148  * creating the sysfs node
1149  *
1150  * Return: None
1151  */
1152 void qdf_mem_stats_init(void);
1153 
1154 /**
1155  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1156  * allocated for Tx data path
1157  *
1158  * Return: Tx SKB memory allocated
1159  */
1160 int32_t qdf_dp_tx_skb_mem_stats_read(void);
1161 
1162 /**
1163  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1164  * allocated for Rx data path
1165  *
1166  * Return: Rx SKB memory allocated
1167  */
1168 int32_t qdf_dp_rx_skb_mem_stats_read(void);
1169 
1170 /**
1171  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1172  * watermark for the SKB memory allocated for Tx data path
1173  *
1174  * Return: Max Tx SKB memory allocated
1175  */
1176 int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1177 
1178 /**
1179  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1180  * watermark for the SKB memory allocated for Rx data path
1181  *
1182  * Return: Max Rx SKB memory allocated
1183  */
1184 int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1185 
1186 /**
1187  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1188  * allocated in the Tx data path by the host driver or
1189  * buffers coming from the n/w stack
1190  *
1191  * Return: Number of DP Tx buffers allocated
1192  */
1193 int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1194 
1195 /**
1196  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1197  * buffers allocated in the Tx data path
1198  *
1199  * Return: Max number of DP Tx buffers allocated
1200  */
1201 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1202 
1203 /**
1204  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1205  * allocated in the Rx data path
1206  *
1207  * Return: Number of DP Rx buffers allocated
1208  */
1209 int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1210 
1211 /**
1212  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1213  * buffers allocated in the Rx data path
1214  *
1215  * Return: Max number of DP Rx buffers allocated
1216  */
1217 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1218 
1219 /**
1220  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1221  * count and the high watermark for pending tx desc count
1222  *
1223  * @pending_tx_descs: outstanding Tx desc count
1224  * @tx_descs_max: high watermark for outstanding Tx desc count
1225  *
1226  * Return: None
1227  */
1228 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1229 				int32_t tx_descs_max);
1230 
1231 /**
1232  * qdf_mem_vfree() - Free the virtual memory pointed to by ptr
1233  * @ptr: Pointer to the starting address of the memory to
1234  * be freed.
1235  *
1236  * Return: None
1237  */
1238 #define qdf_mem_vfree(ptr)   __qdf_mem_vfree(ptr)
1239 
1240 /**
1241  * qdf_mem_valloc() - Allocate virtual memory for the given
1242  * size
1243  * @size: Number of bytes of memory to be allocated
1244  *
1245  * Return: Pointer to the starting address of the allocated virtual memory
1246  */
1247 #define qdf_mem_valloc(size) __qdf_mem_valloc(size, __func__, __LINE__)
1248 
1249 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
1250 /*
 * typedef qdf_iommu_domain_t: Platform independent iommu domain
1252  * abstraction
1253  */
1254 typedef __qdf_iommu_domain_t qdf_iommu_domain_t;
1255 
1256 /**
1257  * qdf_iommu_domain_get_attr() - API to get iommu domain attributes
1258  * @domain: iommu domain
1259  * @attr: iommu attribute
1260  * @data: data pointer
1261  *
1262  * Return: 0 on success, else errno
1263  */
1264 int
1265 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
1266 			  enum qdf_iommu_attr attr, void *data);
1267 #endif
1268 #endif /* __QDF_MEMORY_H */
1269