1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * QCA driver framework (QDF) memory management APIs
22  */
23 
24 #if !defined(__QDF_MEMORY_H)
25 #define __QDF_MEMORY_H
26 
27 /* Include Files */
28 #include <qdf_types.h>
29 #include <i_qdf_mem.h>
30 #include <i_qdf_trace.h>
31 #include <qdf_atomic.h>
32 
33 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
34 
35 /**
36  * qdf_align() - align to the given size.
37  * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
39  *
40  * Return: aligned value.
41  */
42 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
43 #define qdf_page_size __page_size
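
/*
 * Example (illustrative only; 'len' is a hypothetical buffer length): round
 * a size up to the cache line boundary before using it for an allocation.
 *
 *	len = qdf_align(len, QDF_CACHE_LINE_SZ);
 */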
44 
45 /**
46  * struct qdf_mem_dma_page_t - Allocated dmaable page
47  * @page_v_addr_start: Page start virtual address
48  * @page_v_addr_end: Page end virtual address
49  * @page_p_addr: Page start physical address
50  */
51 struct qdf_mem_dma_page_t {
52 	char *page_v_addr_start;
53 	char *page_v_addr_end;
54 	qdf_dma_addr_t page_p_addr;
55 };
56 
57 /**
58  * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of elements in a single page
 * @num_pages: Number of pages needed for the allocation
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size of each page
 * @is_mem_prealloc: flag indicating whether the pages were pre-allocated
64  */
65 struct qdf_mem_multi_page_t {
66 	uint16_t num_element_per_page;
67 	uint16_t num_pages;
68 	struct qdf_mem_dma_page_t *dma_pages;
69 	void **cacheable_pages;
70 	qdf_size_t page_size;
71 #ifdef DP_MEM_PRE_ALLOC
72 	uint8_t is_mem_prealloc;
73 #endif
74 };
75 
76 
77 /* Preprocessor definitions and constants */
78 
79 typedef __qdf_mempool_t qdf_mempool_t;
80 
81 /**
82  * qdf_mem_init() - Initialize QDF memory module
83  *
84  * Return: None
85  *
86  */
87 void qdf_mem_init(void);
88 
89 /**
90  * qdf_mem_exit() - Exit QDF memory module
91  *
92  * Return: None
93  *
94  */
95 void qdf_mem_exit(void);
96 
97 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
98 #define qdf_untracked_mem_malloc(size) \
99 	__qdf_untracked_mem_malloc(size, __func__, __LINE__)
100 
101 #define qdf_untracked_mem_free(ptr) \
102 	__qdf_untracked_mem_free(ptr)
103 #endif
104 
105 #define QDF_MEM_FUNC_NAME_SIZE 48
106 
107 #ifdef MEMORY_DEBUG
108 /**
109  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
110  *
111  * Return: value of mem_debug_disabled qdf module argument
112  */
113 bool qdf_mem_debug_config_get(void);
114 
115 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
116 /**
 * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
118  * @str_value: value of the module param
119  *
120  * This function will set qdf module param mem_debug_disabled
121  *
122  * Return: QDF_STATUS_SUCCESS on Success
123  */
124 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
125 #endif
126 
127 /**
128  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
129  * @size: Number of bytes of memory to allocate.
130  * @func: Function name of the call site
131  * @line: Line number of the call site
132  * @caller: Address of the caller function
133  * @flag: GFP flag
134  *
 * This function will dynamically allocate the specified number of bytes of
136  * memory and add it to the qdf tracking list to check for memory leaks and
137  * corruptions
138  *
139  * Return: A valid memory location on success, or NULL on failure
140  */
141 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
142 			   void *caller, uint32_t flag);
143 
144 #define qdf_mem_malloc(size) \
145 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
146 
147 #define qdf_mem_malloc_fl(size, func, line) \
148 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
149 
150 #define qdf_mem_malloc_atomic(size) \
151 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
152 /**
153  * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @func: Function name of the call site
 * @line: Line number of the call site
155  *
156  * This function will free the memory pointed to by 'ptr'. It also checks for
157  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
158  *
159  * Return: none
160  */
void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line);
162 
163 #define qdf_mem_free(ptr) \
164 	qdf_mem_free_debug(ptr, __func__, __LINE__)
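
/*
 * Typical usage sketch (illustrative only; 'struct foo' and the error
 * handling are hypothetical). In debug builds the allocation is tracked for
 * leak detection, so it must be released with qdf_mem_free():
 *
 *	struct foo *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(ctx);
 */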
165 
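/**
 * qdf_mem_multi_pages_alloc_debug() - debug version of the multiple pages
 * allocation API
 * @osdev: OS device handle
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements to be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: None
 */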
166 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
167 				     struct qdf_mem_multi_page_t *pages,
168 				     size_t element_size, uint16_t element_num,
169 				     qdf_dma_context_t memctxt, bool cacheable,
170 				     const char *func, uint32_t line,
171 				     void *caller);
172 
173 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
174 				  memctxt, cacheable) \
175 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
176 					element_num, memctxt, cacheable, \
177 					__func__, __LINE__, QDF_RET_IP)
178 
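/**
 * qdf_mem_multi_pages_free_debug() - debug version of the multiple pages
 * free API
 * @osdev: OS device handle
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: None
 */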
179 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
180 				    struct qdf_mem_multi_page_t *pages,
181 				    qdf_dma_context_t memctxt, bool cacheable,
182 				    const char *func, uint32_t line);
183 
184 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
185 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
186 				       __func__, __LINE__)
187 
188 /**
189  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
190  *
191  * Call this to ensure there are no active memory allocations being tracked
192  * against the current debug domain. For example, one should call this function
193  * immediately before a call to qdf_debug_domain_set() as a memory leak
194  * detection mechanism.
195  *
196  * e.g.
197  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
198  *
199  *	...
200  *
201  *	// memory is allocated and freed
202  *
203  *	...
204  *
205  *	// before transitioning back to inactive state,
206  *	// make sure all active memory has been freed
207  *	qdf_mem_check_for_leaks();
208  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
209  *
210  *	...
211  *
212  *	// also, before program exit, make sure init time memory is freed
213  *	qdf_mem_check_for_leaks();
214  *	exit();
215  *
216  * Return: None
217  */
218 void qdf_mem_check_for_leaks(void);
219 
220 /**
221  * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
222  * @osdev: OS device handle
223  * @dev: Pointer to device handle
224  * @size: Size to be allocated
225  * @paddr: Physical address
226  * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: pointer to the allocated memory or NULL if the allocation fails
231  */
232 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
233 				     qdf_size_t size, qdf_dma_addr_t *paddr,
234 				     const char *func, uint32_t line,
235 				     void *caller);
236 
237 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
238 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
239 				       __func__, __LINE__, QDF_RET_IP)
240 
241 /**
242  * qdf_mem_free_consistent_debug() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory that was allocated
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 * @func: Function name of the call site
 * @line: Line number of the call site
250  *
251  * Return: none
252  */
253 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
254 				   qdf_size_t size, void *vaddr,
255 				   qdf_dma_addr_t paddr,
256 				   qdf_dma_context_t memctx,
257 				   const char *func, uint32_t line);
258 
259 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
260 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
261 				  __func__, __LINE__)
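
/*
 * Usage sketch (illustrative only; 'ring_size' and 'memctx' are assumed to
 * be provided by the caller): coherent memory obtained through
 * qdf_mem_alloc_consistent() must be released with qdf_mem_free_consistent()
 * using the same size and addresses.
 *
 *	void *vaddr;
 *	qdf_dma_addr_t paddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_size, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size, vaddr, paddr,
 *				memctx);
 */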
262 #else
263 static inline bool qdf_mem_debug_config_get(void)
264 {
265 	return false;
266 }
267 
268 static inline
269 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
270 {
271 	return QDF_STATUS_SUCCESS;
272 }
273 
274 /**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
285  */
286 #define qdf_mem_malloc(size) \
287 	__qdf_mem_malloc(size, __func__, __LINE__)
288 
289 #define qdf_mem_malloc_fl(size, func, line) \
290 	__qdf_mem_malloc(size, func, line)
291 
292 /**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
303  */
304 #define qdf_mem_malloc_atomic(size) \
305 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
306 
307 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
308 			       const char *func,
309 			       uint32_t line);
310 
311 #define qdf_mem_free(ptr) \
312 	__qdf_mem_free(ptr)
313 
314 static inline void qdf_mem_check_for_leaks(void) { }
315 
316 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
317 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
318 
319 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
320 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
321 
322 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
323 			       struct qdf_mem_multi_page_t *pages,
324 			       size_t element_size, uint16_t element_num,
325 			       qdf_dma_context_t memctxt, bool cacheable);
326 
327 void qdf_mem_multi_pages_free(qdf_device_t osdev,
328 			      struct qdf_mem_multi_page_t *pages,
329 			      qdf_dma_context_t memctxt, bool cacheable);
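
/*
 * Usage sketch (illustrative only; 'desc_size', 'desc_num' and 'memctx' are
 * assumed to be defined by the caller, and error handling is omitted):
 * allocate a table of fixed-size elements spread across multiple pages and
 * release it with the matching free call.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, desc_size, desc_num,
 *				  memctx, true);
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctx, true);
 */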
330 
331 #endif /* MEMORY_DEBUG */
332 
333 /**
 * qdf_mem_malloc_flags() - Get memory allocation flags
 *
 * Return the flag to be used for memory allocation
 * based on the current execution context
 *
 * Return: Based on the context, the GFP flag to be used
 * for memory allocation
341  */
342 int qdf_mem_malloc_flags(void);
343 
344 /**
345  * qdf_prealloc_disabled_config_get() - Get the user configuration of
346  *                                      prealloc_disabled
347  *
348  * Return: value of prealloc_disabled qdf module argument
349  */
350 bool qdf_prealloc_disabled_config_get(void);
351 
352 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
353 /**
354  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
355  * @str_value: value of the module param
356  *
357  * This function will set qdf module param prealloc_disabled
358  *
359  * Return: QDF_STATUS_SUCCESS on Success
360  */
361 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
362 #endif
363 
364 /**
365  * qdf_mem_multi_pages_zero() - zero out each page memory
366  * @pages: Multi page information storage
367  * @cacheable: Coherent memory or cacheable memory
368  *
369  * This function will zero out each page memory
370  *
371  * Return: None
372  */
373 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
374 			      bool cacheable);
375 
376 /**
377  * qdf_aligned_malloc() - allocates aligned QDF memory.
378  * @size: Size to be allocated
379  * @vaddr_unaligned: Unaligned virtual address.
380  * @paddr_unaligned: Unaligned physical address.
381  * @paddr_aligned: Aligned physical address.
382  * @align: Base address alignment.
383  * @func: Function name of the call site.
384  * @line: Line number of the call site.
385  *
386  * This function will dynamically allocate the specified number of bytes of
 * memory. Checks if the allocated base address is aligned to @align.
 * If not, it frees the allocated memory, adds @align to the allocation size
 * and re-allocates the memory.
390  *
391  * Return:
 * Upon successful allocation, returns an aligned base address of the allocated
393  * memory.  If this function is unable to allocate the amount of memory
394  * specified (for any reason) it returns NULL.
395  */
396 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
397 			   paddr_aligned, align) \
398 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
399 			   paddr_aligned, align, __func__, __LINE__)
400 
401 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
402 			    qdf_dma_addr_t *paddr_unaligned,
403 			    qdf_dma_addr_t *paddr_aligned,
404 			    uint32_t align,
405 			    const char *func, uint32_t line);
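
/*
 * Usage sketch (illustrative only; 'ring_size' and the 8-byte alignment are
 * hypothetical): the aligned virtual address is returned, while the
 * unaligned address/size describe the underlying allocation.
 *
 *	uint32_t size = ring_size;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *vaddr_aligned;
 *
 *	vaddr_aligned = qdf_aligned_malloc(&size, &vaddr_unaligned,
 *					   &paddr_unaligned, &paddr_aligned,
 *					   8);
 *	if (!vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 */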
406 
407 /**
408  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
409  * @osdev: OS device handle
410  * @size: Size to be allocated
411  * @vaddr_unaligned: Unaligned virtual address.
412  * @paddr_unaligned: Unaligned physical address.
413  * @paddr_aligned: Aligned physical address.
414  * @align: Base address alignment.
415  * @func: Function name of the call site.
416  * @line: Line number of the call site.
417  *
 * Return: pointer to the allocated memory or NULL if the allocation fails.
419  */
420 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
421 					 paddr_unaligned, paddr_aligned, \
422 					 align) \
423 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
424 					    paddr_unaligned, paddr_aligned, \
425 					    align, __func__, __LINE__)
426 
427 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
428 					  void **vaddr_unaligned,
429 					  qdf_dma_addr_t *paddr_unaligned,
430 					  qdf_dma_addr_t *paddr_aligned,
431 					  uint32_t align, const char *func,
432 					  uint32_t line);
433 
434 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
435 
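/**
 * qdf_mem_set_io() - set (fill) IO memory with a specified byte value
 * @ptr: Pointer to IO memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte to be written into memory
 *
 * Return: None
 */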
436 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
437 
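/**
 * qdf_mem_copy_toio() - copy memory into an IO memory region
 * @dst_addr: Pointer to destination IO memory (to copy to)
 * @src_addr: Pointer to source memory (to copy from)
 * @num_bytes: Number of bytes to copy
 *
 * Return: None
 */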
438 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
439 					   uint32_t num_bytes);
440 
441 /**
442  * qdf_mem_set() - set (fill) memory with a specified byte value.
443  * @ptr: Pointer to memory that will be set
444  * @num_bytes: Number of bytes to be set
445  * @value: Byte set in memory
446  *
447  * WARNING: parameter @num_bytes and @value are swapped comparing with
448  * standard C function "memset", please ensure correct usage of this function!
449  *
450  * Return: None
451  */
452 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
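
/*
 * Example (illustrative only; 'buf' and 'buf_len' are hypothetical): note
 * that, unlike memset(), the length comes before the fill value.
 *
 *	qdf_mem_set(buf, buf_len, 0xff);
 *	qdf_mem_zero(buf, buf_len);
 */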
453 
454 /**
455  * qdf_mem_zero() - zero out memory
456  * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
458  *
459  * This function sets the memory location to all zeros, essentially clearing
460  * the memory.
461  *
462  * Return: None
463  */
464 static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
465 {
466 	qdf_mem_set(ptr, num_bytes, 0);
467 }
468 
469 /**
470  * qdf_mem_copy() - copy memory
471  * @dst_addr: Pointer to destination memory location (to copy to)
472  * @src_addr: Pointer to source memory location (to copy from)
473  * @num_bytes: Number of bytes to copy.
474  *
475  * Copy host memory from one location to another, similar to memcpy in
476  * standard C.  Note this function does not specifically handle overlapping
477  * source and destination memory locations.  Calling this function with
478  * overlapping source and destination memory locations will result in
479  * unpredictable results.  Use qdf_mem_move() if the memory locations
480  * for the source and destination are overlapping (or could be overlapping!)
481  *
482  * Return: none
483  */
484 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
485 
486 /**
487  * qdf_mem_move() - move memory
488  * @dst_addr: pointer to destination memory location (to move to)
489  * @src_addr: pointer to source memory location (to move from)
490  * @num_bytes: number of bytes to move.
491  *
492  * Move host memory from one location to another, similar to memmove in
493  * standard C.  Note this function *does* handle overlapping
494  * source and destination memory locations.
 *
496  * Return: None
497  */
498 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
499 
500 /**
501  * qdf_mem_cmp() - memory compare
502  * @left: pointer to one location in memory to compare
503  * @right: pointer to second location in memory to compare
504  * @size: the number of bytes to compare
505  *
506  * Function to compare two pieces of memory, similar to memcmp function
507  * in standard C.
508  *
509  * Return:
 *	0 -- equal
 *	< 0 -- *left is less than *right
 *	> 0 -- *left is greater than *right
513  */
514 int qdf_mem_cmp(const void *left, const void *right, size_t size);
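
/*
 * Example (illustrative only; the two MAC address buffers are hypothetical):
 * copy one buffer and verify both match; a zero return from qdf_mem_cmp()
 * means the buffers are equal.
 *
 *	qdf_mem_copy(dst_mac, src_mac, QDF_MAC_ADDR_SIZE);
 *	if (!qdf_mem_cmp(dst_mac, src_mac, QDF_MAC_ADDR_SIZE))
 *		match = true;
 */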
515 
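/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 * @dst_addr: Pointer to the destination Ethernet address (6 bytes)
 * @src_addr: Pointer to the source Ethernet address (6 bytes)
 *
 * Return: None
 */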
516 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
517 
518 /**
 * qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive the physical address.
525  *
526  * Return: success/failure
527  */
528 static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
529 						 qdf_dma_dir_t dir, int nbytes,
530 						 qdf_dma_addr_t *phy_addr)
531 {
532 #if defined(HIF_PCI) || defined(HIF_IPCI)
533 	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
534 #else
535 	return 0;
536 #endif
537 }
538 
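/**
 * qdf_mem_dma_cache_sync() - Sync the CPU cache for a DMA buffer
 * @osdev: OS device handle
 * @buf: DMA address of the buffer to be synced
 * @dir: DMA direction
 * @nbytes: Number of bytes to be synced
 *
 * Return: None
 */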
539 static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
540 					  qdf_dma_addr_t buf,
541 					  qdf_dma_dir_t dir,
542 					  int nbytes)
543 {
544 	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
545 }
546 
547 /**
 * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
550  * @phy_addr: physical address of memory to be dma unmapped
551  * @dir: DMA unmap direction
552  * @nbytes: number of bytes to be unmapped.
553  *
554  * Return: none
555  */
556 static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
557 					       qdf_dma_addr_t phy_addr,
558 					       qdf_dma_dir_t dir,
559 					       int nbytes)
560 {
561 #if defined(HIF_PCI) || defined(HIF_IPCI)
562 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
563 #endif
564 }
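
/*
 * Usage sketch (illustrative only; 'buf' and 'len' are assumed to exist, and
 * a QDF_STATUS_SUCCESS return is assumed to indicate a successful mapping):
 * map a buffer for device access and unmap it once the DMA transfer is done.
 *
 *	qdf_dma_addr_t paddr;
 *
 *	if (qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_TO_DEVICE,
 *				      len, &paddr) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	qdf_mem_unmap_nbytes_single(osdev, paddr, QDF_DMA_TO_DEVICE, len);
 */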
565 
566 /**
 * qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address at which the created pool handle is returned
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, non-zero on failure
574  */
575 static inline int qdf_mempool_init(qdf_device_t osdev,
576 				   qdf_mempool_t *pool_addr, int elem_cnt,
577 				   size_t elem_size, uint32_t flags)
578 {
579 	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
580 				  flags);
581 }
582 
583 /**
 * qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
587  * Return: none
588  */
589 static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
590 {
591 	__qdf_mempool_destroy(osdev, pool);
592 }
593 
594 /**
 * qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
598  * Return: Pointer to the allocated element or NULL if the pool is empty
599  */
600 static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
601 {
602 	return (void *)__qdf_mempool_alloc(osdev, pool);
603 }
604 
605 /**
 * qdf_mempool_free() - Free a memory pool element
607  * @osdev: Platform device object
608  * @pool: Handle to memory pool
609  * @buf: Element to be freed
610  * Return: none
611  */
612 static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
613 				    void *buf)
614 {
615 	__qdf_mempool_free(osdev, pool, buf);
616 }
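
/*
 * Usage sketch (illustrative only; the element count/size and 'struct
 * tx_desc' are hypothetical): create a pool, carve elements out of it and
 * destroy it when done.
 *
 *	qdf_mempool_t pool;
 *	struct tx_desc *desc;
 *
 *	if (qdf_mempool_init(osdev, &pool, 1024, sizeof(*desc), 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	desc = qdf_mempool_alloc(osdev, pool);
 *	if (desc) {
 *		...
 *		qdf_mempool_free(osdev, pool, desc);
 *	}
 *	qdf_mempool_destroy(osdev, pool);
 */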
617 
618 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
619 					qdf_dma_addr_t bus_addr,
620 					qdf_size_t size,
621 					__dma_data_direction direction);
622 
623 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
624 					qdf_dma_addr_t bus_addr,
625 					qdf_size_t size,
626 					__dma_data_direction direction);
627 
628 int qdf_mem_multi_page_link(qdf_device_t osdev,
629 		struct qdf_mem_multi_page_t *pages,
630 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
631 
632 /**
633  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
634  * @size: number of bytes to increment by
635  *
636  * Return: None
637  */
638 void qdf_mem_kmalloc_inc(qdf_size_t size);
639 
640 /**
641  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
642  * @size: number of bytes to decrement by
643  *
644  * Return: None
645  */
646 void qdf_mem_kmalloc_dec(qdf_size_t size);
647 
648 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
649 /**
650  * qdf_mem_skb_inc() - increment total skb allocation size
651  * @size: size to be added
652  *
653  * Return: none
654  */
655 void qdf_mem_skb_inc(qdf_size_t size);
656 
657 /**
658  * qdf_mem_skb_dec() - decrement total skb allocation size
659  * @size: size to be decremented
660  *
661  * Return: none
662  */
663 void qdf_mem_skb_dec(qdf_size_t size);
664 
665 /**
666  * qdf_mem_skb_total_inc() - increment total skb allocation size
667  * in host driver in both debug and perf builds
668  * @size: size to be added
669  *
670  * Return: none
671  */
672 void qdf_mem_skb_total_inc(qdf_size_t size);
673 
674 /**
675  * qdf_mem_skb_total_dec() - decrement total skb allocation size
676  * in the host driver in debug and perf flavors
677  * @size: size to be decremented
678  *
679  * Return: none
680  */
681 void qdf_mem_skb_total_dec(qdf_size_t size);
682 
683 /**
684  * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
685  * @size: size to be added
686  *
687  * Return: none
688  */
689 void qdf_mem_dp_tx_skb_inc(qdf_size_t size);
690 
691 /**
692  * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
693  * @size: size to be decreased
694  *
695  * Return: none
696  */
697 void qdf_mem_dp_tx_skb_dec(qdf_size_t size);
698 
699 /**
700  * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
701  * @size: size to be added
702  *
703  * Return: none
704  */
705 void qdf_mem_dp_rx_skb_inc(qdf_size_t size);
706 
707 /**
708  * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
709  * @size: size to be decreased
710  *
711  * Return: none
712  */
713 void qdf_mem_dp_rx_skb_dec(qdf_size_t size);
714 
715 /**
716  * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
717  *
718  * Return: none
719  */
720 void qdf_mem_dp_tx_skb_cnt_inc(void);
721 
722 /**
723  * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
724  *
725  * Return: none
726  */
727 void qdf_mem_dp_tx_skb_cnt_dec(void);
728 
729 /**
730  * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
731  *
732  * Return: none
733  */
734 void qdf_mem_dp_rx_skb_cnt_inc(void);
735 
736 /**
737  * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
738  *
739  * Return: none
740  */
741 void qdf_mem_dp_rx_skb_cnt_dec(void);
742 #else
743 
744 static inline void qdf_mem_skb_inc(qdf_size_t size)
745 {
746 }
747 
748 static inline void qdf_mem_skb_dec(qdf_size_t size)
749 {
750 }
751 
752 static inline void qdf_mem_skb_total_inc(qdf_size_t size)
753 {
754 }
755 
756 static inline void qdf_mem_skb_total_dec(qdf_size_t size)
757 {
758 }
759 
760 static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
761 {
762 }
763 
764 static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
765 {
766 }
767 
768 static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
769 {
770 }
771 
772 static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
773 {
774 }
775 
776 static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
777 {
778 }
779 
780 static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
781 {
782 }
783 
784 static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
785 {
786 }
787 
788 static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
789 {
790 }
791 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
792 
793 /**
794  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of mapping table entries required
796  *
797  * Allocate mapping table for DMA memory allocation. This is needed for
798  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
799  *
800  * Return: shared memory info storage table pointer
801  */
802 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
803 {
804 	qdf_mem_info_t *mem_info_arr;
805 
806 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
807 	return mem_info_arr;
808 }
809 
810 /**
811  * qdf_update_mem_map_table() - Update DMA memory map info
812  * @osdev: Parent device instance
813  * @mem_info: Pointer to shared memory information
814  * @dma_addr: dma address
815  * @mem_size: memory size allocated
816  *
817  * Store DMA shared memory information
818  *
819  * Return: none
820  */
821 static inline void qdf_update_mem_map_table(qdf_device_t osdev,
822 					    qdf_mem_info_t *mem_info,
823 					    qdf_dma_addr_t dma_addr,
824 					    uint32_t mem_size)
825 {
826 	if (!mem_info) {
827 		qdf_nofl_err("%s: NULL mem_info", __func__);
828 		return;
829 	}
830 
831 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
832 }
833 
834 /**
835  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
837  *
838  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
839  */
840 static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
841 {
842 	return __qdf_mem_smmu_s1_enabled(osdev);
843 }
844 
845 /**
846  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
847  * @osdev: Parent device instance
848  * @dma_addr: DMA/IOVA address
849  *
850  * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return an IO virtual
 * address (IOVA); otherwise they return a physical address. So get the SMMU
 * physical address mapping from the IOVA.
854  *
855  * Return: dmaable physical address
856  */
857 static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
858 							qdf_dma_addr_t dma_addr)
859 {
860 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
861 }
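
/*
 * Example (illustrative only; 'dma_addr' is assumed to come from a QDF DMA
 * allocation/mapping API): convert an IOVA to a true physical address before
 * handing it to an entity that bypasses the SMMU.
 *
 *	qdf_dma_addr_t pa = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
 */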
862 
863 /**
864  * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
866  * @sgt: scatter gather table pointer
867  * @cpu_addr: HLOS virtual address
868  * @dma_addr: dma address
869  * @size: allocated memory size
870  *
 * Return: 0 on success, negative error code on failure
872  */
873 static inline int
874 qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
875 			qdf_dma_addr_t dma_addr, size_t size)
876 {
877 	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
878 }
879 
880 /**
881  * qdf_mem_free_sgtable() - Free a previously allocated sg table
882  * @sgt: the mapped sg table header
883  *
884  * Return: None
885  */
886 static inline void
887 qdf_mem_free_sgtable(struct sg_table *sgt)
888 {
889 	__qdf_os_mem_free_sgtable(sgt);
890 }
891 
892 /**
893  * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
894  * @sgt: scatter gather table pointer
895  *
896  * Return: None
897  */
898 static inline void
899 qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
900 {
901 	__qdf_dma_get_sgtable_dma_addr(sgt);
902 }
903 
904 /**
905  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
906  * @osdev: Parent device instance
907  * @mem_info: Pointer to allocated memory information
908  *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return an IO virtual address; otherwise
 * they return a physical address.
912  *
913  * Return: dma address
914  */
915 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
916 						  qdf_mem_info_t *mem_info)
917 {
918 	return __qdf_mem_get_dma_addr(osdev, mem_info);
919 }
920 
921 /**
922  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
923  * @osdev: Parent device instance
924  * @mem_info: Pointer to allocated memory information
925  *
926  * Based on smmu stage 1 translation enablement, return corresponding dma
927  * address storage pointer.
928  *
929  * Return: dma address storage pointer
930  */
931 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
932 						       qdf_mem_info_t *mem_info)
933 {
934 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
935 }
936 
937 
938 /**
939  * qdf_mem_get_dma_size() - Return DMA memory size
940  * @osdev: parent device instance
941  * @mem_info: Pointer to allocated memory information
942  *
943  * Return: DMA memory size
944  */
945 static inline uint32_t
946 qdf_mem_get_dma_size(qdf_device_t osdev,
947 		       qdf_mem_info_t *mem_info)
948 {
949 	return __qdf_mem_get_dma_size(osdev, mem_info);
950 }
951 
952 /**
953  * qdf_mem_set_dma_size() - Set DMA memory size
954  * @osdev: parent device instance
955  * @mem_info: Pointer to allocated memory information
956  * @mem_size: memory size allocated
957  *
958  * Return: none
959  */
960 static inline void
961 qdf_mem_set_dma_size(qdf_device_t osdev,
962 		       qdf_mem_info_t *mem_info,
963 		       uint32_t mem_size)
964 {
965 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
966 }
967 
968 /**
 * qdf_mem_get_dma_pa() - Return DMA physical address
970  * @osdev: parent device instance
971  * @mem_info: Pointer to allocated memory information
972  *
973  * Return: DMA physical address
974  */
975 static inline qdf_dma_addr_t
976 qdf_mem_get_dma_pa(qdf_device_t osdev,
977 		     qdf_mem_info_t *mem_info)
978 {
979 	return __qdf_mem_get_dma_pa(osdev, mem_info);
980 }
981 
982 /**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
987  *
988  * Return: none
989  */
990 static inline void
991 qdf_mem_set_dma_pa(qdf_device_t osdev,
992 		     qdf_mem_info_t *mem_info,
993 		     qdf_dma_addr_t dma_pa)
994 {
995 	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
996 }
997 
998 /**
 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
 * @osdev: parent device instance
 * @size: size to be allocated
 *
 * Allocate DMA memory which will be shared with external kernel module. This
 * information is needed for SMMU mapping.
 *
 * Return: Pointer to the allocated shared memory information on success,
 * NULL on failure
1008  */
1009 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
1010 
1011 /**
1012  * qdf_mem_shared_mem_free() - Free shared memory
1013  * @osdev: parent device instance
1014  * @shared_mem: shared memory information storage
1015  *
1016  * Free DMA shared memory resource
1017  *
1018  * Return: None
1019  */
1020 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
1021 					   qdf_shared_mem_t *shared_mem)
1022 {
1023 	if (!shared_mem) {
1024 		qdf_nofl_err("%s: NULL shared mem struct passed",
1025 			     __func__);
1026 		return;
1027 	}
1028 
1029 	if (shared_mem->vaddr) {
1030 		qdf_mem_free_consistent(osdev, osdev->dev,
1031 					qdf_mem_get_dma_size(osdev,
1032 						&shared_mem->mem_info),
1033 					shared_mem->vaddr,
1034 					qdf_mem_get_dma_addr(osdev,
1035 						&shared_mem->mem_info),
1036 					qdf_get_dma_mem_context(shared_mem,
1037 								memctx));
1038 	}
1039 	qdf_mem_free_sgtable(&shared_mem->sgtable);
1040 	qdf_mem_free(shared_mem);
1041 }
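
/*
 * Usage sketch (illustrative only; 'shared_size' is hypothetical): allocate
 * DMA memory to be shared with an external kernel module and release it with
 * the matching free helper.
 *
 *	qdf_shared_mem_t *shared_mem;
 *
 *	shared_mem = qdf_mem_shared_mem_alloc(osdev, shared_size);
 *	if (!shared_mem)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_shared_mem_free(osdev, shared_mem);
 */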
1042 
1043 /**
1044  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
1045  * host driver
1046  *
1047  * Return: Total DMA memory allocated
1048  */
1049 int32_t qdf_dma_mem_stats_read(void);
1050 
1051 /**
1052  * qdf_heap_mem_stats_read() - Return the heap memory allocated
1053  * in host driver
1054  *
1055  * Return: Total heap memory allocated
1056  */
1057 int32_t qdf_heap_mem_stats_read(void);
1058 
1059 /**
1060  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1061  * host driver
1062  *
1063  * Return: Total SKB memory allocated
1064  */
1065 int32_t qdf_skb_mem_stats_read(void);
1066 
1067 /**
1068  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1069  * in the host driver tracked in both debug and perf builds
1070  *
1071  * Return: Total SKB memory allocated
1072  */
1073 int32_t qdf_skb_total_mem_stats_read(void);
1074 
1075 /**
1076  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1077  * allocated in host driver. This is the high watermark for the
1078  * total SKB allocated in the host driver
1079  *
 * Return: Max SKB memory allocated
1081  */
1082 int32_t qdf_skb_max_mem_stats_read(void);
1083 
1084 /**
1085  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1086  * which are waiting on Tx completions
1087  *
1088  * Return: Outstanding Tx desc count
1089  */
1090 int32_t qdf_mem_tx_desc_cnt_read(void);
1091 
1092 /**
1093  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1094  * descs which are waiting on Tx completions. This is the high
1095  * watermark for the pending desc count
1096  *
1097  * Return: Max outstanding Tx desc count
1098  */
1099 int32_t qdf_mem_tx_desc_max_read(void);
1100 
1101 /**
1102  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1103  * creating the sysfs node
1104  *
1105  * Return: None
1106  */
1107 void qdf_mem_stats_init(void);
1108 
1109 /**
1110  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1111  * allocated for Tx data path
1112  *
1113  * Return: Tx SKB memory allocated
1114  */
1115 int32_t qdf_dp_tx_skb_mem_stats_read(void);
1116 
1117 /**
1118  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1119  * allocated for Rx data path
1120  *
1121  * Return: Rx SKB memory allocated
1122  */
1123 int32_t qdf_dp_rx_skb_mem_stats_read(void);
1124 
1125 /**
1126  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1127  * watermark for the SKB memory allocated for Tx data path
1128  *
1129  * Return: Max Tx SKB memory allocated
1130  */
1131 int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1132 
1133 /**
1134  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1135  * watermark for the SKB memory allocated for Rx data path
1136  *
1137  * Return: Max Rx SKB memory allocated
1138  */
1139 int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1140 
1141 /**
1142  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1143  * allocated in the Tx data path by the host driver or
1144  * buffers coming from the n/w stack
1145  *
1146  * Return: Number of DP Tx buffers allocated
1147  */
1148 int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1149 
1150 /**
1151  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1152  * buffers allocated in the Tx data path
1153  *
1154  * Return: Max number of DP Tx buffers allocated
1155  */
1156 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1157 
1158 /**
1159  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1160  * allocated in the Rx data path
1161  *
1162  * Return: Number of DP Rx buffers allocated
1163  */
1164 int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1165 
1166 /**
1167  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1168  * buffers allocated in the Rx data path
1169  *
1170  * Return: Max number of DP Rx buffers allocated
1171  */
1172 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1173 
1174 /**
1175  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1176  * count and the high watermark for pending tx desc count
1177  *
1178  * @pending_tx_descs: outstanding Tx desc count
1179  * @tx_descs_max: high watermark for outstanding Tx desc count
1180  *
1181  * Return: None
1182  */
1183 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1184 				int32_t tx_descs_max);
1185 
1186 #endif /* __QDF_MEMORY_H */
1187