xref: /wlan-dirver/qca-wifi-host-cmn/qdf/inc/qdf_mem.h (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * QCA driver framework (QDF) memory management APIs
22  */
23 
24 #if !defined(__QDF_MEMORY_H)
25 #define __QDF_MEMORY_H
26 
27 /* Include Files */
28 #include <qdf_types.h>
29 #include <i_qdf_mem.h>
30 #include <i_qdf_trace.h>
31 #include <qdf_atomic.h>
32 
33 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
34 
35 /**
36  * qdf_align() - align to the given size.
37  * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
39  *
40  * Return: aligned value.
41  */
42 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
43 #define qdf_page_size __page_size
44 
/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 *
 * Describes one contiguous DMA-able page: the CPU-visible virtual range
 * [@page_v_addr_start, @page_v_addr_end] and its bus/physical address.
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};
56 
/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of element in single page
 * @num_pages: Number of allocation needed pages
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size in bytes of each allocated page
 * @is_mem_prealloc: flag for multiple pages pre-alloc or not
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
};
75 
76 
77 /* Preprocessor definitions and constants */
78 
79 typedef __qdf_mempool_t qdf_mempool_t;
80 
81 /**
82  * qdf_mem_init() - Initialize QDF memory module
83  *
84  * Return: None
85  *
86  */
87 void qdf_mem_init(void);
88 
89 /**
90  * qdf_mem_exit() - Exit QDF memory module
91  *
92  * Return: None
93  *
94  */
95 void qdf_mem_exit(void);
96 
97 #define QDF_MEM_FUNC_NAME_SIZE 48
98 
99 #ifdef MEMORY_DEBUG
100 /**
101  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
102  *
103  * Return: value of mem_debug_disabled qdf module argument
104  */
105 bool qdf_mem_debug_config_get(void);
106 
107 /**
108  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
109  * @size: Number of bytes of memory to allocate.
110  * @func: Function name of the call site
111  * @line: Line number of the call site
112  * @caller: Address of the caller function
113  * @flag: GFP flag
114  *
 * This function will dynamically allocate the specified number of bytes of
116  * memory and add it to the qdf tracking list to check for memory leaks and
117  * corruptions
118  *
119  * Return: A valid memory location on success, or NULL on failure
120  */
121 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
122 			   void *caller, uint32_t flag);
123 
124 #define qdf_mem_malloc(size) \
125 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
126 
127 #define qdf_mem_malloc_fl(size, func, line) \
128 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
129 
130 #define qdf_mem_malloc_atomic(size) \
131 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
132 /**
133  * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: File name of the call site
 * @line: Line number of the call site
135  *
136  * This function will free the memory pointed to by 'ptr'. It also checks for
137  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
138  *
139  * Return: none
140  */
141 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
142 
143 #define qdf_mem_free(ptr) \
144 	qdf_mem_free_debug(ptr, __func__, __LINE__)
145 
146 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
147 				     struct qdf_mem_multi_page_t *pages,
148 				     size_t element_size, uint16_t element_num,
149 				     qdf_dma_context_t memctxt, bool cacheable,
150 				     const char *func, uint32_t line,
151 				     void *caller);
152 
153 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
154 				  memctxt, cacheable) \
155 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
156 					element_num, memctxt, cacheable, \
157 					__func__, __LINE__, QDF_RET_IP)
158 
159 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
160 				    struct qdf_mem_multi_page_t *pages,
161 				    qdf_dma_context_t memctxt, bool cacheable,
162 				    const char *func, uint32_t line);
163 
164 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
165 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
166 				       __func__, __LINE__)
167 
168 /**
169  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
170  *
171  * Call this to ensure there are no active memory allocations being tracked
172  * against the current debug domain. For example, one should call this function
173  * immediately before a call to qdf_debug_domain_set() as a memory leak
174  * detection mechanism.
175  *
176  * e.g.
177  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
178  *
179  *	...
180  *
181  *	// memory is allocated and freed
182  *
183  *	...
184  *
185  *	// before transitioning back to inactive state,
186  *	// make sure all active memory has been freed
187  *	qdf_mem_check_for_leaks();
188  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
189  *
190  *	...
191  *
192  *	// also, before program exit, make sure init time memory is freed
193  *	qdf_mem_check_for_leaks();
194  *	exit();
195  *
196  * Return: None
197  */
198 void qdf_mem_check_for_leaks(void);
199 
200 /**
201  * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
202  * @osdev: OS device handle
203  * @dev: Pointer to device handle
204  * @size: Size to be allocated
205  * @paddr: Physical address
206  * @func: Function name of the call site
 * @line: Line number of the call site
208  * @caller: Address of the caller function
209  *
210  * Return: pointer of allocated memory or null if memory alloc fails
211  */
212 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
213 				     qdf_size_t size, qdf_dma_addr_t *paddr,
214 				     const char *func, uint32_t line,
215 				     void *caller);
216 
217 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
218 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
219 				       __func__, __LINE__, QDF_RET_IP)
220 
221 /**
222  * qdf_mem_free_consistent_debug() - free consistent qdf memory
223  * @osdev: OS device handle
224  * @size: Size to be allocated
225  * @vaddr: virtual address
226  * @paddr: Physical address
227  * @memctx: Pointer to DMA context
228  * @func: Function name of the call site
 * @line: Line number of the call site
230  *
231  * Return: none
232  */
233 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
234 				   qdf_size_t size, void *vaddr,
235 				   qdf_dma_addr_t paddr,
236 				   qdf_dma_context_t memctx,
237 				   const char *func, uint32_t line);
238 
239 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
240 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
241 				  __func__, __LINE__)
242 #else
/**
 * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
 *
 * Return: false always in non-MEMORY_DEBUG builds (debug tracking is
 *         compiled out, so it can never be user-enabled here)
 */
static inline bool qdf_mem_debug_config_get(void)
{
	return false;
}
247 
248 /**
249  * qdf_mem_malloc() - allocation QDF memory
250  * @size: Number of bytes of memory to allocate.
251  *
 * This function will dynamically allocate the specified number of bytes of
253  * memory.
254  *
255  * Return:
256  * Upon successful allocate, returns a non-NULL pointer to the allocated
257  * memory.  If this function is unable to allocate the amount of memory
258  * specified (for any reason) it returns NULL.
259  */
260 #define qdf_mem_malloc(size) \
261 	__qdf_mem_malloc(size, __func__, __LINE__)
262 
263 #define qdf_mem_malloc_fl(size, func, line) \
264 	__qdf_mem_malloc(size, func, line)
265 
266 /**
267  * qdf_mem_malloc_atomic() - allocation QDF memory atomically
268  * @size: Number of bytes of memory to allocate.
269  *
 * This function will dynamically allocate the specified number of bytes of
271  * memory.
272  *
273  * Return:
274  * Upon successful allocate, returns a non-NULL pointer to the allocated
275  * memory.  If this function is unable to allocate the amount of memory
276  * specified (for any reason) it returns NULL.
277  */
278 #define qdf_mem_malloc_atomic(size) \
279 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
280 
281 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
282 			       const char *func,
283 			       uint32_t line);
284 
285 #define qdf_mem_free(ptr) \
286 	__qdf_mem_free(ptr)
287 
288 static inline void qdf_mem_check_for_leaks(void) { }
289 
290 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
291 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
292 
293 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
294 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
295 
296 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
297 			       struct qdf_mem_multi_page_t *pages,
298 			       size_t element_size, uint16_t element_num,
299 			       qdf_dma_context_t memctxt, bool cacheable);
300 
301 void qdf_mem_multi_pages_free(qdf_device_t osdev,
302 			      struct qdf_mem_multi_page_t *pages,
303 			      qdf_dma_context_t memctxt, bool cacheable);
304 
305 #endif /* MEMORY_DEBUG */
306 
307 /**
308  * qdf_mem_multi_pages_zero() - zero out each page memory
309  * @pages: Multi page information storage
310  * @cacheable: Coherent memory or cacheable memory
311  *
312  * This function will zero out each page memory
313  *
314  * Return: None
315  */
316 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
317 			      bool cacheable);
318 
319 /**
320  * qdf_aligned_malloc() - allocates aligned QDF memory.
321  * @size: Size to be allocated
322  * @vaddr_unaligned: Unaligned virtual address.
323  * @paddr_unaligned: Unaligned physical address.
324  * @paddr_aligned: Aligned physical address.
325  * @align: Base address alignment.
326  * @func: Function name of the call site.
327  * @line: Line number of the call site.
328  *
329  * This function will dynamically allocate the specified number of bytes of
330  * memory. Checks if the allocated base address is aligned with base_align.
331  * If not, it frees the allocated memory, adds base_align to alloc size and
332  * re-allocates the memory.
333  *
334  * Return:
335  * Upon successful allocate, returns an aligned base address of the allocated
336  * memory.  If this function is unable to allocate the amount of memory
337  * specified (for any reason) it returns NULL.
338  */
339 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
340 			   paddr_aligned, align) \
341 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
342 			   paddr_aligned, align, __func__, __LINE__)
343 
344 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
345 			    qdf_dma_addr_t *paddr_unaligned,
346 			    qdf_dma_addr_t *paddr_aligned,
347 			    uint32_t align,
348 			    const char *func, uint32_t line);
349 
350 /**
351  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
352  * @osdev: OS device handle
353  * @size: Size to be allocated
354  * @vaddr_unaligned: Unaligned virtual address.
355  * @paddr_unaligned: Unaligned physical address.
356  * @paddr_aligned: Aligned physical address.
357  * @align: Base address alignment.
358  * @func: Function name of the call site.
359  * @line: Line number of the call site.
360  *
361  * Return: pointer of allocated memory or null if memory alloc fails.
362  */
363 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
364 					 paddr_unaligned, paddr_aligned, \
365 					 align) \
366 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
367 					    paddr_unaligned, paddr_aligned, \
368 					    align, __func__, __LINE__)
369 
370 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
371 					  void **vaddr_unaligned,
372 					  qdf_dma_addr_t *paddr_unaligned,
373 					  qdf_dma_addr_t *paddr_aligned,
374 					  uint32_t align, const char *func,
375 					  uint32_t line);
376 
377 #define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
378 
379 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
380 
381 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
382 					   uint32_t num_bytes);
383 
384 /**
385  * qdf_mem_set() - set (fill) memory with a specified byte value.
386  * @ptr: Pointer to memory that will be set
387  * @num_bytes: Number of bytes to be set
388  * @value: Byte set in memory
389  *
390  * WARNING: parameter @num_bytes and @value are swapped comparing with
391  * standard C function "memset", please ensure correct usage of this function!
392  *
393  * Return: None
394  */
395 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
396 
/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}
411 
412 /**
413  * qdf_mem_copy() - copy memory
414  * @dst_addr: Pointer to destination memory location (to copy to)
415  * @src_addr: Pointer to source memory location (to copy from)
416  * @num_bytes: Number of bytes to copy.
417  *
418  * Copy host memory from one location to another, similar to memcpy in
419  * standard C.  Note this function does not specifically handle overlapping
420  * source and destination memory locations.  Calling this function with
421  * overlapping source and destination memory locations will result in
422  * unpredictable results.  Use qdf_mem_move() if the memory locations
423  * for the source and destination are overlapping (or could be overlapping!)
424  *
425  * Return: none
426  */
427 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
428 
429 /**
430  * qdf_mem_move() - move memory
431  * @dst_addr: pointer to destination memory location (to move to)
432  * @src_addr: pointer to source memory location (to move from)
433  * @num_bytes: number of bytes to move.
434  *
435  * Move host memory from one location to another, similar to memmove in
436  * standard C.  Note this function *does* handle overlapping
437  * source and destination memory locations.
 *
439  * Return: None
440  */
441 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
442 
443 /**
444  * qdf_mem_cmp() - memory compare
445  * @left: pointer to one location in memory to compare
446  * @right: pointer to second location in memory to compare
447  * @size: the number of bytes to compare
448  *
449  * Function to compare two pieces of memory, similar to memcmp function
450  * in standard C.
451  *
452  * Return:
453  *	0 -- equal
454  *	< 0 -- *memory1 is less than *memory2
455  *	> 0 -- *memory1 is bigger than *memory2
456  */
457 int qdf_mem_cmp(const void *left, const void *right, size_t size);
458 
459 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
460 
/**
 * qdf_mem_map_nbytes_single - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	/* no bus mapping needed on non-PCI targets; report success */
	return 0;
#endif
}
481 
/**
 * qdf_mem_dma_cache_sync() - Cache sync for a DMA buffer
 * @osdev: pointer to OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA direction
 * @nbytes: number of bytes to sync
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}
489 
/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}
508 
/**
 * qdf_mempool_init - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address at which the created pool handle is returned
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, error value on failure (status of
 *         __qdf_mempool_init()); the pool handle is returned via @pool_addr
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}
525 
/**
 * qdf_mempool_destroy - Destroy memory pool
 * @osdev: platform device object
 * @pool: handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}
536 
/**
 * qdf_mempool_alloc - Allocate an element memory pool
 * @osdev: platform device object
 * @pool: handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}
547 
/**
 * qdf_mempool_free - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}
560 
561 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
562 					qdf_dma_addr_t bus_addr,
563 					qdf_size_t size,
564 					__dma_data_direction direction);
565 
566 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
567 					qdf_dma_addr_t bus_addr,
568 					qdf_size_t size,
569 					__dma_data_direction direction);
570 
571 int qdf_mem_multi_page_link(qdf_device_t osdev,
572 		struct qdf_mem_multi_page_t *pages,
573 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
574 
575 /**
576  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
577  * @size: number of bytes to increment by
578  *
579  * Return: None
580  */
581 void qdf_mem_kmalloc_inc(qdf_size_t size);
582 
583 /**
584  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
585  * @size: number of bytes to decrement by
586  *
587  * Return: None
588  */
589 void qdf_mem_kmalloc_dec(qdf_size_t size);
590 
591 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
592 /**
593  * qdf_mem_skb_inc() - increment total skb allocation size
594  * @size: size to be added
595  *
596  * Return: none
597  */
598 void qdf_mem_skb_inc(qdf_size_t size);
599 
600 /**
601  * qdf_mem_skb_dec() - decrement total skb allocation size
602  * @size: size to be decremented
603  *
604  * Return: none
605  */
606 void qdf_mem_skb_dec(qdf_size_t size);
607 
608 /**
609  * qdf_mem_skb_total_inc() - increment total skb allocation size
610  * in host driver in both debug and perf builds
611  * @size: size to be added
612  *
613  * Return: none
614  */
615 void qdf_mem_skb_total_inc(qdf_size_t size);
616 
617 /**
618  * qdf_mem_skb_total_dec() - decrement total skb allocation size
619  * in the host driver in debug and perf flavors
620  * @size: size to be decremented
621  *
622  * Return: none
623  */
624 void qdf_mem_skb_total_dec(qdf_size_t size);
625 
626 /**
627  * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
628  * @size: size to be added
629  *
630  * Return: none
631  */
632 void qdf_mem_dp_tx_skb_inc(qdf_size_t size);
633 
634 /**
635  * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
636  * @size: size to be decreased
637  *
638  * Return: none
639  */
640 void qdf_mem_dp_tx_skb_dec(qdf_size_t size);
641 
642 /**
643  * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
644  * @size: size to be added
645  *
646  * Return: none
647  */
648 void qdf_mem_dp_rx_skb_inc(qdf_size_t size);
649 
650 /**
651  * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
652  * @size: size to be decreased
653  *
654  * Return: none
655  */
656 void qdf_mem_dp_rx_skb_dec(qdf_size_t size);
657 
658 /**
659  * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
660  *
661  * Return: none
662  */
663 void qdf_mem_dp_tx_skb_cnt_inc(void);
664 
665 /**
666  * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
667  *
668  * Return: none
669  */
670 void qdf_mem_dp_tx_skb_cnt_dec(void);
671 
672 /**
673  * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
674  *
675  * Return: none
676  */
677 void qdf_mem_dp_rx_skb_cnt_inc(void);
678 
679 /**
680  * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
681  *
682  * Return: none
683  */
684 void qdf_mem_dp_rx_skb_cnt_dec(void);
685 #else
686 
687 static inline void qdf_mem_skb_inc(qdf_size_t size)
688 {
689 }
690 
691 static inline void qdf_mem_skb_dec(qdf_size_t size)
692 {
693 }
694 
695 static inline void qdf_mem_skb_total_inc(qdf_size_t size)
696 {
697 }
698 
699 static inline void qdf_mem_skb_total_dec(qdf_size_t size)
700 {
701 }
702 
703 static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
704 {
705 }
706 
707 static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
708 {
709 }
710 
711 static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
712 {
713 }
714 
715 static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
716 {
717 }
718 
719 static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
720 {
721 }
722 
723 static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
724 {
725 }
726 
727 static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
728 {
729 }
730 
731 static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
732 {
733 }
734 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
735 
736 /**
737  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
738  * @num: number of required storage
739  *
740  * Allocate mapping table for DMA memory allocation. This is needed for
741  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
742  *
743  * Return: shared memory info storage table pointer
744  */
745 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
746 {
747 	qdf_mem_info_t *mem_info_arr;
748 
749 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
750 	return mem_info_arr;
751 }
752 
/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information. A NULL @mem_info is logged and
 * ignored.
 *
 * Return: none
 */
static inline void qdf_update_mem_map_table(qdf_device_t osdev,
					    qdf_mem_info_t *mem_info,
					    qdf_dma_addr_t dma_addr,
					    uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}
776 
/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return __qdf_mem_smmu_s1_enabled(osdev);
}
787 
/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
							qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}
805 
/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Return: status of __qdf_os_mem_dma_get_sgtable() (0 on success,
 *         non-zero on failure)
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			qdf_dma_addr_t dma_addr, size_t size)
{
	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
822 
/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist storage owned by @sgt (not the mapped buffers).
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
	__qdf_os_mem_free_sgtable(sgt);
}
834 
/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	__qdf_dma_get_sgtable_dma_addr(sgt);
}
846 
/**
 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return IO virtual address otherwise
 * returns physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
						  qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr(osdev, mem_info);
}
863 
/**
 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement, return corresponding dma
 * address storage pointer.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
						       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
}
879 
880 
/**
 * qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_size(osdev, mem_info);
}
894 
/**
 * qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
}
910 
/**
 * qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_pa(osdev, mem_info);
}
924 
/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}
940 
941 /**
942  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
943  * @osdev: parent device instance
944  * @mem_info: Pointer to allocated memory information
945  * @size: size to be allocated
946  *
947  * Allocate DMA memory which will be shared with external kernel module. This
948  * information is needed for SMMU mapping.
949  *
 * Return: pointer to allocated shared memory on success, NULL on failure
951  */
952 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
953 
/**
 * qdf_mem_shared_mem_free() - Free shared memory
 * @osdev: parent device instance
 * @shared_mem: shared memory information storage
 *
 * Free DMA shared memory resource. A NULL @shared_mem is logged and
 * ignored; the backing consistent memory is released only when it was
 * actually allocated (non-NULL vaddr). The sg table and the info struct
 * itself are always freed.
 *
 * Return: None
 */
static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
					   qdf_shared_mem_t *shared_mem)
{
	if (!shared_mem) {
		qdf_nofl_err("%s: NULL shared mem struct passed",
			     __func__);
		return;
	}

	if (shared_mem->vaddr) {
		qdf_mem_free_consistent(osdev, osdev->dev,
					qdf_mem_get_dma_size(osdev,
						&shared_mem->mem_info),
					shared_mem->vaddr,
					qdf_mem_get_dma_addr(osdev,
						&shared_mem->mem_info),
					qdf_get_dma_mem_context(shared_mem,
								memctx));
	}
	qdf_mem_free_sgtable(&shared_mem->sgtable);
	qdf_mem_free(shared_mem);
}
985 
986 /**
987  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
988  * host driver
989  *
990  * Return: Total DMA memory allocated
991  */
992 int32_t qdf_dma_mem_stats_read(void);
993 
994 /**
995  * qdf_heap_mem_stats_read() - Return the heap memory allocated
996  * in host driver
997  *
998  * Return: Total heap memory allocated
999  */
1000 int32_t qdf_heap_mem_stats_read(void);
1001 
1002 /**
1003  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1004  * host driver
1005  *
1006  * Return: Total SKB memory allocated
1007  */
1008 int32_t qdf_skb_mem_stats_read(void);
1009 
1010 /**
1011  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1012  * in the host driver tracked in both debug and perf builds
1013  *
1014  * Return: Total SKB memory allocated
1015  */
1016 int32_t qdf_skb_total_mem_stats_read(void);
1017 
1018 /**
1019  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1020  * allocated in host driver. This is the high watermark for the
1021  * total SKB allocated in the host driver
1022  *
 * Return: Max SKB memory allocated
1024  */
1025 int32_t qdf_skb_max_mem_stats_read(void);
1026 
1027 /**
1028  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1029  * which are waiting on Tx completions
1030  *
1031  * Return: Outstanding Tx desc count
1032  */
1033 int32_t qdf_mem_tx_desc_cnt_read(void);
1034 
1035 /**
1036  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1037  * descs which are waiting on Tx completions. This is the high
1038  * watermark for the pending desc count
1039  *
1040  * Return: Max outstanding Tx desc count
1041  */
1042 int32_t qdf_mem_tx_desc_max_read(void);
1043 
1044 /**
1045  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1046  * creating the sysfs node
1047  *
1048  * Return: None
1049  */
1050 void qdf_mem_stats_init(void);
1051 
1052 /**
1053  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1054  * allocated for Tx data path
1055  *
1056  * Return: Tx SKB memory allocated
1057  */
1058 int32_t qdf_dp_tx_skb_mem_stats_read(void);
1059 
1060 /**
1061  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1062  * allocated for Rx data path
1063  *
1064  * Return: Rx SKB memory allocated
1065  */
1066 int32_t qdf_dp_rx_skb_mem_stats_read(void);
1067 
1068 /**
1069  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1070  * watermark for the SKB memory allocated for Tx data path
1071  *
1072  * Return: Max Tx SKB memory allocated
1073  */
1074 int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1075 
1076 /**
1077  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1078  * watermark for the SKB memory allocated for Rx data path
1079  *
1080  * Return: Max Rx SKB memory allocated
1081  */
1082 int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1083 
1084 /**
1085  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1086  * allocated in the Tx data path by the host driver or
1087  * buffers coming from the n/w stack
1088  *
1089  * Return: Number of DP Tx buffers allocated
1090  */
1091 int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1092 
1093 /**
1094  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1095  * buffers allocated in the Tx data path
1096  *
1097  * Return: Max number of DP Tx buffers allocated
1098  */
1099 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1100 
1101 /**
1102  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1103  * allocated in the Rx data path
1104  *
1105  * Return: Number of DP Rx buffers allocated
1106  */
1107 int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1108 
1109 /**
1110  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1111  * buffers allocated in the Rx data path
1112  *
1113  * Return: Max number of DP Rx buffers allocated
1114  */
1115 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1116 
1117 /**
1118  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1119  * count and the high watermark for pending tx desc count
1120  *
1121  * @pending_tx_descs: outstanding Tx desc count
1122  * @tx_descs_max: high watermark for outstanding Tx desc count
1123  *
1124  * Return: None
1125  */
1126 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1127 				int32_t tx_descs_max);
1128 
1129 #endif /* __QDF_MEMORY_H */
1130