/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_mem.h
 * Linux-specific definitions for QDF memory APIs
 */

#ifndef __I_QDF_MEM_H
#define __I_QDF_MEM_H

#ifdef __KERNEL__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
#include <linux/autoconf.h>
#else
#include <generated/autoconf.h>
#endif
#endif
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/vmalloc.h>
#include <linux/pci.h> /* pci_alloc_consistent */
#include <linux/cache.h> /* L1_CACHE_BYTES */

#define __qdf_cache_line_sz L1_CACHE_BYTES
#include "queue.h"

#else
/*
 * Provide dummy defs for kernel data types, functions, and enums
 * used in this header file.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define kzalloc(size, flags) NULL
#define vmalloc(size)        NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
#include <qdf_status.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && defined(MSM_PLATFORM)
#include <linux/qcom-iommu-util.h>
#endif

#if IS_ENABLED(CONFIG_ARM_SMMU)
#include <pld_common.h>
#ifdef ENABLE_SMMU_S1_TRANSLATION
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
#include <asm/dma-iommu.h>
#endif
#endif
#include <linux/iommu.h>
#endif

#ifdef __KERNEL__
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: free pool list
 * @lock: spinlock object
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

#endif /* __KERNEL__ */

#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;

/**
 * __qdf_dma_dir_to_os() - Convert QDF DMA data direction to OS-specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}

/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be DMA mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped
 * @phy_addr: pointer to receive the physical address
 *
 * Return: success/failure
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						  void *buf, qdf_dma_dir_t dir,
						  int nbytes,
						  qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
					__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif

/**
 * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be DMA unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
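
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * typical single-buffer map -> device access -> unmap sequence built from
 * the wrappers above. "osdev", "buf" and "len" are hypothetical caller
 * values.
 *
 *	qdf_dma_addr_t paddr;
 *
 *	if (__qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_FROM_DEVICE,
 *					len, &paddr) != QDF_STATUS_SUCCESS)
 *		return;
 *	... let the device DMA into the buffer ...
 *	__qdf_mem_dma_cache_sync(osdev, paddr, QDF_DMA_FROM_DEVICE, len);
 *	... the CPU may now read buf ...
 *	__qdf_mem_unmap_nbytes_single(osdev, paddr, QDF_DMA_FROM_DEVICE, len);
 */
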
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
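
/*
 * Usage sketch (illustrative only, not part of the original header):
 * expected life cycle of a pool created with the API above. "osdev" and
 * "struct foo" are hypothetical; the init return convention (0 on
 * success, non-zero on failure) is an assumption based on common kernel
 * style.
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo),
 *			       GFP_KERNEL))
 *		return;
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem)
 *		__qdf_mempool_free(osdev, pool, elem);
 *	__qdf_mempool_destroy(osdev, pool);
 */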

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if SMMU stage 1 is enabled, false if it is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}

#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/**
 * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
 */
typedef struct iommu_domain __qdf_iommu_domain_t;

/**
 * __qdf_iommu_attr_to_os() - Convert QDF iommu attribute to OS-specific enum
 * @attr: QDF iommu attribute
 *
 * Return: enum iommu_attr
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 on success, otherwise an error code
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
				     data);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
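
/*
 * Usage sketch (illustrative only, not part of the original header):
 * querying a domain attribute, here whether stage 1 translation is
 * bypassed. "osdev" is hypothetical; the int-sized output assumed for
 * S1_BYPASS follows the legacy iommu_domain_get_attr() convention and
 * should be confirmed against the platform IOMMU driver.
 *
 *	struct iommu_domain *domain = __qdf_dev_get_domain(osdev);
 *	int bypass = 0;
 *
 *	if (domain &&
 *	    !__qdf_iommu_domain_get_attr(domain, QDF_DOMAIN_ATTR_S1_BYPASS,
 *					 &bypass))
 *		... bypass != 0 means DMA addresses are physical addresses ...
 */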

/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get the actual physical address from dma_addr based on SMMU enablement
 * status. If SMMU stage 1 translation is enabled, the DMA APIs return an IO
 * virtual address (IOVA); otherwise they return a physical address. In the
 * former case, look up the physical address behind the IOVA in the SMMU
 * mapping.
 *
 * Return: DMA'able physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	return dma_addr;
}
#else
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
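
/*
 * Usage sketch (illustrative only, not part of the original header):
 * recovering the physical address behind a DMA handle, e.g. before handing
 * it to an entity that requires a real PA. "osdev" and "dma_addr" are
 * hypothetical. With SMMU S1 enabled this walks the IOMMU mapping via
 * iommu_iova_to_phys(); otherwise it is an identity conversion.
 *
 *	unsigned long pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
 */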

/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, or a negative error code
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}

/**
 * __qdf_dma_get_sgtable_dma_addr() - Assign DMA addresses to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
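
/*
 * Usage sketch (illustrative only, not part of the original header):
 * building a scatter gather table for an existing coherent allocation and
 * stamping each entry's dma_address. "dev", "vaddr", "dma_addr" and "size"
 * are hypothetical.
 *
 *	struct sg_table sgt;
 *
 *	if (__qdf_os_mem_dma_get_sgtable(dev, &sgt, vaddr, dma_addr, size))
 *		return;
 *	__qdf_dma_get_sgtable_dma_addr(&sgt);
 *	... use sgt ...
 *	__qdf_os_mem_free_sgtable(&sgt);
 */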

/**
 * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return the
 * corresponding DMA address from qdf_mem_info_t. If stage 1 translation is
 * enabled, return the IO virtual address; otherwise return the physical
 * address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
						    qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t)mem_info->iova;
	else
		return (qdf_dma_addr_t)mem_info->pa;
}

/**
 * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return a pointer
 * to the corresponding DMA address field of qdf_mem_info_t. If stage 1
 * translation is enabled, return a pointer to the IO virtual address;
 * otherwise return a pointer to the physical address.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *
__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
			   qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t *)(&mem_info->iova);
	else
		return (qdf_dma_addr_t *)(&mem_info->pa);
}

/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
					      qdf_mem_info_t *mem_info,
					      qdf_dma_addr_t dma_addr,
					      uint32_t mem_size)
{
	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	mem_info->iova = dma_addr;
	mem_info->size = mem_size;
}
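
/*
 * Usage sketch (illustrative only, not part of the original header):
 * recording a fresh DMA allocation in a qdf_mem_info_t and reading back
 * the device-usable address. "osdev", "dma_addr" and "size" are
 * hypothetical.
 *
 *	qdf_mem_info_t mem_info;
 *	qdf_dma_addr_t dev_addr;
 *
 *	__qdf_update_mem_map_table(osdev, &mem_info, dma_addr, size);
 *	... mem_info now holds pa, iova and size ...
 *	dev_addr = __qdf_mem_get_dma_addr(osdev, &mem_info);
 */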

/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}

/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);
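
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * func/line arguments let the QDF memory debug framework attribute an
 * allocation to its call site; passing __func__ and __LINE__ directly is
 * the assumed convention.
 *
 *	void *buf = __qdf_mem_malloc(128, __func__, __LINE__);
 *
 *	if (!buf)
 *		return;
 *	... use buf ...
 *	__qdf_mem_free(buf);
 */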

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: Number of bytes of virtual memory to allocate.
 * @func: Caller function name
 * @line: Line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: Pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated this way is not tracked by the QDF memory debug
 * framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
				 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_untracked_mem_free(void *ptr);
#endif

/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the allocation to be freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
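
/*
 * Usage sketch (illustrative only, not part of the original header):
 * pairing the consistent alloc/free calls. "osdev" is hypothetical, and
 * the 0 memctx argument is purely for illustration; real callers pass
 * their own DMA context.
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr = __qdf_mem_alloc_consistent(osdev, osdev->dev, 4096,
 *						 &paddr, __func__, __LINE__);
 *
 *	if (!vaddr)
 *		return;
 *	... device DMA via paddr, CPU access via vaddr ...
 *	__qdf_mem_free_consistent(osdev, osdev->dev, 4096, vaddr, paddr, 0);
 */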

#endif /* __I_QDF_MEM_H */