/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_mem.h
 * Linux-specific definitions for QDF memory APIs
 */

#ifndef __I_QDF_MEM_H
#define __I_QDF_MEM_H

#ifdef __KERNEL__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
#include <linux/autoconf.h>
#else
#include <generated/autoconf.h>
#endif
#endif
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/vmalloc.h>
#include <linux/pci.h> /* pci_alloc_consistent */
#include <linux/cache.h> /* L1_CACHE_BYTES */

#define __qdf_cache_line_sz L1_CACHE_BYTES
#include "queue.h"

#else
/*
 * Provide dummy defs for kernel data types, functions, and enums
 * used in this header file.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define kzalloc(size, flags) NULL
#define vmalloc(size)        NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
#include <qdf_status.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && defined(MSM_PLATFORM)
#include <linux/qcom-iommu-util.h>
#endif

#if IS_ENABLED(CONFIG_ARM_SMMU)
#include <pld_common.h>
#ifdef ENABLE_SMMU_S1_TRANSLATION
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
#include <asm/dma-iommu.h>
#endif
#endif
#include <linux/iommu.h>
#endif

#ifdef __KERNEL__
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags supplied at pool creation
 * @elem_size: size of each pool element in bytes
 * @pool_mem: starting address of the pool memory
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free pool elements
 * @lock: spinlock protecting the pool
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

#endif /* __KERNEL__ */

#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;

/**
 * __qdf_dma_dir_to_os() - Convert DMA data direction to OS-specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}


/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be DMA mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped
 * @phy_addr: pointer to receive the physical address
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						  void *buf, qdf_dma_dir_t dir,
						  int nbytes,
						  qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
					__qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif

/**
 * __qdf_mem_unmap_nbytes_single() - Unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be DMA unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
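
/*
 * Usage sketch for the map/unmap helpers above (illustrative only;
 * "osdev", "buf" and "len" are hypothetical caller-owned values):
 *
 *	qdf_dma_addr_t pa;
 *
 *	if (__qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_TO_DEVICE,
 *					len, &pa) != QDF_STATUS_SUCCESS)
 *		return;	// mapping failed; buffer is not DMA-visible
 *
 *	// ... hand "pa" to the device and wait for the transfer ...
 *
 *	__qdf_mem_unmap_nbytes_single(osdev, pa, QDF_DMA_TO_DEVICE, len);
 */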
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
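
/*
 * Mempool lifecycle sketch (illustrative; "osdev" and "struct my_elem"
 * are hypothetical, only the __qdf_mempool_* calls come from this
 * header, and a zero-on-success return from __qdf_mempool_init() is
 * assumed): create a pool of 32 fixed-size elements, draw and return
 * one element, then tear the pool down.
 *
 *	__qdf_mempool_t pool;
 *	struct my_elem *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 32,
 *			       sizeof(struct my_elem), 0))
 *		return;	// pool creation failed
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		// ... use the element ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	__qdf_mempool_destroy(osdev, pool);
 */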

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if SMMU stage 1 translation is enabled, false if it is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}

#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/*
 * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
 */
typedef struct iommu_domain __qdf_iommu_domain_t;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
#if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS mapping
 *			      configurations bitmap
 * @attr: QDF iommu attribute
 *
 * Return: IOMMU mapping configuration bitmap
 */
static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
	case QDF_DOMAIN_ATTR_FAST:
		return QCOM_IOMMU_MAPPING_CONF_FAST;
	default:
		return -EINVAL;
	}
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	int mapping_config;
	int mapping_bitmap;
	int *value;

	mapping_bitmap = __qdf_iommu_attr_to_os(attr);
	if (mapping_bitmap < 0)
		return -EINVAL;

	mapping_config = qcom_iommu_get_mappings_configuration(domain);
	if (mapping_config < 0)
		return -EINVAL;

	value = data;
	*value = (mapping_config & mapping_bitmap) ? 1 : 0;

	return 0;
}
#else /* !CONFIG_QCOM_IOMMU_UTIL */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_QCOM_IOMMU_UTIL */
#else
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS-specific enum
 * @attr: QDF iommu attribute
 *
 * Return: enum iommu_attr
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 on success, negative error value otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
				     data);
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
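
/*
 * Attribute query sketch (illustrative; "osdev" is a hypothetical
 * caller-owned device handle, and an int result written through @data
 * for QDF_DOMAIN_ATTR_S1_BYPASS is assumed on both the qcom-iommu-util
 * and legacy paths of __qdf_iommu_domain_get_attr()):
 *
 *	struct iommu_domain *domain = __qdf_dev_get_domain(osdev);
 *	int s1_bypass = 0;
 *
 *	if (domain &&
 *	    !__qdf_iommu_domain_get_attr(domain, QDF_DOMAIN_ATTR_S1_BYPASS,
 *					 &s1_bypass) && s1_bypass) {
 *		// stage 1 translation is bypassed for this domain
 *	}
 */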

/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get the actual physical address from dma_addr based on SMMU enablement
 * status. If SMMU stage 1 translation is enabled, the DMA APIs return an IO
 * virtual address (IOVA) instead of a physical address, so look up the
 * physical address the SMMU has mapped for that IOVA.
 *
 * Return: DMA'able physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	return dma_addr;
}
#else
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif

/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, negative error value otherwise
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}

/**
 * __qdf_dma_get_sgtable_dma_addr() - Assign DMA addresses to scatterlist
 *				      elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
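
/*
 * Scatter/gather table sketch built from the three helpers above
 * (illustrative; "dev", "vaddr", "dma_addr" and "size" are
 * hypothetical values describing an existing DMA allocation):
 *
 *	struct sg_table sgt;
 *
 *	if (__qdf_os_mem_dma_get_sgtable(dev, &sgt, vaddr, dma_addr, size))
 *		return;	// could not build the table
 *
 *	__qdf_dma_get_sgtable_dma_addr(&sgt);	// fill sg->dma_address
 *	// ... pass the table to the DMA engine ...
 *	__qdf_os_mem_free_sgtable(&sgt);
 */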

/**
 * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return the
 * corresponding DMA address from qdf_mem_info_t: the IO virtual address if
 * stage 1 translation is enabled, the physical address otherwise.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
						    qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t)mem_info->iova;
	else
		return (qdf_dma_addr_t)mem_info->pa;
}

/**
 * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return a pointer
 * to the corresponding DMA address field of qdf_mem_info_t: the IO virtual
 * address if stage 1 translation is enabled, the physical address otherwise.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *
__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
			   qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t *)(&mem_info->iova);
	else
		return (qdf_dma_addr_t *)(&mem_info->pa);
}

/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
					      qdf_mem_info_t *mem_info,
					      qdf_dma_addr_t dma_addr,
					      uint32_t mem_size)
{
	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	mem_info->iova = dma_addr;
	mem_info->size = mem_size;
}
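
/*
 * Sketch of how the mem_info helpers compose (illustrative; "osdev",
 * "dma_addr" and "size" are hypothetical values from a prior DMA
 * allocation): record the mapping once, then fetch whichever address
 * the device should use.
 *
 *	qdf_mem_info_t mem_info;
 *	qdf_dma_addr_t dev_addr;
 *
 *	__qdf_update_mem_map_table(osdev, &mem_info, dma_addr, size);
 *
 *	// IOVA when SMMU stage 1 is enabled, physical address otherwise
 *	dev_addr = __qdf_mem_get_dma_addr(osdev, &mem_info);
 */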

/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}

/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);
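
/*
 * Allocation sketch for the tracked malloc/free pair (illustrative;
 * passing __func__ and __LINE__ as the call-site arguments is an
 * assumption about how the higher-level wrappers use this API):
 *
 *	struct foo *f = __qdf_mem_malloc(sizeof(*f), __func__, __LINE__);
 *
 *	if (!f)
 *		return;	// allocation failed
 *	// ... use f ...
 *	__qdf_mem_free(f);
 */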

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: Number of bytes of virtual memory to allocate
 * @func: Caller function name
 * @line: Line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: Pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated is not tracked by the qdf memory debug framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
				 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_untracked_mem_free(void *ptr);
#endif

/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory to be freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);

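/*
 * Consistent-memory lifecycle sketch (illustrative; "osdev" is a
 * hypothetical qdf_device_t, and the DMA context argument is shown as
 * 0 for brevity):
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr = __qdf_mem_alloc_consistent(osdev, osdev->dev, size,
 *						 &paddr, __func__, __LINE__);
 *
 *	if (!vaddr)
 *		return;	// allocation failed
 *	// ... share vaddr/paddr with the device ...
 *	__qdf_mem_free_consistent(osdev, osdev->dev, size, vaddr, paddr, 0);
 */
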
#endif /* __I_QDF_MEM_H */