xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_mem.h
22  * Linux-specific definitions for QDF memory API's
23  */
24 
25 #ifndef __I_QDF_MEM_H
26 #define __I_QDF_MEM_H
27 
28 #ifdef __KERNEL__
29 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
31 #include <linux/autoconf.h>
32 #else
33 #include <generated/autoconf.h>
34 #endif
35 #endif
36 #include <linux/slab.h>
37 #include <linux/hardirq.h>
38 #include <linux/vmalloc.h>
39 #include <linux/pci.h> /* pci_alloc_consistent */
40 #include <linux/cache.h> /* L1_CACHE_BYTES */
41 
42 #define __qdf_cache_line_sz L1_CACHE_BYTES
43 #include "queue.h"
44 
45 #else
46 /*
47  * Provide dummy defs for kernel data types, functions, and enums
48  * used in this header file.
49  */
50 #define GFP_KERNEL 0
51 #define GFP_ATOMIC 0
52 #define kzalloc(size, flags) NULL
53 #define vmalloc(size)        NULL
54 #define kfree(buf)
55 #define vfree(buf)
56 #define pci_alloc_consistent(dev, size, paddr) NULL
57 #define __qdf_mempool_t void*
58 #define QDF_RET_IP NULL
59 #endif /* __KERNEL__ */
60 #include <qdf_status.h>
61 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && defined(MSM_PLATFORM)
62 #include <linux/qcom-iommu-util.h>
63 #endif
64 
65 #if IS_ENABLED(CONFIG_ARM_SMMU)
66 #include <pld_common.h>
67 #ifdef ENABLE_SMMU_S1_TRANSLATION
68 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
69 #include <asm/dma-iommu.h>
70 #endif
71 #endif
72 #include <linux/iommu.h>
73 #endif
74 
75 #ifdef __KERNEL__
/* Free-list linkage embedded at the start of every free pool element */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: pool_addr address of the pool created
 * @mem_size: Total size of the pool in bytes
 * @free_list: free pool list
 * @lock: spinlock object
 * @max_elem: Maximum number of elements in the pool
 * @free_cnt: Number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

/* Handle to a kernel slab cache (struct kmem_cache) */
typedef struct kmem_cache *qdf_kmem_cache_t;
#endif /* __KERNEL__ */
107 
/* OS page size (as size_t) and alignment helper built on the kernel's ALIGN */
#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
/* Memdebug panics compiled out: the macro expands to a no-op statement */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
/* Forward memdebug failures to the common QDF debug panic handler */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;
123 
/**
 * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: matching enum dma_data_direction value; DMA_NONE for any
 * unrecognized direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}
145 
146 
147 /**
148  * __qdf_mem_map_nbytes_single - Map memory for DMA
149  * @osdev: pomter OS device context
150  * @buf: pointer to memory to be dma mapped
151  * @dir: DMA map direction
152  * @nbytes: number of bytes to be mapped.
153  * @phy_addr: pointer to receive physical address.
154  *
155  * Return: success/failure
156  */
157 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
158 						  void *buf, qdf_dma_dir_t dir,
159 						  int nbytes,
160 						  qdf_dma_addr_t *phy_addr)
161 {
162 	/* assume that the OS only provides a single fragment */
163 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
164 					__qdf_dma_dir_to_os(dir));
165 	return dma_mapping_error(osdev->dev, *phy_addr) ?
166 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
167 }
168 
/**
 * __qdf_mem_dma_cache_sync() - make a DMA buffer visible to the CPU
 * @osdev: OS device context
 * @buf: dma address (qdf_dma_addr_t) of the buffer to sync
 * @dir: QDF DMA data direction of the mapping
 * @nbytes: number of bytes to sync
 *
 * On kernels >= 2.6.20 this wraps dma_sync_single_for_cpu(); older
 * kernels use dma_cache_sync() instead.
 *
 * Return: none
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
187 
188 /**
189  * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
190  *
191  * @osdev: pomter OS device context
192  * @phy_addr: physical address of memory to be dma unmapped
193  * @dir: DMA unmap direction
194  * @nbytes: number of bytes to be unmapped.
195  *
196  * Return - none
197  */
198 static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
199 						 qdf_dma_addr_t phy_addr,
200 						 qdf_dma_dir_t dir, int nbytes)
201 {
202 	dma_unmap_single(osdev->dev, phy_addr, nbytes,
203 				__qdf_dma_dir_to_os(dir));
204 }
#ifdef __KERNEL__

/* Opaque handle to a pre-allocated memory pool (see __qdf_mempool_ctxt_t) */
typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/* Memory pool create/destroy/alloc/free API; defined in the linux qdf_mem
 * implementation
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
/* Thin wrappers over the kernel kmem_cache slab allocator */
qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
					 qdf_size_t size);
void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);
void* __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);
void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);
/* Caller return address, recorded by the qdf mem debug framework */
#define QDF_RET_IP ((void *)_RET_IP_)

/* Size in bytes of a single element of the given pool */
#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
223 
/*
 * __qdf_ioremap() - map bus/CE register space into the CPU address space.
 * Kernels >= 5.6 dropped ioremap_nocache(); plain ioremap() is the
 * non-cached mapping there.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
#else
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE)
#endif
231 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
242 
243 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
244 /*
245  * typedef __qdf_iommu_domain_t: abstraction for struct iommu_domain
246  */
247 typedef struct iommu_domain __qdf_iommu_domain_t;
248 
249 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
250 #if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS mapping
 *			      configurations bitmap
 * @attr: QDF iommu attribute
 *
 * Return: IOMMU mapping configuration bitmap for the attribute, or
 * -EINVAL if the attribute has no qcom mapping-configuration equivalent
 */
static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
	case QDF_DOMAIN_ATTR_FAST:
		return QCOM_IOMMU_MAPPING_CONF_FAST;
	default:
		return -EINVAL;
	}
}
271 
272 /**
273  * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
274  *
275  * @domain: iommu domain
276  * @attr: iommu attribute
277  * @data: data pointer
278  *
279  * Return: 0 for success, and negative values otherwise
280  */
281 static inline int
282 __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
283 			    enum qdf_iommu_attr attr, void *data)
284 {
285 	int mapping_config;
286 	int mapping_bitmap;
287 	int *value;
288 
289 	mapping_bitmap = __qdf_iommu_attr_to_os(attr);
290 	if (mapping_bitmap < 0)
291 		return -EINVAL;
292 
293 	mapping_config = qcom_iommu_get_mappings_configuration(domain);
294 	if (mapping_config < 0)
295 		return -EINVAL;
296 
297 	value = data;
298 	*value = (mapping_config & mapping_bitmap) ? 1 : 0;
299 
300 	return 0;
301 }
302 #else /* !CONFIG_QCOM_IOMMU_UTIL */
/* Stub when CONFIG_QCOM_IOMMU_UTIL is absent; always reports unsupported */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return -ENOTSUPP;
}
309 #endif /* CONFIG_QCOM_IOMMU_UTIL */
310 #else
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS specific enum
 * @attr: QDF iommu attribute
 *
 * Return: matching enum iommu_attr; DOMAIN_ATTR_EXTENDED_MAX for any
 * attribute without an OS equivalent
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}
373 
/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 *
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise (propagated from
 * iommu_domain_get_attr())
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
				     data);
}
390 #endif
391 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/*
 * Pre-4.19 variant: the domain is reached through osdev->iommu_mapping.
 * Return: iommu domain, or NULL when no iommu mapping is set.
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
414 
415 /**
416  * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
417  * @osdev: parent device instance
418  * @dma_addr: dma_addr
419  *
420  * Get actual physical address from dma_addr based on SMMU enablement status.
421  * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
422  * (IOVA) otherwise returns physical address. So get SMMU physical address
423  * mapping from IOVA.
424  *
425  * Return: dmaable physical address
426  */
427 static inline unsigned long
428 __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
429 			     qdf_dma_addr_t dma_addr)
430 {
431 	struct iommu_domain *domain;
432 
433 	if (__qdf_mem_smmu_s1_enabled(osdev)) {
434 		domain = __qdf_dev_get_domain(osdev);
435 		if (domain)
436 			return iommu_iova_to_phys(domain, dma_addr);
437 	}
438 
439 	return dma_addr;
440 }
441 #else
/* No SMMU S1 support compiled in: dma_addr is already the physical address */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
448 #endif
449 
/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: result of dma_get_sgtable(): 0 on success, negative error
 * value on failure
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}
467 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist storage held by @sgt via sg_free_table();
 * the sg_table structure itself is owned by the caller.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
479 
/**
 * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Walks every entry of the table and publishes the entry's physical
 * address (sg_phys) as its dma_address, i.e. an identity dma mapping.
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* defensively stop on a NULL entry in the chain */
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
499 
500 /**
501  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
502  * @osdev: parent device instance
503  * @mem_info: Pointer to allocated memory information
504  *
505  * Based on smmu stage 1 translation enablement status, return corresponding dma
506  * address from qdf_mem_info_t. If stage 1 translation enabled, return
507  * IO virtual address otherwise return physical address.
508  *
509  * Return: dma address
510  */
511 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
512 						    qdf_mem_info_t *mem_info)
513 {
514 	if (__qdf_mem_smmu_s1_enabled(osdev))
515 		return (qdf_dma_addr_t)mem_info->iova;
516 	else
517 		return (qdf_dma_addr_t)mem_info->pa;
518 }
519 
520 /**
521  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
522  * @osdev: parent device instance
523  * @mem_info: Pointer to allocated memory information
524  *
525  * Based on smmu stage 1 translation enablement status, return corresponding
526  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
527  * enabled, return pointer to IO virtual address otherwise return pointer to
528  * physical address
529  *
530  * Return: dma address storage pointer
531  */
532 static inline qdf_dma_addr_t *
533 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
534 			   qdf_mem_info_t *mem_info)
535 {
536 	if (__qdf_mem_smmu_s1_enabled(osdev))
537 		return (qdf_dma_addr_t *)(&mem_info->iova);
538 	else
539 		return (qdf_dma_addr_t *)(&mem_info->pa);
540 }
541 
542 /**
543  * __qdf_update_mem_map_table() - Update DMA memory map info
544  * @osdev: Parent device instance
545  * @mem_info: Pointer to shared memory information
546  * @dma_addr: dma address
547  * @mem_size: memory size allocated
548  *
549  * Store DMA shared memory information
550  *
551  * Return: none
552  */
553 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
554 					      qdf_mem_info_t *mem_info,
555 					      qdf_dma_addr_t dma_addr,
556 					      uint32_t mem_size)
557 {
558 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
559 	mem_info->iova = dma_addr;
560 	mem_info->size = mem_size;
561 }
562 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size in bytes, as recorded in @mem_info
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
576 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated, in bytes
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
592 
/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}
606 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
622 
623 
/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);
638 
/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);
655 
656 /**
657  * __qdf_mem_free() - free QDF memory
658  * @ptr: Pointer to the starting address of the memory to be freed.
659  *
660  * This function will free the memory pointed to by 'ptr'.
661  * Return: None
662  */
663 void __qdf_mem_free(void *ptr);
664 
665 /**
666  * __qdf_mem_valloc() - QDF virtual memory allocation API
667  * @size: Number of bytes of virtual memory to allocate.
668  * @func: Caller function name
669  * @line: Line number
670  *
671  * Return: A valid memory location on success, or NULL on failure
672  */
673 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);
674 
675 /**
676  * __qdf_mem_vfree() - QDF API to free virtual memory
677  * @ptr: Pointer to the virtual memory to free
678  *
679  * Return: None
680  */
681 void __qdf_mem_vfree(void *ptr);
682 
683 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
684 /**
685  * __qdf_untracked_mem_malloc() - allocates non-QDF memory
686  * @size: Number of bytes of memory to allocate.
687  *
688  * @func: Function name of the call site
689  * @line: line number of the call site
690  *
691  * This function will dynamically allocate the specified number of bytes of
692  * memory. Memory allocated is not tracked by qdf memory debug framework.
693  *
694  * Return:
695  * Upon successful allocation, returns a non-NULL pointer to the allocated
696  * memory.  If this function is unable to allocate the amount of memory
697  * specified (for any reason) it returns NULL.
698  */
699 void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
700 				 uint32_t line);
701 
702 /**
703  * __qdf_untracked_mem_free() - free non-QDF memory
704  * @ptr: Pointer to the starting address of the memory to be freed.
705  *
706  * This function will free the memory pointed to by 'ptr'.
707  * Return: None
708  */
709 
710 void __qdf_untracked_mem_free(void *ptr);
711 #endif
712 
713 /**
714  * __qdf_mem_free_consistent() - free consistent qdf memory
715  * @osdev: OS device handle
716  * @dev: Pointer to device handle
717  * @size: Size to be allocated
718  * @vaddr: virtual address
719  * @paddr: Physical address
720  * @memctx: Pointer to DMA context
721  *
722  * Return: none
723  */
724 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
725 			       qdf_size_t size, void *vaddr,
726 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
727 
728 #endif /* __I_QDF_MEM_H */
729