xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision c7eaf5ac989ac229214b8317faa3e981d261e7db)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_mem.h
22  * Linux-specific definitions for QDF memory API's
23  */
24 
25 #ifndef __I_QDF_MEM_H
26 #define __I_QDF_MEM_H
27 
28 #ifdef __KERNEL__
29 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
31 #include <linux/autoconf.h>
32 #else
33 #include <generated/autoconf.h>
34 #endif
35 #endif
36 #include <linux/slab.h>
37 #include <linux/hardirq.h>
38 #include <linux/vmalloc.h>
39 #include <linux/pci.h> /* pci_alloc_consistent */
40 #include <linux/cache.h> /* L1_CACHE_BYTES */
41 
42 #define __qdf_cache_line_sz L1_CACHE_BYTES
43 #include "queue.h"
44 
45 #else
46 /*
47  * Provide dummy defs for kernel data types, functions, and enums
48  * used in this header file.
49  */
50 #define GFP_KERNEL 0
51 #define GFP_ATOMIC 0
52 #define kzalloc(size, flags) NULL
53 #define vmalloc(size)        NULL
54 #define kfree(buf)
55 #define vfree(buf)
56 #define pci_alloc_consistent(dev, size, paddr) NULL
57 #define __qdf_mempool_t void*
58 #define QDF_RET_IP NULL
59 #endif /* __KERNEL__ */
60 #include <qdf_status.h>
61 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && \
62 		(IS_ENABLED(CONFIG_ARCH_MSM) || IS_ENABLED(CONFIG_ARCH_QCOM)))
63 #include <linux/qcom-iommu-util.h>
64 #endif
65 
66 #if IS_ENABLED(CONFIG_ARM_SMMU)
67 #include <pld_common.h>
68 #ifdef ENABLE_SMMU_S1_TRANSLATION
69 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
70 #include <asm/dma-iommu.h>
71 #endif
72 #endif
73 #include <linux/iommu.h>
74 #endif
75 
#ifdef __KERNEL__
/**
 * typedef mempool_elem_t - Memory pool free-list element
 * @mempool_entry: linkage into the pool's singly-linked free list
 */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags the pool was created with
 * @elem_size: size of each pool element in bytes
 * @pool_mem: base address of the backing memory for the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free (unallocated) elements
 * @lock: spinlock object (presumably serializes free_list/free_cnt
 *        updates — implementation lives in the .c file)
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements currently available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

/* Kernel slab-cache handle used by the __qdf_kmem_cache_* wrappers */
typedef struct kmem_cache *qdf_kmem_cache_t;
#endif /* __KERNEL__ */
108 
/* Native page size (as size_t) and generic alignment helper */
#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
/* DISABLE_MEMDEBUG_PANIC: compile memory-debug panics out entirely */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
/* Forward memory-debug failures to the QDF debug panic handler */
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/**
 * typedef __dma_data_direction - typedef for the kernel's
 * enum dma_data_direction
 */
typedef enum dma_data_direction __dma_data_direction;
126 
127 /**
128  * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
129  * @qdf_dir: QDF DMA data direction
130  *
131  * Return:
132  * enum dma_data_direction
133  */
134 static inline
135 enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
136 {
137 	switch (qdf_dir) {
138 	case QDF_DMA_BIDIRECTIONAL:
139 		return DMA_BIDIRECTIONAL;
140 	case QDF_DMA_TO_DEVICE:
141 		return DMA_TO_DEVICE;
142 	case QDF_DMA_FROM_DEVICE:
143 		return DMA_FROM_DEVICE;
144 	default:
145 		return DMA_NONE;
146 	}
147 }
148 
149 
150 /**
151  * __qdf_mem_map_nbytes_single - Map memory for DMA
152  * @osdev: pomter OS device context
153  * @buf: pointer to memory to be dma mapped
154  * @dir: DMA map direction
155  * @nbytes: number of bytes to be mapped.
156  * @phy_addr: pointer to receive physical address.
157  *
158  * Return: success/failure
159  */
160 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
161 						  void *buf, qdf_dma_dir_t dir,
162 						  int nbytes,
163 						  qdf_dma_addr_t *phy_addr)
164 {
165 	/* assume that the OS only provides a single fragment */
166 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
167 					__qdf_dma_dir_to_os(dir));
168 	return dma_mapping_error(osdev->dev, *phy_addr) ?
169 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
170 }
171 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA (bus) address of the buffer
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels: use dma_cache_sync() directly.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA (bus) address of the buffer
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Hands the buffer back to the CPU so subsequent CPU reads observe
 * device-written data.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
190 
191 /**
192  * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
193  *
194  * @osdev: pomter OS device context
195  * @phy_addr: physical address of memory to be dma unmapped
196  * @dir: DMA unmap direction
197  * @nbytes: number of bytes to be unmapped.
198  *
199  * Return - none
200  */
201 static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
202 						 qdf_dma_addr_t phy_addr,
203 						 qdf_dma_dir_t dir, int nbytes)
204 {
205 	dma_unmap_single(osdev->dev, phy_addr, nbytes,
206 				__qdf_dma_dir_to_os(dir));
207 }
208 #ifdef __KERNEL__
209 
210 typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
211 
212 /**
213  * __qdf_mempool_init() - Create and initialize memory pool
214  * @osdev: platform device object
215  * @pool_addr: address of the pool created
216  * @elem_cnt: no. of elements in pool
217  * @elem_size: size of each pool element in bytes
218  * @flags: flags
219  *
 * Return: 0 on success, non-zero on failure (the pool handle is
 *	   returned through @pool_addr)
221  */
222 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
223 		       int elem_cnt, size_t elem_size, u_int32_t flags);
224 
225 /**
226  * __qdf_mempool_destroy() - Destroy memory pool
227  * @osdev: platform device object
228  * @pool: memory pool
229  *
 * Return: none
231  */
232 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
233 
234 /**
235  * __qdf_mempool_alloc() - Allocate an element memory pool
236  * @osdev: platform device object
237  * @pool: to memory pool
238  *
239  * Return: Pointer to the allocated element or NULL if the pool is empty
240  */
241 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
242 
243 /**
244  * __qdf_mempool_free() - Free a memory pool element
245  * @osdev: Platform device object
246  * @pool: Handle to memory pool
247  * @buf: Element to be freed
248  *
249  * Return: none
250  */
251 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
252 
253 /**
254  * __qdf_kmem_cache_create() - OS abstraction for cache creation
255  * @cache_name: Cache name
256  * @size: Size of the object to be created
257  *
258  * Return: Cache address on successful creation, else NULL
259  */
260 qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
261 					 qdf_size_t size);
262 
263 /**
264  * __qdf_kmem_cache_destroy() - OS abstraction for cache destruction
265  * @cache: Cache pointer
266  *
267  * Return: void
268  */
269 void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);
270 
271 /**
272  * __qdf_kmem_cache_alloc() - Function to allocation object from a cache
273  * @cache: Cache address
274  *
275  * Return: Object from cache
276  *
277  */
278 void *__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);
279 
280 /**
281  * __qdf_kmem_cache_free() - Function to free cache object
282  * @cache: Cache address
283  * @node: Object to be returned to cache
284  *
285  * Return: void
286  */
287 void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);
288 
289 #define QDF_RET_IP ((void *)_RET_IP_)
290 
291 #define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
292 #endif
293 
/**
 * __qdf_ioremap() - map bus memory into cpu space
 * @HOST_CE_ADDRESS: bus address of the memory
 * @HOST_CE_SIZE: memory size to map
 *
 * Kernels v5.6+ dropped ioremap_nocache(); plain ioremap() provides the
 * equivalent non-cached mapping there.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
#else
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE)
#endif
306 
307 /**
308  * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
309  * @osdev: parent device instance
310  *
311  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
312  */
313 static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
314 {
315 	return osdev->smmu_s1_enabled;
316 }
317 
318 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
319 /**
320  * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
321  */
322 typedef struct iommu_domain __qdf_iommu_domain_t;
323 
324 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
325 #if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
326 /**
327  * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS mapping
328  *			      configurations bitmap
329  * @attr: QDF iommu attribute
330  *
331  * Return: IOMMU mapping configuration bitmaps
332  */
333 static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
334 {
335 	switch (attr) {
336 	case QDF_DOMAIN_ATTR_S1_BYPASS:
337 		return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
338 	case QDF_DOMAIN_ATTR_ATOMIC:
339 		return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
340 	case QDF_DOMAIN_ATTR_FAST:
341 		return QCOM_IOMMU_MAPPING_CONF_FAST;
342 	default:
343 		return -EINVAL;
344 	}
345 }
346 
347 /**
348  * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
349  *
350  * @domain: iommu domain
351  * @attr: iommu attribute
352  * @data: data pointer
353  *
354  * Return: 0 for success, and negative values otherwise
355  */
356 static inline int
357 __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
358 			    enum qdf_iommu_attr attr, void *data)
359 {
360 	int mapping_config;
361 	int mapping_bitmap;
362 	int *value;
363 
364 	mapping_bitmap = __qdf_iommu_attr_to_os(attr);
365 	if (mapping_bitmap < 0)
366 		return -EINVAL;
367 
368 	mapping_config = qcom_iommu_get_mappings_configuration(domain);
369 	if (mapping_config < 0)
370 		return -EINVAL;
371 
372 	value = data;
373 	*value = (mapping_config & mapping_bitmap) ? 1 : 0;
374 
375 	return 0;
376 }
377 #else /* !CONFIG_QCOM_IOMMU_UTIL */
/* Stub: without CONFIG_QCOM_IOMMU_UTIL the mapping configuration cannot
 * be queried on v5.13+ kernels, so report "not supported".
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return -ENOTSUPP;
}
384 #endif /* CONFIG_QCOM_IOMMU_UTIL */
385 #else
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS specific enum
 * @attr: QDF iommu attribute
 *
 * One-to-one translation of QDF_DOMAIN_ATTR_* to the kernel's
 * DOMAIN_ATTR_* values (pre-v5.13 iommu attribute API).
 *
 * Return: matching enum iommu_attr; DOMAIN_ATTR_EXTENDED_MAX for any
 *	   attribute with no kernel counterpart
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}
448 
449 /**
450  * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
451  *
452  * @domain: iommu domain
453  * @attr: iommu attribute
454  * @data: data pointer
455  *
456  * Return: iommu domain attr
457  */
458 static inline int
459 __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
460 			    enum qdf_iommu_attr attr, void *data)
461 {
462 	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
463 				     data);
464 }
465 #endif
466 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Pre-v4.19 kernels keep the domain inside the arm iommu mapping.
 *
 * Return: iommu domain, or NULL when no iommu mapping is present
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
489 
490 /**
491  * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
492  * @osdev: parent device instance
493  * @dma_addr: dma_addr
494  *
495  * Get actual physical address from dma_addr based on SMMU enablement status.
496  * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
497  * (IOVA) otherwise returns physical address. So get SMMU physical address
498  * mapping from IOVA.
499  *
500  * Return: dmaable physical address
501  */
502 static inline unsigned long
503 __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
504 			     qdf_dma_addr_t dma_addr)
505 {
506 	struct iommu_domain *domain;
507 
508 	if (__qdf_mem_smmu_s1_enabled(osdev)) {
509 		domain = __qdf_dev_get_domain(osdev);
510 		if (domain)
511 			return iommu_iova_to_phys(domain, dma_addr);
512 	}
513 
514 	return dma_addr;
515 }
516 #else
/* Without ARM SMMU stage-1 support, DMA addresses are already physical */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
523 #endif
524 
525 /**
526  * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
527  * @dev: device instance
528  * @sgt: scatter gather table pointer
529  * @cpu_addr: HLOS virtual address
530  * @dma_addr: dma/iova
531  * @size: allocated memory size
532  *
533  * Return: physical address
534  */
535 static inline int
536 __qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
537 			     qdf_dma_addr_t dma_addr, size_t size)
538 {
539 	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
540 				size);
541 }
542 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist array owned by @sgt; the underlying memory
 * pages are not freed.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
554 
555 /**
556  * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
557  * @sgt: scatter gather table pointer
558  *
559  * Return: None
560  */
561 static inline void
562 __qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
563 {
564 	struct scatterlist *sg;
565 	int i;
566 
567 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
568 		if (!sg)
569 			break;
570 
571 		sg->dma_address = sg_phys(sg);
572 	}
573 }
574 
575 /**
576  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
577  * @osdev: parent device instance
578  * @mem_info: Pointer to allocated memory information
579  *
580  * Based on smmu stage 1 translation enablement status, return corresponding dma
581  * address from qdf_mem_info_t. If stage 1 translation enabled, return
582  * IO virtual address otherwise return physical address.
583  *
584  * Return: dma address
585  */
586 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
587 						    qdf_mem_info_t *mem_info)
588 {
589 	if (__qdf_mem_smmu_s1_enabled(osdev))
590 		return (qdf_dma_addr_t)mem_info->iova;
591 	else
592 		return (qdf_dma_addr_t)mem_info->pa;
593 }
594 
595 /**
596  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
597  * @osdev: parent device instance
598  * @mem_info: Pointer to allocated memory information
599  *
600  * Based on smmu stage 1 translation enablement status, return corresponding
601  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
602  * enabled, return pointer to IO virtual address otherwise return pointer to
603  * physical address
604  *
605  * Return: dma address storage pointer
606  */
607 static inline qdf_dma_addr_t *
608 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
609 			   qdf_mem_info_t *mem_info)
610 {
611 	if (__qdf_mem_smmu_s1_enabled(osdev))
612 		return (qdf_dma_addr_t *)(&mem_info->iova);
613 	else
614 		return (qdf_dma_addr_t *)(&mem_info->pa);
615 }
616 
617 /**
618  * __qdf_update_mem_map_table() - Update DMA memory map info
619  * @osdev: Parent device instance
620  * @mem_info: Pointer to shared memory information
621  * @dma_addr: dma address
622  * @mem_size: memory size allocated
623  *
624  * Store DMA shared memory information
625  *
626  * Return: none
627  */
628 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
629 					      qdf_mem_info_t *mem_info,
630 					      qdf_dma_addr_t dma_addr,
631 					      uint32_t mem_size)
632 {
633 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
634 	mem_info->iova = dma_addr;
635 	mem_info->size = mem_size;
636 }
637 
638 /**
639  * __qdf_mem_get_dma_size() - Return DMA memory size
640  * @osdev: parent device instance
641  * @mem_info: Pointer to allocated memory information
642  *
643  * Return: DMA memory size
644  */
645 static inline uint32_t
646 __qdf_mem_get_dma_size(qdf_device_t osdev,
647 		       qdf_mem_info_t *mem_info)
648 {
649 	return mem_info->size;
650 }
651 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
667 
668 /**
669  * __qdf_mem_get_dma_pa() - Return DMA physical address
670  * @osdev: parent device instance
671  * @mem_info: Pointer to allocated memory information
672  *
673  * Return: DMA physical address
674  */
675 static inline qdf_dma_addr_t
676 __qdf_mem_get_dma_pa(qdf_device_t osdev,
677 		     qdf_mem_info_t *mem_info)
678 {
679 	return mem_info->pa;
680 }
681 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
697 
698 
699 /**
700  * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
701  * @osdev: OS device handle
702  * @dev: Pointer to device handle
703  * @size: Size to be allocated
704  * @paddr: Physical address
705  * @func: Function name of the call site
 * @line: line number of the call site
707  *
708  * Return: pointer of allocated memory or null if memory alloc fails
709  */
710 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
711 				 qdf_size_t size, qdf_dma_addr_t *paddr,
712 				 const char *func, uint32_t line);
713 
714 /**
715  * __qdf_mem_malloc() - allocates QDF memory
716  * @size: Number of bytes of memory to allocate.
717  *
718  * @func: Function name of the call site
 * @line: line number of the call site
720  *
 * This function will dynamically allocate the specified number of bytes of
722  * memory.
723  *
724  * Return:
725  * Upon successful allocate, returns a non-NULL pointer to the allocated
726  * memory.  If this function is unable to allocate the amount of memory
727  * specified (for any reason) it returns NULL.
728  */
729 void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);
730 
731 /**
732  * __qdf_mem_free() - free QDF memory
733  * @ptr: Pointer to the starting address of the memory to be freed.
734  *
735  * This function will free the memory pointed to by 'ptr'.
736  * Return: None
737  */
738 void __qdf_mem_free(void *ptr);
739 
740 /**
741  * __qdf_mem_valloc() - QDF virtual memory allocation API
742  * @size: Number of bytes of virtual memory to allocate.
743  * @func: Caller function name
744  * @line: Line number
745  *
746  * Return: A valid memory location on success, or NULL on failure
747  */
748 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);
749 
750 /**
751  * __qdf_mem_vfree() - QDF API to free virtual memory
752  * @ptr: Pointer to the virtual memory to free
753  *
754  * Return: None
755  */
756 void __qdf_mem_vfree(void *ptr);
757 
758 /**
759  * __qdf_mem_virt_to_phys() - Convert virtual address to physical
760  * @vaddr: virtual address
761  *
762  * Return: physical address
763  */
764 #define __qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
765 
766 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
767 /**
768  * __qdf_untracked_mem_malloc() - allocates non-QDF memory
769  * @size: Number of bytes of memory to allocate.
770  * @func: Function name of the call site
771  * @line: line number of the call site
772  *
773  * This function will dynamically allocate the specified number of bytes of
774  * memory. Memory allocated is not tracked by qdf memory debug framework.
775  *
776  * Return:
777  * Upon successful allocation, returns a non-NULL pointer to the allocated
778  * memory.  If this function is unable to allocate the amount of memory
779  * specified (for any reason) it returns NULL.
780  */
781 void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
782 				 uint32_t line);
783 
784 /**
785  * __qdf_untracked_mem_free() - free non-QDF memory
786  * @ptr: Pointer to the starting address of the memory to be freed.
787  *
788  * This function will free the memory pointed to by 'ptr'.
789  * Return: None
790  */
791 
792 void __qdf_untracked_mem_free(void *ptr);
793 #endif
794 
795 /**
796  * __qdf_mem_free_consistent() - free consistent qdf memory
797  * @osdev: OS device handle
798  * @dev: Pointer to device handle
799  * @size: Size to be allocated
800  * @vaddr: virtual address
801  * @paddr: Physical address
802  * @memctx: Pointer to DMA context
803  *
804  * Return: none
805  */
806 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
807 			       qdf_size_t size, void *vaddr,
808 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
809 
810 #endif /* __I_QDF_MEM_H */
811