xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision 10fe6982eb7194813d8d8335da336bbcd531b22e)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: i_qdf_mem.h
22  * Linux-specific definitions for QDF memory API's
23  */
24 
25 #ifndef __I_QDF_MEM_H
26 #define __I_QDF_MEM_H
27 
28 #ifdef __KERNEL__
29 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
30 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
31 #include <linux/autoconf.h>
32 #else
33 #include <generated/autoconf.h>
34 #endif
35 #endif
36 #include <linux/slab.h>
37 #include <linux/hardirq.h>
38 #include <linux/vmalloc.h>
39 #include <linux/pci.h> /* pci_alloc_consistent */
40 #include <linux/cache.h> /* L1_CACHE_BYTES */
41 
42 #define __qdf_cache_line_sz L1_CACHE_BYTES
43 #include "queue.h"
44 
45 #else
46 /*
47  * Provide dummy defs for kernel data types, functions, and enums
48  * used in this header file.
49  */
50 #define GFP_KERNEL 0
51 #define GFP_ATOMIC 0
52 #define kzalloc(size, flags) NULL
53 #define vmalloc(size)        NULL
54 #define kfree(buf)
55 #define vfree(buf)
56 #define pci_alloc_consistent(dev, size, paddr) NULL
57 #define __qdf_mempool_t void*
58 #define QDF_RET_IP NULL
59 #endif /* __KERNEL__ */
60 #include <qdf_status.h>
61 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && defined(MSM_PLATFORM)
62 #include <linux/qcom-iommu-util.h>
63 #endif
64 
65 #if IS_ENABLED(CONFIG_ARM_SMMU)
66 #include <pld_common.h>
67 #ifdef ENABLE_SMMU_S1_TRANSLATION
68 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
69 #include <asm/dma-iommu.h>
70 #endif
71 #endif
72 #include <linux/iommu.h>
73 #endif
74 
75 #ifdef __KERNEL__
/* A free pool element; elements are chained through this embedded link */
typedef struct mempool_elem {
	/* entry used to chain this element on the pool free list */
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags supplied at pool creation time
 * @elem_size: size of each pool element in bytes
 * @pool_mem: base address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: singly-linked list of free pool elements
 * @lock: spinlock object
 *        (NOTE(review): presumably serializes @free_list/@free_cnt access —
 *        confirm against the pool implementation)
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

/* Opaque handle to a kernel slab cache (struct kmem_cache) */
typedef struct kmem_cache *qdf_kmem_cache_t;
106 #endif /* __KERNEL__ */
107 
108 #define __page_size ((size_t)PAGE_SIZE)
109 #define __qdf_align(a, mask) ALIGN(a, mask)
110 
111 #ifdef DISABLE_MEMDEBUG_PANIC
112 #define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
113 	do { \
114 		/* no-op */ \
115 	} while (false)
116 #else
117 #define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
118 	QDF_DEBUG_PANIC(reason_fmt, ## args)
119 #endif
120 
121 /**
122  * typedef __dma_data_direction - typedef for dma_data_direction
123  */
124 typedef enum dma_data_direction __dma_data_direction;
125 
126 /**
127  * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
128  * @qdf_dir: QDF DMA data direction
129  *
130  * Return:
131  * enum dma_data_direction
132  */
133 static inline
134 enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
135 {
136 	switch (qdf_dir) {
137 	case QDF_DMA_BIDIRECTIONAL:
138 		return DMA_BIDIRECTIONAL;
139 	case QDF_DMA_TO_DEVICE:
140 		return DMA_TO_DEVICE;
141 	case QDF_DMA_FROM_DEVICE:
142 		return DMA_FROM_DEVICE;
143 	default:
144 		return DMA_NONE;
145 	}
146 }
147 
148 
149 /**
150  * __qdf_mem_map_nbytes_single - Map memory for DMA
151  * @osdev: pomter OS device context
152  * @buf: pointer to memory to be dma mapped
153  * @dir: DMA map direction
154  * @nbytes: number of bytes to be mapped.
155  * @phy_addr: pointer to receive physical address.
156  *
157  * Return: success/failure
158  */
159 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
160 						  void *buf, qdf_dma_dir_t dir,
161 						  int nbytes,
162 						  qdf_dma_addr_t *phy_addr)
163 {
164 	/* assume that the OS only provides a single fragment */
165 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
166 					__qdf_dma_dir_to_os(dir));
167 	return dma_mapping_error(osdev->dev, *phy_addr) ?
168 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
169 }
170 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels only provide dma_cache_sync() for this purpose.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
189 
/**
 * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 *
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
207 #ifdef __KERNEL__
208 
typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/**
 * __qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, non-zero on failure; the created pool handle is
 *         returned through @pool_addr
 *         (NOTE(review): exact status values inferred from the int return
 *         type — confirm against the implementation)
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags);

/**
 * __qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: memory pool handle
 *
 * Return: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: memory pool handle
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);

/**
 * __qdf_kmem_cache_create() - OS abstraction for cache creation
 * @cache_name: Cache name
 * @size: Size of the object to be created
 *
 * Return: Cache address on successful creation, else NULL
 */
qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
					 qdf_size_t size);

/**
 * __qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: Cache pointer
 *
 * Return: void
 */
void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_alloc() - Function to allocate an object from a cache
 * @cache: Cache address
 *
 * Return: Object from cache
 *
 */
void *__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_free() - Function to free cache object
 * @cache: Cache address
 * @node: Object to be returned to cache
 *
 * Return: void
 */
void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);

/* Caller's return address, used for allocation tracking */
#define QDF_RET_IP ((void *)_RET_IP_)

/* Size in bytes of a single element of the given pool */
#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
291 #endif
292 
/**
 * __qdf_ioremap() - map bus memory into cpu space
 * @HOST_CE_ADDRESS: bus address of the memory
 * @HOST_CE_SIZE: memory size to map
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
/* ioremap_nocache() was removed in 5.6; plain ioremap() is uncached there */
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
#else
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		   ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE)
#endif

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
316 
317 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
318 /**
319  * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
320  */
321 typedef struct iommu_domain __qdf_iommu_domain_t;
322 
323 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
324 #if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
325 /**
326  * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS mapping
327  *			      configurations bitmap
328  * @attr: QDF iommu attribute
329  *
330  * Return: IOMMU mapping configuration bitmaps
331  */
332 static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
333 {
334 	switch (attr) {
335 	case QDF_DOMAIN_ATTR_S1_BYPASS:
336 		return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
337 	case QDF_DOMAIN_ATTR_ATOMIC:
338 		return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
339 	case QDF_DOMAIN_ATTR_FAST:
340 		return QCOM_IOMMU_MAPPING_CONF_FAST;
341 	default:
342 		return -EINVAL;
343 	}
344 }
345 
346 /**
347  * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
348  *
349  * @domain: iommu domain
350  * @attr: iommu attribute
351  * @data: data pointer
352  *
353  * Return: 0 for success, and negative values otherwise
354  */
355 static inline int
356 __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
357 			    enum qdf_iommu_attr attr, void *data)
358 {
359 	int mapping_config;
360 	int mapping_bitmap;
361 	int *value;
362 
363 	mapping_bitmap = __qdf_iommu_attr_to_os(attr);
364 	if (mapping_bitmap < 0)
365 		return -EINVAL;
366 
367 	mapping_config = qcom_iommu_get_mappings_configuration(domain);
368 	if (mapping_config < 0)
369 		return -EINVAL;
370 
371 	value = data;
372 	*value = (mapping_config & mapping_bitmap) ? 1 : 0;
373 
374 	return 0;
375 }
376 #else /* !CONFIG_QCOM_IOMMU_UTIL */
/**
 * __qdf_iommu_domain_get_attr() - stub when CONFIG_QCOM_IOMMU_UTIL is absent
 * @domain: iommu domain (unused)
 * @attr: iommu attribute (unused)
 * @data: data pointer (unused)
 *
 * Return: -ENOTSUPP always
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return -ENOTSUPP;
}
383 #endif /* CONFIG_QCOM_IOMMU_UTIL */
384 #else
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS specific enum
 * @attr: QDF iommu attribute
 *
 * One-to-one mapping of QDF_DOMAIN_ATTR_* values onto the pre-5.13
 * DOMAIN_ATTR_* enum.
 *
 * Return: enum iommu_attr; DOMAIN_ATTR_EXTENDED_MAX for any unrecognized
 *         attribute
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}
447 
/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 *
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: status returned by iommu_domain_get_attr()
 *         (0 on success, negative errno otherwise)
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
				     data);
}
464 #endif
465 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev (pre-4.19 kernels)
 * @osdev: parent device instance
 *
 * Return: iommu domain of the device's iommu mapping, or NULL when no
 *         mapping exists
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
488 
489 /**
490  * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
491  * @osdev: parent device instance
492  * @dma_addr: dma_addr
493  *
494  * Get actual physical address from dma_addr based on SMMU enablement status.
495  * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
496  * (IOVA) otherwise returns physical address. So get SMMU physical address
497  * mapping from IOVA.
498  *
499  * Return: dmaable physical address
500  */
501 static inline unsigned long
502 __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
503 			     qdf_dma_addr_t dma_addr)
504 {
505 	struct iommu_domain *domain;
506 
507 	if (__qdf_mem_smmu_s1_enabled(osdev)) {
508 		domain = __qdf_dev_get_domain(osdev);
509 		if (domain)
510 			return iommu_iova_to_phys(domain, dma_addr);
511 	}
512 
513 	return dma_addr;
514 }
515 #else
/**
 * __qdf_mem_paddr_from_dmaaddr() - get physical address from dma_addr
 * @osdev: parent device instance (unused)
 * @dma_addr: dma_addr
 *
 * Without SMMU support the dma address is already the physical address.
 *
 * Return: @dma_addr unchanged
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
522 #endif
523 
/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: result of dma_get_sgtable(): 0 on success, negative errno on
 *         failure
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}

/**
 * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Fills in each entry's dma_address with its physical address.
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* defensive: stop on an unexpectedly NULL entry */
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
573 
574 /**
575  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
576  * @osdev: parent device instance
577  * @mem_info: Pointer to allocated memory information
578  *
579  * Based on smmu stage 1 translation enablement status, return corresponding dma
580  * address from qdf_mem_info_t. If stage 1 translation enabled, return
581  * IO virtual address otherwise return physical address.
582  *
583  * Return: dma address
584  */
585 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
586 						    qdf_mem_info_t *mem_info)
587 {
588 	if (__qdf_mem_smmu_s1_enabled(osdev))
589 		return (qdf_dma_addr_t)mem_info->iova;
590 	else
591 		return (qdf_dma_addr_t)mem_info->pa;
592 }
593 
594 /**
595  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
596  * @osdev: parent device instance
597  * @mem_info: Pointer to allocated memory information
598  *
599  * Based on smmu stage 1 translation enablement status, return corresponding
600  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
601  * enabled, return pointer to IO virtual address otherwise return pointer to
602  * physical address
603  *
604  * Return: dma address storage pointer
605  */
606 static inline qdf_dma_addr_t *
607 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
608 			   qdf_mem_info_t *mem_info)
609 {
610 	if (__qdf_mem_smmu_s1_enabled(osdev))
611 		return (qdf_dma_addr_t *)(&mem_info->iova);
612 	else
613 		return (qdf_dma_addr_t *)(&mem_info->pa);
614 }
615 
/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information. The physical address is resolved
 * from @dma_addr (an IOVA lookup when SMMU stage 1 is enabled, identity
 * otherwise).
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
					      qdf_mem_info_t *mem_info,
					      qdf_dma_addr_t dma_addr,
					      uint32_t mem_size)
{
	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	mem_info->iova = dma_addr;
	mem_info->size = mem_size;
}
636 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
696 
697 
/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);
729 
/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 * Return: None
 */
void __qdf_mem_free(void *ptr);

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: Number of bytes of virtual memory to allocate.
 * @func: Caller function name
 * @line: Line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: Pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

/**
 * __qdf_mem_virt_to_phys() - Convert virtual address to physical
 * @vaddr: virtual address
 *
 * Return: physical address
 */
#define __qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)
764 
#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated is not tracked by qdf memory debug framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
				 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 * Return: None
 */

void __qdf_untracked_mem_free(void *ptr);
#endif
793 
/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the allocation being freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
808 
809 #endif /* __I_QDF_MEM_H */
810