xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_mem.h
 * Linux-specific definitions for QDF memory APIs
22  */
23 
24 #ifndef __I_QDF_MEM_H
25 #define __I_QDF_MEM_H
26 
27 #ifdef __KERNEL__
28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
30 #include <linux/autoconf.h>
31 #else
32 #include <generated/autoconf.h>
33 #endif
34 #endif
35 #include <linux/slab.h>
36 #include <linux/hardirq.h>
37 #include <linux/vmalloc.h>
38 #include <linux/pci.h> /* pci_alloc_consistent */
39 #include <linux/cache.h> /* L1_CACHE_BYTES */
40 
41 #define __qdf_cache_line_sz L1_CACHE_BYTES
42 #include "queue.h"
43 
44 #else
45 /*
46  * Provide dummy defs for kernel data types, functions, and enums
47  * used in this header file.
48  */
49 #define GFP_KERNEL 0
50 #define GFP_ATOMIC 0
51 #define kzalloc(size, flags) NULL
52 #define vmalloc(size)        NULL
53 #define kfree(buf)
54 #define vfree(buf)
55 #define pci_alloc_consistent(dev, size, paddr) NULL
56 #define __qdf_mempool_t void*
57 #define QDF_RET_IP NULL
58 #endif /* __KERNEL__ */
59 #include <qdf_status.h>
60 
61 #ifdef CONFIG_ARM_SMMU
62 #include <pld_common.h>
63 #ifdef ENABLE_SMMU_S1_TRANSLATION
64 #include <asm/dma-iommu.h>
65 #endif
66 #include <linux/iommu.h>
67 #endif
68 
69 #ifdef __KERNEL__
/* List linkage embedded in each free pool element; elements are chained
 * on __qdf_mempool_ctxt's free_list via this entry.
 */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;
73 
/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free pool elements
 * @lock: spinlock object (presumably guards @free_list/@free_cnt; the
 *        locking is done in the .c implementation)
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
98 
99 #endif /* __KERNEL__ */
100 
101 #define __qdf_align(a, mask) ALIGN(a, mask)
102 
103 /* typedef for dma_data_direction */
104 typedef enum dma_data_direction __dma_data_direction;
105 
106 /**
107  * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
108  * @dir: QDF DMA data direction
109  *
110  * Return:
111  * enum dma_data_direction
112  */
113 static inline
114 enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
115 {
116 	switch (qdf_dir) {
117 	case QDF_DMA_BIDIRECTIONAL:
118 		return DMA_BIDIRECTIONAL;
119 	case QDF_DMA_TO_DEVICE:
120 		return DMA_TO_DEVICE;
121 	case QDF_DMA_FROM_DEVICE:
122 		return DMA_FROM_DEVICE;
123 	default:
124 		return DMA_NONE;
125 	}
126 }
127 
128 
129 /**
130  * __qdf_mem_map_nbytes_single - Map memory for DMA
131  * @osdev: pomter OS device context
132  * @buf: pointer to memory to be dma mapped
133  * @dir: DMA map direction
134  * @nbytes: number of bytes to be mapped.
135  * @phy_addr: ponter to recive physical address.
136  *
137  * Return: success/failure
138  */
139 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
140 						  void *buf, qdf_dma_dir_t dir,
141 						  int nbytes,
142 						  qdf_dma_addr_t *phy_addr)
143 {
144 	/* assume that the OS only provides a single fragment */
145 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
146 					__qdf_dma_dir_to_os(dir));
147 	return dma_mapping_error(osdev->dev, *phy_addr) ?
148 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
149 }
150 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - sync a DMA buffer for CPU access
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction of the mapping
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels: uses the legacy dma_cache_sync() interface.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/* 2.6.20+: sync the mapped range for the CPU via the streaming DMA API */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif
169 
/**
 * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 *
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
187 #ifdef __KERNEL__
188 
189 typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
190 
191 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
192 		       size_t pool_entry_size, u_int32_t flags);
193 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
194 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
195 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
196 #define QDF_RET_IP ((void *)_RET_IP_)
197 
198 #define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
199 #endif
200 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
211 
#if defined(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
/* Pre-4.19 kernels: the iommu domain hangs off the ARM iommu mapping
 * object; returns NULL when no mapping has been set up.
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
235 
/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	/* no domain available (or S1 bypassed): dma_addr is already physical */
	return dma_addr;
}
#else
/* SMMU S1 translation disabled: the DMA address is the physical address */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
270 
271 /**
272  * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
273  * @dev: device instace
274  * @sgt: scatter gather table pointer
275  * @cpu_addr: HLOS virtual address
276  * @dma_addr: dma/iova
277  * @size: allocated memory size
278  *
279  * Return: physical address
280  */
281 static inline int
282 __qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
283 			     qdf_dma_addr_t dma_addr, size_t size)
284 {
285 	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
286 				size);
287 }
288 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist storage held by @sgt (e.g. as populated by
 * __qdf_os_mem_dma_get_sgtable()); the sg_table struct itself is owned
 * by the caller.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
300 
/**
 * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Sets each entry's dma_address to its physical address (sg_phys), i.e.
 * an identity dma mapping for every element of the table.
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* defensive: stop early if the chain ends before nents */
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
320 
321 /**
322  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
323  * @osdev: parent device instance
324  * @mem_info: Pointer to allocated memory information
325  *
326  * Based on smmu stage 1 translation enablement status, return corresponding dma
327  * address from qdf_mem_info_t. If stage 1 translation enabled, return
328  * IO virtual address otherwise return physical address.
329  *
330  * Return: dma address
331  */
332 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
333 						    qdf_mem_info_t *mem_info)
334 {
335 	if (__qdf_mem_smmu_s1_enabled(osdev))
336 		return (qdf_dma_addr_t)mem_info->iova;
337 	else
338 		return (qdf_dma_addr_t)mem_info->pa;
339 }
340 
341 /**
342  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
343  * @osdev: parent device instance
344  * @mem_info: Pointer to allocated memory information
345  *
346  * Based on smmu stage 1 translation enablement status, return corresponding
347  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
348  * enabled, return pointer to IO virtual address otherwise return pointer to
349  * physical address
350  *
351  * Return: dma address storage pointer
352  */
353 static inline qdf_dma_addr_t *
354 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
355 			   qdf_mem_info_t *mem_info)
356 {
357 	if (__qdf_mem_smmu_s1_enabled(osdev))
358 		return (qdf_dma_addr_t *)(&mem_info->iova);
359 	else
360 		return (qdf_dma_addr_t *)(&mem_info->pa);
361 }
362 
363 /**
364  * __qdf_update_mem_map_table() - Update DMA memory map info
365  * @osdev: Parent device instance
366  * @mem_info: Pointer to shared memory information
367  * @dma_addr: dma address
368  * @mem_size: memory size allocated
369  *
370  * Store DMA shared memory information
371  *
372  * Return: none
373  */
374 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
375 					      qdf_mem_info_t *mem_info,
376 					      qdf_dma_addr_t dma_addr,
377 					      uint32_t mem_size)
378 {
379 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
380 	mem_info->iova = dma_addr;
381 	mem_info->size = mem_size;
382 }
383 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
397 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance (unused)
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
413 
414 /**
415  * __qdf_mem_get_dma_size() - Return DMA physical address
416  * @osdev: parent device instance
417  * @mem_info: Pointer to allocated memory information
418  *
419  * Return: DMA physical address
420  */
421 static inline qdf_dma_addr_t
422 __qdf_mem_get_dma_pa(qdf_device_t osdev,
423 		     qdf_mem_info_t *mem_info)
424 {
425 	return mem_info->pa;
426 }
427 
428 /**
429  * __qdf_mem_set_dma_size() - Set DMA physical address
430  * @osdev: parent device instance
431  * @mem_info: Pointer to allocated memory information
432  * @dma_pa: DMA phsical address
433  *
434  * Return: none
435  */
436 static inline void
437 __qdf_mem_set_dma_pa(qdf_device_t osdev,
438 		     qdf_mem_info_t *mem_info,
439 		     qdf_dma_addr_t dma_pa)
440 {
441 	mem_info->pa = dma_pa;
442 }
443 #endif /* __I_QDF_MEM_H */
444