xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/i_qdf_mem.h (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: i_qdf_mem.h
21  * Linux-specific definitions for QDF memory API's
22  */
23 
24 #ifndef __I_QDF_MEM_H
25 #define __I_QDF_MEM_H
26 
27 #ifdef __KERNEL__
28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
30 #include <linux/autoconf.h>
31 #else
32 #include <generated/autoconf.h>
33 #endif
34 #endif
35 #include <linux/slab.h>
36 #include <linux/hardirq.h>
37 #include <linux/vmalloc.h>
38 #include <linux/pci.h> /* pci_alloc_consistent */
39 #include <linux/cache.h> /* L1_CACHE_BYTES */
40 
41 #define __qdf_cache_line_sz L1_CACHE_BYTES
42 #if CONFIG_MCL
43 #include <cds_queue.h>
44 #else
45 #include <sys/queue.h>
46 #endif
47 #else
48 /*
49  * Provide dummy defs for kernel data types, functions, and enums
50  * used in this header file.
51  */
52 #define GFP_KERNEL 0
53 #define GFP_ATOMIC 0
54 #define kzalloc(size, flags) NULL
55 #define vmalloc(size)        NULL
56 #define kfree(buf)
57 #define vfree(buf)
58 #define pci_alloc_consistent(dev, size, paddr) NULL
59 #define __qdf_mempool_t void*
60 #define QDF_RET_IP NULL
61 #endif /* __KERNEL__ */
62 #include <qdf_status.h>
63 
64 #ifdef CONFIG_ARM_SMMU
65 #include <pld_common.h>
66 #include <asm/dma-iommu.h>
67 #include <linux/iommu.h>
68 #endif
69 
70 #ifdef __KERNEL__
/**
 * typedef mempool_elem_t - linkage element embedded in each free pool entry
 * @mempool_entry: singly-linked tail-queue linkage into the pool free list
 */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags passed at pool creation time
 * @elem_size: size of each pool element in bytes
 * @pool_mem: base address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free (available) pool elements
 * @lock: spinlock protecting the free list and counters
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
99 
100 #endif /* __KERNEL__ */
101 
102 #define __qdf_align(a, mask) ALIGN(a, mask)
103 
104 /* typedef for dma_data_direction */
105 typedef enum dma_data_direction __dma_data_direction;
106 
/**
 * __qdf_dma_dir_to_os() - Convert QDF DMA data direction to OS specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: the matching enum dma_data_direction value; DMA_NONE for any
 * unrecognized direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}
128 
129 
130 /**
131  * __qdf_mem_map_nbytes_single - Map memory for DMA
132  * @osdev: pomter OS device context
133  * @buf: pointer to memory to be dma mapped
134  * @dir: DMA map direction
135  * @nbytes: number of bytes to be mapped.
136  * @phy_addr: ponter to recive physical address.
137  *
138  * Return: success/failure
139  */
140 static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
141 						  void *buf, qdf_dma_dir_t dir,
142 						  int nbytes,
143 						  qdf_dma_addr_t *phy_addr)
144 {
145 	/* assume that the OS only provides a single fragment */
146 	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
147 					__qdf_dma_dir_to_os(dir));
148 	return dma_mapping_error(osdev->dev, *phy_addr) ?
149 	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
150 }
151 
/**
 * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: DMA (bus) address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
				__qdf_dma_dir_to_os(dir));
}
169 #ifdef __KERNEL__
170 
171 typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
172 
173 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
174 		       size_t pool_entry_size, u_int32_t flags);
175 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
176 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
177 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
178 #define QDF_RET_IP ((void *)_RET_IP_)
179 
180 #define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
181 #endif
182 
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if SMMU stage 1 translation is enabled, false if bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}
193 
194 #ifdef CONFIG_ARM_SMMU
195 /**
196  * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
197  * @osdev: parent device instance
198  * @dma_addr: dma_addr
199  *
200  * Get actual physical address from dma_addr based on SMMU enablement status.
201  * IF SMMU Stage 1 tranlation is enabled, DMA APIs return IO virtual address
202  * (IOVA) otherwise returns physical address. So get SMMU physical address
203  * mapping from IOVA.
204  *
205  * Return: dmaable physical address
206  */
207 static inline unsigned long
208 __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
209 			     qdf_dma_addr_t dma_addr)
210 {
211 	struct dma_iommu_mapping *mapping;
212 
213 	if (__qdf_mem_smmu_s1_enabled(osdev)) {
214 		mapping = osdev->iommu_mapping;
215 		if (mapping)
216 			return iommu_iova_to_phys(mapping->domain, dma_addr);
217 	}
218 
219 	return dma_addr;
220 }
221 #else
222 static inline unsigned long
223 __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
224 			     qdf_dma_addr_t dma_addr)
225 {
226 	return dma_addr;
227 }
228 #endif
229 
/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, negative error code from dma_get_sgtable() on
 * failure
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
				size);
}
247 
/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Releases the scatterlist array owned by @sgt; the struct itself is not
 * freed.
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}
259 
/**
 * __qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist
 * elements
 * @sgt: scatter gather table pointer
 *
 * Populates each entry's dma_address with its physical address (sg_phys).
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg->dma_address = sg_phys(sg);
}
275 
276 /**
277  * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
278  * @osdev: parent device instance
279  * @mem_info: Pointer to allocated memory information
280  *
281  * Based on smmu stage 1 translation enablement status, return corresponding dma
282  * address from qdf_mem_info_t. If stage 1 translation enabled, return
283  * IO virtual address otherwise return physical address.
284  *
285  * Return: dma address
286  */
287 static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
288 						    qdf_mem_info_t *mem_info)
289 {
290 	if (__qdf_mem_smmu_s1_enabled(osdev))
291 		return (qdf_dma_addr_t)mem_info->iova;
292 	else
293 		return (qdf_dma_addr_t)mem_info->pa;
294 }
295 
296 /**
297  * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
298  * @osdev: parent device instance
299  * @mem_info: Pointer to allocated memory information
300  *
301  * Based on smmu stage 1 translation enablement status, return corresponding
302  * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
303  * enabled, return pointer to IO virtual address otherwise return pointer to
304  * physical address
305  *
306  * Return: dma address storage pointer
307  */
308 static inline qdf_dma_addr_t *
309 __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
310 			   qdf_mem_info_t *mem_info)
311 {
312 	if (__qdf_mem_smmu_s1_enabled(osdev))
313 		return (qdf_dma_addr_t *)(&mem_info->iova);
314 	else
315 		return (qdf_dma_addr_t *)(&mem_info->pa);
316 }
317 
318 /**
319  * __qdf_update_mem_map_table() - Update DMA memory map info
320  * @osdev: Parent device instance
321  * @mem_info: Pointer to shared memory information
322  * @dma_addr: dma address
323  * @mem_size: memory size allocated
324  *
325  * Store DMA shared memory information
326  *
327  * Return: none
328  */
329 static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
330 					      qdf_mem_info_t *mem_info,
331 					      qdf_dma_addr_t dma_addr,
332 					      uint32_t mem_size)
333 {
334 	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
335 	mem_info->iova = dma_addr;
336 	mem_info->size = mem_size;
337 }
338 
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Return: DMA memory size in bytes
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}
352 
/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 * @mem_size: memory size allocated, in bytes
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}
368 
/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}
382 
/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
398 #endif /* __I_QDF_MEM_H */
399