1 /* 2 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: i_qdf_mem.h 21 * Linux-specific definitions for QDF memory API's 22 */ 23 24 #ifndef __I_QDF_MEM_H 25 #define __I_QDF_MEM_H 26 27 #ifdef __KERNEL__ 28 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17) 29 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) 30 #include <linux/autoconf.h> 31 #else 32 #include <generated/autoconf.h> 33 #endif 34 #endif 35 #include <linux/slab.h> 36 #include <linux/hardirq.h> 37 #include <linux/vmalloc.h> 38 #include <linux/pci.h> /* pci_alloc_consistent */ 39 #include <linux/cache.h> /* L1_CACHE_BYTES */ 40 41 #define __qdf_cache_line_sz L1_CACHE_BYTES 42 #if CONFIG_MCL 43 #include <cds_queue.h> 44 #else 45 #include <sys/queue.h> 46 #endif 47 #else 48 /* 49 * Provide dummy defs for kernel data types, functions, and enums 50 * used in this header file. 
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define kzalloc(size, flags) NULL
#define vmalloc(size) NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
#include <qdf_status.h>

#ifdef CONFIG_ARM_SMMU
#include <pld_common.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#endif

#ifdef __KERNEL__
/* Header linked into a pool's free list while the element is unallocated */
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: base address of the memory backing the pool
 * @mem_size: total size of the pool in bytes
 * @free_list: list of free pool elements
 * @lock: spinlock object
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements currently available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

#endif /* __KERNEL__ */

#define __qdf_align(a, mask) ALIGN(a, mask)

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;

/**
 * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return:
 * enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}


/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive the physical address.
 *
 * Return: success/failure
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						   void *buf, qdf_dma_dir_t dir,
						   int nbytes,
						   qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
				   __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

/**
 * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
			 __qdf_dma_dir_to_os(dir));
}
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
		       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif

/**
 * __qdf_mem_cmp() - memory compare
 * @memory1: pointer to one location in memory to compare.
 * @memory2: pointer to second location in memory to compare.
 * @num_bytes: the number of bytes to compare.
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 * int32_t - returns an int value that tells if the memory
 * locations are equal or not equal.
 * 0 -- equal
 * < 0 -- *memory1 is less than *memory2
 * > 0 -- *memory1 is bigger than *memory2
 */
static inline int32_t __qdf_mem_cmp(const void *memory1, const void *memory2,
				    uint32_t num_bytes)
{
	return (int32_t) memcmp(memory1, memory2, num_bytes);
}

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}

#ifdef CONFIG_ARM_SMMU
/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct dma_iommu_mapping *mapping;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		mapping = osdev->iommu_mapping;
		if (mapping)
			return iommu_iova_to_phys(mapping->domain, dma_addr);
	}

	return dma_addr;
}
#else
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	/* SMMU not compiled in: the dma address is the physical address */
	return dma_addr;
}
#endif

/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, error code on failure (result of dma_get_sgtable())
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
			       size);
}

/**
 * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement status, return corresponding dma
 * address from qdf_mem_info_t. If stage 1 translation enabled, return
 * IO virtual address otherwise return physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
						    qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t)mem_info->iova;
	else
		return (qdf_dma_addr_t)mem_info->pa;
}

/**
 * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement status, return corresponding
 * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
 * enabled, return pointer to IO virtual address otherwise return pointer to
 * physical address
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *
__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
			   qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t *)(&mem_info->iova);
	else
		return (qdf_dma_addr_t *)(&mem_info->pa);
}

/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
					      qdf_mem_info_t *mem_info,
					      qdf_dma_addr_t dma_addr,
					      uint32_t mem_size)
{
	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	mem_info->iova = dma_addr;
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
#endif /* __I_QDF_MEM_H */