/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_mem.h
 * Linux-specific definitions for the QDF memory APIs
 */

#ifndef __I_QDF_MEM_H
#define __I_QDF_MEM_H

#ifdef __KERNEL__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
#include <linux/autoconf.h>
#else
#include <generated/autoconf.h>
#endif
#endif
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/vmalloc.h>
#include <linux/pci.h> /* pci_alloc_consistent */
#include <linux/cache.h> /* L1_CACHE_BYTES */

#define __qdf_cache_line_sz L1_CACHE_BYTES
#include "queue.h"

#else
/*
 * Provide dummy definitions of the kernel data types, functions, and enums
 * used in this header file.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define kzalloc(size, flags) NULL
#define vmalloc(size)        NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
#include <qdf_status.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && defined(MSM_PLATFORM)
#include <linux/qcom-iommu-util.h>
#endif

#if IS_ENABLED(CONFIG_ARM_SMMU)
#include <pld_common.h>
#ifdef ENABLE_SMMU_S1_TRANSLATION
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
#include <asm/dma-iommu.h>
#endif
#endif
#include <linux/iommu.h>
#endif

#ifdef __KERNEL__
typedef struct mempool_elem {
        STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the pool memory that was allocated
 * @mem_size: total size of the pool in bytes
 * @free_list: free pool list
 * @lock: spinlock object
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
        int pool_id;
        u_int32_t flags;
        size_t elem_size;
        void *pool_mem;
        u_int32_t mem_size;

        STAILQ_HEAD(, mempool_elem) free_list;
        spinlock_t lock;
        u_int32_t max_elem;
        u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

typedef struct kmem_cache *qdf_kmem_cache_t;
#endif /* __KERNEL__ */

#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)
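
/*
 * Illustrative sketch (not part of the API): __qdf_align() rounds a value up
 * to a multiple of the given alignment, so it can pad an allocation size out
 * to a whole cache line. Assuming a 64-byte L1_CACHE_BYTES, the example
 * below yields 128.
 *
 *      size_t aligned_len = __qdf_align(100, __qdf_cache_line_sz);
 */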

#ifdef DISABLE_MEMDEBUG_PANIC
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
        do { \
                /* no-op */ \
        } while (false)
#else
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
        QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/* typedef for dma_data_direction */
typedef enum dma_data_direction __dma_data_direction;

/**
 * __qdf_dma_dir_to_os() - Convert QDF DMA data direction to the OS-specific
 * enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
        switch (qdf_dir) {
        case QDF_DMA_BIDIRECTIONAL:
                return DMA_BIDIRECTIONAL;
        case QDF_DMA_TO_DEVICE:
                return DMA_TO_DEVICE;
        case QDF_DMA_FROM_DEVICE:
                return DMA_FROM_DEVICE;
        default:
                return DMA_NONE;
        }
}

/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to the OS device context
 * @buf: pointer to the memory to be DMA mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped
 * @phy_addr: pointer to receive the physical address
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
                                                   void *buf, qdf_dma_dir_t dir,
                                                   int nbytes,
                                                   qdf_dma_addr_t *phy_addr)
{
        /* assume that the OS only provides a single fragment */
        *phy_addr = dma_map_single(osdev->dev, buf, nbytes,
                                   __qdf_dma_dir_to_os(dir));
        return dma_mapping_error(osdev->dev, *phy_addr) ?
                QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
                                            qdf_dma_addr_t buf,
                                            qdf_dma_dir_t dir,
                                            int nbytes)
{
        dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
                                            qdf_dma_addr_t buf,
                                            qdf_dma_dir_t dir,
                                            int nbytes)
{
        dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
                                __qdf_dma_dir_to_os(dir));
}
#endif
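
/*
 * Illustrative sketch (assumed caller-side usage, not part of this header's
 * contract): map a receive buffer for DMA, sync it for CPU access after the
 * device writes, then unmap it. `my_osdev`, `rx_buf`, and `len` are
 * hypothetical names owned by the caller.
 *
 *      qdf_dma_addr_t pa;
 *
 *      if (__qdf_mem_map_nbytes_single(my_osdev, rx_buf, QDF_DMA_FROM_DEVICE,
 *                                      len, &pa) != QDF_STATUS_SUCCESS)
 *              return QDF_STATUS_E_FAILURE;
 *      __qdf_mem_dma_cache_sync(my_osdev, pa, QDF_DMA_FROM_DEVICE, len);
 *      ...read the device's data out of rx_buf...
 *      __qdf_mem_unmap_nbytes_single(my_osdev, pa, QDF_DMA_FROM_DEVICE, len);
 */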

/**
 * __qdf_mem_unmap_nbytes_single() - Unmap memory for DMA
 * @osdev: pointer to the OS device context
 * @phy_addr: physical address of the memory to be DMA unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
                                                 qdf_dma_addr_t phy_addr,
                                                 qdf_dma_dir_t dir, int nbytes)
{
        dma_unmap_single(osdev->dev, phy_addr, nbytes,
                         __qdf_dma_dir_to_os(dir));
}
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
                       size_t pool_entry_size, u_int32_t flags);
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
                                         qdf_size_t size);
void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);
void *__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);
void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);
#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
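
/*
 * Illustrative sketch (caller-side, with assumed semantics: a non-zero
 * return from __qdf_mempool_init() is treated as failure here): create a
 * pool of 64 fixed-size elements, allocate one, and tear the pool down.
 * `my_osdev` and `struct foo` are hypothetical.
 *
 *      __qdf_mempool_t pool;
 *      struct foo *obj;
 *
 *      if (__qdf_mempool_init(my_osdev, &pool, 64, sizeof(struct foo), 0))
 *              return;
 *      obj = __qdf_mempool_alloc(my_osdev, pool);
 *      if (obj) {
 *              ...use up to __qdf_mempool_elem_size(pool) bytes of obj...
 *              __qdf_mempool_free(my_osdev, pool, obj);
 *      }
 *      __qdf_mempool_destroy(my_osdev, pool);
 */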

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if SMMU stage 1 is enabled, false if it is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
        return osdev->smmu_s1_enabled;
}

#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/*
 * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
 */
typedef struct iommu_domain __qdf_iommu_domain_t;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
#if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
/**
 * __qdf_iommu_attr_to_os() - Convert a QDF iommu attribute to the OS mapping
 * configuration bitmap
 * @attr: QDF iommu attribute
 *
 * Return: IOMMU mapping configuration bitmap
 */
static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
        switch (attr) {
        case QDF_DOMAIN_ATTR_S1_BYPASS:
                return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
        case QDF_DOMAIN_ATTR_ATOMIC:
                return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
        case QDF_DOMAIN_ATTR_FAST:
                return QCOM_IOMMU_MAPPING_CONF_FAST;
        default:
                return -EINVAL;
        }
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
                            enum qdf_iommu_attr attr, void *data)
{
        int mapping_config;
        int mapping_bitmap;
        int *value;

        mapping_bitmap = __qdf_iommu_attr_to_os(attr);
        if (mapping_bitmap < 0)
                return -EINVAL;

        mapping_config = qcom_iommu_get_mappings_configuration(domain);
        if (mapping_config < 0)
                return -EINVAL;

        value = data;
        *value = (mapping_config & mapping_bitmap) ? 1 : 0;

        return 0;
}
#else /* !CONFIG_QCOM_IOMMU_UTIL */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
                            enum qdf_iommu_attr attr, void *data)
{
        return -ENOTSUPP;
}
#endif /* CONFIG_QCOM_IOMMU_UTIL */
#else
/**
 * __qdf_iommu_attr_to_os() - Convert a QDF iommu attribute to the OS-specific
 * enum
 * @attr: QDF iommu attribute
 *
 * Return: enum iommu_attr
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
        switch (attr) {
        case QDF_DOMAIN_ATTR_GEOMETRY:
                return DOMAIN_ATTR_GEOMETRY;
        case QDF_DOMAIN_ATTR_PAGING:
                return DOMAIN_ATTR_PAGING;
        case QDF_DOMAIN_ATTR_WINDOWS:
                return DOMAIN_ATTR_WINDOWS;
        case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
                return DOMAIN_ATTR_FSL_PAMU_STASH;
        case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
                return DOMAIN_ATTR_FSL_PAMU_ENABLE;
        case QDF_DOMAIN_ATTR_FSL_PAMUV1:
                return DOMAIN_ATTR_FSL_PAMUV1;
        case QDF_DOMAIN_ATTR_NESTING:
                return DOMAIN_ATTR_NESTING;
        case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
                return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
        case QDF_DOMAIN_ATTR_CONTEXT_BANK:
                return DOMAIN_ATTR_CONTEXT_BANK;
        case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
                return DOMAIN_ATTR_NON_FATAL_FAULTS;
        case QDF_DOMAIN_ATTR_S1_BYPASS:
                return DOMAIN_ATTR_S1_BYPASS;
        case QDF_DOMAIN_ATTR_ATOMIC:
                return DOMAIN_ATTR_ATOMIC;
        case QDF_DOMAIN_ATTR_SECURE_VMID:
                return DOMAIN_ATTR_SECURE_VMID;
        case QDF_DOMAIN_ATTR_FAST:
                return DOMAIN_ATTR_FAST;
        case QDF_DOMAIN_ATTR_PGTBL_INFO:
                return DOMAIN_ATTR_PGTBL_INFO;
        case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
                return DOMAIN_ATTR_USE_UPSTREAM_HINT;
        case QDF_DOMAIN_ATTR_EARLY_MAP:
                return DOMAIN_ATTR_EARLY_MAP;
        case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
                return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
        case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
                return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
        case QDF_DOMAIN_ATTR_USE_LLC_NWA:
                return DOMAIN_ATTR_USE_LLC_NWA;
        case QDF_DOMAIN_ATTR_SPLIT_TABLES:
                return DOMAIN_ATTR_SPLIT_TABLES;
        case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
                return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
        case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
                return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
        case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
                return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
        default:
                return DOMAIN_ATTR_EXTENDED_MAX;
        }
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
                            enum qdf_iommu_attr attr, void *data)
{
        return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
                                     data);
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get the iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
        return osdev->domain;
}
#else
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
        if (osdev->iommu_mapping)
                return osdev->iommu_mapping->domain;

        return NULL;
}
#endif
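
/*
 * Illustrative sketch (caller-side): query whether stage 1 translation is
 * bypassed on a device's iommu domain; __qdf_iommu_domain_get_attr() hides
 * the per-kernel-version differences in the underlying API. `my_osdev` is
 * hypothetical.
 *
 *      int s1_bypass = 0;
 *      struct iommu_domain *domain = __qdf_dev_get_domain(my_osdev);
 *
 *      if (domain &&
 *          !__qdf_iommu_domain_get_attr(domain, QDF_DOMAIN_ATTR_S1_BYPASS,
 *                                       &s1_bypass) && s1_bypass)
 *              ...DMA addresses are physical addresses here...
 */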

/**
 * __qdf_mem_paddr_from_dmaaddr() - get the actual physical address from a
 * dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get the actual physical address from a dma_addr based on the SMMU
 * enablement status. If SMMU stage 1 translation is enabled, the DMA APIs
 * return an IO virtual address (IOVA) instead of a physical address, so
 * look up the physical address that the SMMU maps to the IOVA.
 *
 * Return: DMA'able physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
                             qdf_dma_addr_t dma_addr)
{
        struct iommu_domain *domain;

        if (__qdf_mem_smmu_s1_enabled(osdev)) {
                domain = __qdf_dev_get_domain(osdev);
                if (domain)
                        return iommu_iova_to_phys(domain, dma_addr);
        }

        return dma_addr;
}
#else
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
                             qdf_dma_addr_t dma_addr)
{
        return dma_addr;
}
#endif

/**
 * __qdf_os_mem_dma_get_sgtable() - Returns a DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, negative error code otherwise
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
                             qdf_dma_addr_t dma_addr, size_t size)
{
        return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
                               size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
        sg_free_table(sgt);
}

/**
 * __qdf_dma_get_sgtable_dma_addr() - Assign a DMA address to each
 * scatterlist element
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                if (!sg)
                        break;

                sg->dma_address = sg_phys(sg);
        }
}
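
/*
 * Illustrative sketch (caller-side, error handling elided): export a DMA
 * allocation as a scatter-gather table, fill in the per-segment DMA
 * addresses, and release the table when done. `vaddr`, `dma_addr`, and
 * `size` are hypothetical names describing an existing allocation.
 *
 *      struct sg_table sgt;
 *
 *      if (!__qdf_os_mem_dma_get_sgtable(my_osdev->dev, &sgt, vaddr,
 *                                        dma_addr, size)) {
 *              __qdf_dma_get_sgtable_dma_addr(&sgt);
 *              ...hand sgt to the consumer...
 *              __qdf_os_mem_free_sgtable(&sgt);
 *      }
 */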

/**
 * __qdf_mem_get_dma_addr() - Return the dma address based on SMMU translation
 * status
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return the
 * corresponding dma address from qdf_mem_info_t: the IO virtual address if
 * stage 1 translation is enabled, the physical address otherwise.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
                                                    qdf_mem_info_t *mem_info)
{
        if (__qdf_mem_smmu_s1_enabled(osdev))
                return (qdf_dma_addr_t)mem_info->iova;
        else
                return (qdf_dma_addr_t)mem_info->pa;
}

/**
 * __qdf_mem_get_dma_addr_ptr() - Return the DMA address storage pointer
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return the
 * corresponding dma address pointer from the qdf_mem_info_t structure: a
 * pointer to the IO virtual address if stage 1 translation is enabled, a
 * pointer to the physical address otherwise.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *
__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
                           qdf_mem_info_t *mem_info)
{
        if (__qdf_mem_smmu_s1_enabled(osdev))
                return (qdf_dma_addr_t *)(&mem_info->iova);
        else
                return (qdf_dma_addr_t *)(&mem_info->pa);
}

/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
                                              qdf_mem_info_t *mem_info,
                                              qdf_dma_addr_t dma_addr,
                                              uint32_t mem_size)
{
        mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
        mem_info->iova = dma_addr;
        mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
                       qdf_mem_info_t *mem_info)
{
        return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
                       qdf_mem_info_t *mem_info,
                       uint32_t mem_size)
{
        mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info)
{
        return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info,
                     qdf_dma_addr_t dma_pa)
{
        mem_info->pa = dma_pa;
}

/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: pointer to the allocated memory, or NULL if the allocation fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
                                 qdf_size_t size, qdf_dma_addr_t *paddr,
                                 const char *func, uint32_t line);
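
/*
 * Illustrative sketch (caller-side): allocate DMA-coherent memory and record
 * it in a qdf_mem_info_t so that both the physical address and the (possibly
 * IOMMU-translated) device address can be retrieved later via
 * __qdf_mem_get_dma_pa()/__qdf_mem_get_dma_addr(). `my_osdev` and `size` are
 * hypothetical.
 *
 *      qdf_dma_addr_t dma_addr;
 *      qdf_mem_info_t mem_info;
 *      void *vaddr;
 *
 *      vaddr = __qdf_mem_alloc_consistent(my_osdev, my_osdev->dev, size,
 *                                         &dma_addr, __func__, __LINE__);
 *      if (!vaddr)
 *              return QDF_STATUS_E_NOMEM;
 *      __qdf_update_mem_map_table(my_osdev, &mem_info, dma_addr, size);
 */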

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: Number of bytes of virtual memory to allocate
 * @func: Caller function name
 * @line: Line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: Pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: Number of bytes of memory to allocate
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated this way is not tracked by the QDF memory debug
 * framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
                                 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_untracked_mem_free(void *ptr);
#endif

/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory that was allocated
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
                               qdf_size_t size, void *vaddr,
                               qdf_dma_addr_t paddr, qdf_dma_context_t memctx);

#endif /* __I_QDF_MEM_H */