/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * QCA driver framework (QDF) memory management APIs
 */

#if !defined(__QDF_MEMORY_H)
#define __QDF_MEMORY_H

/* Include Files */
#include <qdf_types.h>
#include <i_qdf_mem.h>
#include <i_qdf_trace.h>

#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz

/**
 * qdf_align() - align to the given size.
 * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
 *
 * Return: aligned value.
 */
#define qdf_align(a, align_size) __qdf_align(a, align_size)
#define qdf_page_size __page_size

/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};

/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of element in single page
 * @num_pages: Number of allocation needed pages
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size of each allocated page
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
};


/* Preprocessor definitions and constants */

typedef __qdf_mempool_t qdf_mempool_t;

/**
 * qdf_mem_init() - Initialize QDF memory module
 *
 * Return: None
 *
 */
void qdf_mem_init(void);

/**
 * qdf_mem_exit() - Exit QDF memory module
 *
 * Return: None
 *
 */
void qdf_mem_exit(void);

#define QDF_MEM_FUNC_NAME_SIZE 48

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
 *
 * Return: value of mem_debug_disabled qdf module argument
 */
bool qdf_mem_debug_config_get(void);

/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 * @flag: GFP flag
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag);

#define qdf_mem_malloc(size) \
	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)

#define qdf_mem_malloc_fl(size, func, line) \
	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)

#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * This function will free the memory pointed to by 'ptr'. It also checks for
 * memory corruption, underrun, overrun, double free, domain mismatch, etc.
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);

#define qdf_mem_free(ptr) \
	qdf_mem_free_debug(ptr, __func__, __LINE__)

/**
 * qdf_mem_multi_pages_alloc_debug() - debug version of the multi-page
 * allocation API; records the call site (@func, @line, @caller) for the
 * memory tracking list
 * @osdev: OS device handle
 * @pages: multi-page information storage to fill in
 * @element_size: size of each element
 * @element_num: total number of elements to allocate pages for
 * @memctxt: DMA context
 * @cacheable: true for cacheable pages, false for coherent (DMA) pages
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: none (results are stored in @pages)
 */
void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
				     struct qdf_mem_multi_page_t *pages,
				     size_t element_size, uint16_t element_num,
				     qdf_dma_context_t memctxt, bool cacheable,
				     const char *func, uint32_t line,
				     void *caller);

#define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
				  memctxt, cacheable) \
	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
					element_num, memctxt, cacheable, \
					__func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_multi_pages_free_debug() - debug version of the multi-page free
 * API; records the call site (@func, @line)
 * @osdev: OS device handle
 * @pages: multi-page information storage previously filled by the alloc API
 * @memctxt: DMA context
 * @cacheable: true for cacheable pages, false for coherent (DMA) pages
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: none
 */
void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
				    struct qdf_mem_multi_page_t *pages,
				    qdf_dma_context_t memctxt, bool cacheable,
				    const char *func, uint32_t line);

#define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
				       __func__, __LINE__)

/**
 * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
 *
 * Call this to ensure there are no active memory allocations being tracked
 * against the current debug domain. For example, one should call this function
 * immediately before a call to qdf_debug_domain_set() as a memory leak
 * detection mechanism.
 *
 * e.g.
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 *	...
 *
 *	// memory is allocated and freed
 *
 *	...
 *
 *	// before transitioning back to inactive state,
 *	// make sure all active memory has been freed
 *	qdf_mem_check_for_leaks();
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 *
 *	...
 *
 *	// also, before program exit, make sure init time memory is freed
 *	qdf_mem_check_for_leaks();
 *	exit();
 *
 * Return: None
 */
void qdf_mem_check_for_leaks(void);

/**
 * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller);

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
				       __func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_consistent_debug() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory region being freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: none
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line);

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
				      __func__, __LINE__)
#else
static inline bool qdf_mem_debug_config_get(void)
{
	return false;
}

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc(size) \
	__qdf_mem_malloc(size, __func__, __LINE__)

#define qdf_mem_malloc_fl(size, func, line) \
	__qdf_mem_malloc(size, func, line)

/**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)

void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
			       const char *func,
			       uint32_t line);

#define qdf_mem_free(ptr) \
	__qdf_mem_free(ptr)

static inline void qdf_mem_check_for_leaks(void) { }

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)

/**
 * qdf_mem_multi_pages_alloc() - allocate multiple pages for the elements
 * described by @element_size and @element_num
 * @osdev: OS device handle
 * @pages: multi-page information storage to fill in
 * @element_size: size of each element
 * @element_num: total number of elements to allocate pages for
 * @memctxt: DMA context
 * @cacheable: true for cacheable pages, false for coherent (DMA) pages
 *
 * Return: none (results are stored in @pages)
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable);

/**
 * qdf_mem_multi_pages_free() - free pages allocated by
 * qdf_mem_multi_pages_alloc()
 * @osdev: OS device handle
 * @pages: multi-page information storage previously filled by the alloc API
 * @memctxt: DMA context
 * @cacheable: true for cacheable pages, false for coherent (DMA) pages
 *
 * Return: none
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable);

#endif /* MEMORY_DEBUG */

/**
 * qdf_aligned_malloc() - allocates aligned QDF memory.
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Checks if the allocated base address is aligned with base_align.
 * If not, it frees the allocated memory, adds base_align to alloc size and
 * re-allocates the memory.
 *
 * The call-site function name and line number are captured automatically
 * and forwarded to qdf_aligned_malloc_fl().
 *
 * Return:
 * Upon successful allocate, returns an aligned base address of the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
			   paddr_aligned, align) \
	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
			      paddr_aligned, align, __func__, __LINE__)

void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line);

/**
 * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 *
 * The call-site function name and line number are captured automatically
 * and forwarded to qdf_aligned_mem_alloc_consistent_fl().
 *
 * Return: pointer of allocated memory or null if memory alloc fails.
 */
#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
					 paddr_unaligned, paddr_aligned, \
					 align) \
	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
					    paddr_unaligned, paddr_aligned, \
					    align, __func__, __LINE__)

void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
					  void **vaddr_unaligned,
					  qdf_dma_addr_t *paddr_unaligned,
					  qdf_dma_addr_t *paddr_aligned,
					  uint32_t align, const char *func,
					  uint32_t line);

#define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)

void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);

void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
		       uint32_t num_bytes);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * WARNING: parameter @num_bytes and @value are swapped comparing with
 * standard C function "memset", please ensure correct usage of this function!
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C. Note this function does not specifically handle overlapping
 * source and destination memory locations. Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results. Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C. Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_cmp() - memory compare
 * @left: pointer to one location in memory to compare
 * @right: pointer to second location in memory to compare
 * @size: the number of bytes to compare
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 *	0 -- equal
 *	< 0 -- *memory1 is less than *memory2
 *	> 0 -- *memory1 is bigger than *memory2
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size);

void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);

/**
 * qdf_mem_map_nbytes_single - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
	/* mapping is only meaningful on PCI-class buses; no-op success
	 * otherwise
	 */
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	return 0;
#endif
}

/**
 * qdf_mem_dma_cache_sync() - sync the CPU cache for a DMA buffer
 * @osdev: pointer to OS device context
 * @buf: DMA (bus) address of the buffer to sync
 * @dir: DMA direction
 * @nbytes: number of bytes to sync
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}

/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
	/* unmapping is only meaningful on PCI-class buses; no-op otherwise */
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}

/**
 * qdf_mempool_init - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: status of __qdf_mempool_init() (an int status, not a handle as
 * previously documented; the created pool is returned through @pool_addr)
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}

/**
 * qdf_mempool_destroy - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}

/**
 * qdf_mempool_alloc - Allocate an element memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}

/**
 * qdf_mempool_free - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}

void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);

void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     __dma_data_direction direction);

int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count,
			    uint8_t cacheable);

/**
 * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
 * @size: number of bytes to increment by
 *
 * Return: None
 */
void qdf_mem_kmalloc_inc(qdf_size_t size);

/**
 * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
 * @size: number of bytes to decrement by
 *
 * Return: None
 */
void qdf_mem_kmalloc_dec(qdf_size_t size);

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * qdf_mem_skb_inc() - increment total skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_inc(qdf_size_t size);

/**
 * qdf_mem_skb_dec() - decrement total skb allocation size
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_dec(qdf_size_t size);

#else
static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

/**
 * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of required storage
 *
 * Allocate mapping table for DMA memory allocation. This is needed for
 * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
 *
 * NOTE(review): num * sizeof(mem_info_arr[0]) is not checked for overflow;
 * callers are expected to pass small, trusted counts — confirm.
 *
 * Return: shared memory info storage table pointer, NULL on allocation
 * failure
 */
static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
{
	qdf_mem_info_t *mem_info_arr;

	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
	return mem_info_arr;
}

/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void qdf_update_mem_map_table(qdf_device_t osdev,
					    qdf_mem_info_t *mem_info,
					    qdf_dma_addr_t dma_addr,
					    uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}

/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return __qdf_mem_smmu_s1_enabled(osdev);
}

/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
 * (IOVA) otherwise returns physical address. So get SMMU physical address
 * mapping from IOVA.
 *
 * Return: dmaable physical address
 */
static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
							qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}

/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Return: status of __qdf_os_mem_dma_get_sgtable() (an int status, not a
 * physical address as previously documented)
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			qdf_dma_addr_t dma_addr, size_t size)
{
	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
	__qdf_os_mem_free_sgtable(sgt);
}

/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	__qdf_dma_get_sgtable_dma_addr(sgt);
}

/**
 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return IO virtual address otherwise
 * returns physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
						  qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr(osdev, mem_info);
}

/**
 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement, return corresponding dma
 * address storage pointer.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
						       qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
}


/**
 * qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
qdf_mem_get_dma_size(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_size(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_size(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     uint32_t mem_size)
{
	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
}

/**
 * qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
qdf_mem_get_dma_pa(qdf_device_t osdev,
		   qdf_mem_info_t *mem_info)
{
	return __qdf_mem_get_dma_pa(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
		   qdf_mem_info_t *mem_info,
		   qdf_dma_addr_t dma_pa)
{
	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}

/**
 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
 * @osdev: parent device instance
 * @size: size to be allocated
 *
 * Allocate DMA memory which will be shared with external kernel module. This
 * information is needed for SMMU mapping.
 *
 * Return: shared memory information storage pointer (not an int status as
 * previously documented)
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);

/**
 * qdf_mem_shared_mem_free() - Free shared memory
 * @osdev: parent device instance
 * @shared_mem: shared memory information storage
 *
 * Free DMA shared memory resource
 *
 * Return: None
 */
static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
					   qdf_shared_mem_t *shared_mem)
{
	if (!shared_mem) {
		qdf_nofl_err("%s: NULL shared mem struct passed",
			     __func__);
		return;
	}

	if (shared_mem->vaddr) {
		qdf_mem_free_consistent(osdev, osdev->dev,
					qdf_mem_get_dma_size(osdev,
						&shared_mem->mem_info),
					shared_mem->vaddr,
					qdf_mem_get_dma_addr(osdev,
						&shared_mem->mem_info),
					qdf_get_dma_mem_context(shared_mem,
								memctx));
	}
	qdf_mem_free_sgtable(&shared_mem->sgtable);
	qdf_mem_free(shared_mem);
}

/**
 * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
 * host driver
 *
 * Return: total DMA memory allocated in the host driver (not "None" as
 * previously documented)
 */
int32_t qdf_dma_mem_stats_read(void);

/**
 * qdf_heap_mem_stats_read() - Return the heap memory allocated
 * in host driver
 *
 * Return: total heap memory allocated in the host driver (not "None" as
 * previously documented)
 */
int32_t qdf_heap_mem_stats_read(void);

/**
 * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
 * host driver
 *
 * Return: total SKB memory allocated in the host driver (not "None" as
 * previously documented)
 */
int32_t qdf_skb_mem_stats_read(void);

#endif /* __QDF_MEMORY_H */