/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * QCA driver framework (QDF) memory management APIs
 */

#if !defined(__QDF_MEMORY_H)
#define __QDF_MEMORY_H

/* Include Files */
#include <qdf_types.h>
#include <i_qdf_mem.h>

#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz

/**
 * qdf_align() - align to the given size.
 * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
 *
 * Return: aligned value.
 */
#define qdf_align(a, align_size)   __qdf_align(a, align_size)

/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};

/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of elements in a single page
 * @num_pages: Number of pages needed for the allocation
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
};


/* Preprocessor definitions and constants */

typedef __qdf_mempool_t qdf_mempool_t;

/**
 * qdf_mem_init() - Initialize QDF memory module
 *
 * Return: None
 */
void qdf_mem_init(void);

/**
 * qdf_mem_exit() - Exit QDF memory module
 *
 * Return: None
 */
void qdf_mem_exit(void);

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @file: File name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller);

#define qdf_mem_malloc(size) \
	qdf_mem_malloc_debug(size, __FILE__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * This function will free the memory pointed to by 'ptr'. It also checks for
 * memory corruption, underrun, overrun, double free, domain mismatch, etc.
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);

#define qdf_mem_free(ptr) \
	qdf_mem_free_debug(ptr, __FILE__, __LINE__)

/**
 * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
 *
 * Call this to ensure there are no active memory allocations being tracked
 * against the current debug domain. For example, one should call this function
 * immediately before a call to qdf_debug_domain_set() as a memory leak
 * detection mechanism.
 *
 * e.g.
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 *	...
 *
 *	// memory is allocated and freed
 *
 *	...
 *
 *	// before transitioning back to inactive state,
 *	// make sure all active memory has been freed
 *	qdf_mem_check_for_leaks();
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 *
 *	...
 *
 *	// also, before program exit, make sure init time memory is freed
 *	qdf_mem_check_for_leaks();
 *	exit();
 *
 * Return: None
 */
void qdf_mem_check_for_leaks(void);

/**
 * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @file: file name of the call site
 * @line: line number of the call site
 * @caller: Address of the caller function
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller);

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
				       __FILE__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_consistent_debug() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 * @file: file name of the call site
 * @line: line number of the call site
 *
 * Return: none
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line);

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
				      __FILE__, __LINE__)
#else
void *qdf_mem_malloc(qdf_size_t size);

/**
 * qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * Return: None
 */
void qdf_mem_free(void *ptr);

/* No-op stub: leak tracking only exists when MEMORY_DEBUG is defined */
static inline void qdf_mem_check_for_leaks(void) { }

void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, qdf_dma_addr_t *paddr);

void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx);

#endif /* MEMORY_DEBUG */

void *qdf_mem_alloc_outline(qdf_device_t osdev, qdf_size_t size);

void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);

void qdf_mem_zero(void *ptr, uint32_t num_bytes);

void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);

void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);

void qdf_mem_free_outline(void *buf);

void qdf_mem_zero_outline(void *buf, qdf_size_t size);

void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);

/**
 * qdf_mem_cmp() - memory compare
 * @memory1: pointer to one location in memory to compare.
 * @memory2: pointer to second location in memory to compare.
 * @num_bytes: the number of bytes to compare.
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 * int32_t - returns an int value that tells if the memory
 * locations are equal or not equal.
 * 0 -- equal
 * < 0 -- *memory1 is less than *memory2
 * > 0 -- *memory1 is bigger than *memory2
 */
static inline int32_t qdf_mem_cmp(const void *memory1, const void *memory2,
				  uint32_t num_bytes)
{
	return __qdf_mem_cmp(memory1, memory2, num_bytes);
}

/**
 * qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive the physical address.
 *
 * Note: compiled to a no-op returning 0 unless HIF_PCI is defined.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	return 0;
#endif
}

/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Note: compiled to a no-op unless HIF_PCI is defined.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}

/**
 * qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, negative value on failure. (The pool handle is
 * returned through @pool_addr, not as the return value.)
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}

/**
 * qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}

/**
 * qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}

/**
 * qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}

void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);

void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     __dma_data_direction direction);

void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable);
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable);
int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count,
			    uint8_t cacheable);
/**
 * qdf_mem_skb_inc() - increment total skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_inc(qdf_size_t size);

/**
 * qdf_mem_skb_dec() - decrement total skb allocation size
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_dec(qdf_size_t size);

/**
 * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of required storage
 *
 * Allocate mapping table for DMA memory allocation. This is needed for
 * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
 *
 * NOTE(review): @num * sizeof(qdf_mem_info_t) is not checked for integer
 * overflow; callers are assumed to pass small counts — confirm at call sites.
 *
 * Return: shared memory info storage table pointer
 */
static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
{
	qdf_mem_info_t *mem_info_arr;

	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
	return mem_info_arr;
}

/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void qdf_update_mem_map_table(qdf_device_t osdev,
					    qdf_mem_info_t *mem_info,
					    qdf_dma_addr_t dma_addr,
					    uint32_t mem_size)
{
	if (!mem_info) {
		__qdf_print("%s: NULL mem_info\n", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}

/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool
qdf_mem_smmu_s1_enabled(qdf_device_t osdev) 432 { 433 return __qdf_mem_smmu_s1_enabled(osdev); 434 } 435 436 /** 437 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address 438 * @osdev: Parent device instance 439 * @dma_addr: DMA/IOVA address 440 * 441 * Get actual physical address from dma_addr based on SMMU enablement status. 442 * IF SMMU Stage 1 tranlation is enabled, DMA APIs return IO virtual address 443 * (IOVA) otherwise returns physical address. So get SMMU physical address 444 * mapping from IOVA. 445 * 446 * Return: dmaable physical address 447 */ 448 static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev, 449 qdf_dma_addr_t dma_addr) 450 { 451 return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); 452 } 453 454 /** 455 * qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table 456 * @dev: device instace 457 * @sgt: scatter gather table pointer 458 * @cpu_addr: HLOS virtual address 459 * @dma_addr: dma address 460 * @size: allocated memory size 461 * 462 * Return: physical address 463 */ 464 static inline int 465 qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr, 466 qdf_dma_addr_t dma_addr, size_t size) 467 { 468 return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); 469 } 470 471 /** 472 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status. 473 * @osdev: Parent device instance 474 * @mem_info: Pointer to allocated memory information 475 * 476 * Get dma address based on SMMU enablement status. If SMMU Stage 1 477 * tranlation is enabled, DMA APIs return IO virtual address otherwise 478 * returns physical address. 
479 * 480 * Return: dma address 481 */ 482 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev, 483 qdf_mem_info_t *mem_info) 484 { 485 return __qdf_mem_get_dma_addr(osdev, mem_info); 486 } 487 488 /** 489 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct 490 * @osdev: Parent device instance 491 * @mem_info: Pointer to allocated memory information 492 * 493 * Based on smmu stage 1 translation enablement, return corresponding dma 494 * address storage pointer. 495 * 496 * Return: dma address storage pointer 497 */ 498 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev, 499 qdf_mem_info_t *mem_info) 500 { 501 return __qdf_mem_get_dma_addr_ptr(osdev, mem_info); 502 } 503 504 505 /** 506 * qdf_mem_get_dma_size() - Return DMA memory size 507 * @osdev: parent device instance 508 * @mem_info: Pointer to allocated memory information 509 * 510 * Return: DMA memory size 511 */ 512 static inline uint32_t 513 qdf_mem_get_dma_size(qdf_device_t osdev, 514 qdf_mem_info_t *mem_info) 515 { 516 return __qdf_mem_get_dma_size(osdev, mem_info); 517 } 518 519 /** 520 * qdf_mem_set_dma_size() - Set DMA memory size 521 * @osdev: parent device instance 522 * @mem_info: Pointer to allocated memory information 523 * @mem_size: memory size allocated 524 * 525 * Return: none 526 */ 527 static inline void 528 qdf_mem_set_dma_size(qdf_device_t osdev, 529 qdf_mem_info_t *mem_info, 530 uint32_t mem_size) 531 { 532 __qdf_mem_set_dma_size(osdev, mem_info, mem_size); 533 } 534 535 /** 536 * qdf_mem_get_dma_size() - Return DMA physical address 537 * @osdev: parent device instance 538 * @mem_info: Pointer to allocated memory information 539 * 540 * Return: DMA physical address 541 */ 542 static inline qdf_dma_addr_t 543 qdf_mem_get_dma_pa(qdf_device_t osdev, 544 qdf_mem_info_t *mem_info) 545 { 546 return __qdf_mem_get_dma_pa(osdev, mem_info); 547 } 548 549 /** 550 * qdf_mem_set_dma_size() - Set DMA physical address 551 * @osdev: 
parent device instance 552 * @mem_info: Pointer to allocated memory information 553 * @dma_pa: DMA phsical address 554 * 555 * Return: none 556 */ 557 static inline void 558 qdf_mem_set_dma_pa(qdf_device_t osdev, 559 qdf_mem_info_t *mem_info, 560 qdf_dma_addr_t dma_pa) 561 { 562 __qdf_mem_set_dma_pa(osdev, mem_info, dma_pa); 563 } 564 565 /** 566 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource 567 * @osdev: parent device instance 568 * @mem_info: Pointer to allocated memory information 569 * @size: size to be allocated 570 * 571 * Allocate DMA memory which will be shared with external kernel module. This 572 * information is needed for SMMU mapping. 573 * 574 * Return: 0 suceess 575 */ 576 static inline qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, 577 uint32_t size) 578 { 579 qdf_shared_mem_t *shared_mem; 580 581 shared_mem = qdf_mem_malloc(sizeof(*shared_mem)); 582 if (!shared_mem) { 583 __qdf_print("%s: Unable to allocate memory for shared resource struct\n", 584 __func__); 585 return NULL; 586 } 587 588 shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, 589 size, qdf_mem_get_dma_addr_ptr(osdev, 590 &shared_mem->mem_info)); 591 if (!shared_mem->vaddr) { 592 __qdf_print("%s; Unable to allocate DMA memory for shared resource\n", 593 __func__); 594 qdf_mem_free(shared_mem); 595 return NULL; 596 } 597 598 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); 599 qdf_mem_zero(shared_mem->vaddr, 600 qdf_mem_get_dma_size(osdev, &shared_mem->mem_info)); 601 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, 602 qdf_mem_paddr_from_dmaaddr(osdev, 603 qdf_mem_get_dma_addr(osdev, 604 &shared_mem->mem_info))); 605 qdf_mem_dma_get_sgtable(osdev->dev, 606 (void *)&shared_mem->sgtable, 607 shared_mem->vaddr, 608 qdf_mem_get_dma_addr(osdev, 609 &shared_mem->mem_info), 610 qdf_mem_get_dma_size(osdev, 611 &shared_mem->mem_info)); 612 613 shared_mem->sgtable.sgl->dma_address = 614 qdf_mem_get_dma_pa(osdev, 
&shared_mem->mem_info); 615 616 return shared_mem; 617 } 618 619 /** 620 * qdf_mem_shared_mem_free() - Free shared memory 621 * @osdev: parent device instance 622 * @shared_mem: shared memory information storage 623 * 624 * Free DMA shared memory resource 625 * 626 * Return: None 627 */ 628 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev, 629 qdf_shared_mem_t *shared_mem) 630 { 631 if (!shared_mem) { 632 __qdf_print("%s: NULL shared mem struct passed\n", 633 __func__); 634 return; 635 } 636 637 if (shared_mem->vaddr) { 638 qdf_mem_free_consistent(osdev, osdev->dev, 639 qdf_mem_get_dma_size(osdev, 640 &shared_mem->mem_info), 641 shared_mem->vaddr, 642 qdf_mem_get_dma_addr(osdev, 643 &shared_mem->mem_info), 644 qdf_get_dma_mem_context(shared_mem, 645 memctx)); 646 } 647 qdf_mem_free(shared_mem); 648 } 649 650 #endif /* __QDF_MEMORY_H */ 651