/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * This file provides OS dependent memory management APIs
 */

#include "qdf_debugfs.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "qdf_module.h"
#include <qdf_trace.h>
#include "qdf_atomic.h"
#include "qdf_str.h"
#include "qdf_talloc.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include <net/cnss_prealloc.h>
#endif

#ifdef MEMORY_DEBUG
#include "qdf_debug_domain.h"
#include <qdf_list.h>

/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */
#define QDF_DEBUG_STRING_SIZE 512

/*
 * Per-domain tracking lists for regular allocations; every live allocation
 * made via qdf_mem_malloc_debug() is linked into the list of the domain
 * that was active at allocation time.
 */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

/* Per-domain tracking lists for DMA-coherent allocations */
static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;

/* Return the regular-allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

/* Return the DMA-allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}

/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @func: name of the function the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};

/* Sentinel patterns written before/after each allocation to catch
 * out-of-bounds writes; validated on free.
 */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

/* Map a user pointer back to its debug header (header precedes the data) */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

/* For DMA allocations the header is placed AFTER the data region */
static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
}

/* Trailer sentinel location: immediately after the user data region */
static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

/* User-visible data pointer: immediately after the debug header */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))

/* Stamp the trailer sentinel after the user data region */
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

/* Populate the debug header for a fresh allocation */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *func, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;

	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}

/* Bitmap of everything that can be wrong with a tracked allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};

/* Check the trailer sentinel; catches writes past the end of the buffer */
static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header *header)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;
	return error_bitmap;
}

/* Validate all header fields; returns a bitmap of detected problems */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is uint8_t: exactly 'true' means double free; any other
	 * non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_list_node_in_any_list(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}

/* Log every detected problem in @error_bitmap, then panic; no-op when the
 * bitmap is clean
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *func,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
}
#endif /* MEMORY_DEBUG */

/* Module parameter: when non-zero (default), mempools fall back to regular
 * allocations instead of pre-allocated element pools
 */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);

#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc: total kmalloc allocations
 * @dma: total dma allocations
 * @skb: total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

/* Account @size bytes of kmalloc memory as allocated */
void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* Account @size bytes of DMA memory as allocated */
static void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

/* Account @size bytes of skb memory as allocated */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* Account @size bytes of kmalloc memory as freed */
void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* Account @size bytes of DMA memory as freed */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

/* Account @size bytes of skb memory as freed */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

#ifdef MEMORY_DEBUG
/* qdf_abstract_print adapter that routes output to the QDF error trace */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* qdf_abstract_print adapter that routes output to a seq_file (@priv) */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}

/**
 * struct __qdf_mem_info - memory statistics
 * @func: the function which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count size total filename caller timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}

/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per de-duplicated allocation site, and accumulates a
 * single "WLAN_BUG_RCA" summary string listing all sites.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* entries are filled front-to-back; first empty ends it */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].func,
		      table[i].line, table[i].caller,
		      table[i].time);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].func,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}

/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * De-duplicates by (func, line, size, caller): a matching entry has its
 * count bumped, otherwise @meta fills the first empty slot.
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count) {
			qdf_str_lcopy(table[i].func, meta->func,
				      QDF_MEM_FUNC_NAME_SIZE);
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			table[i].time = meta->time;
			break;
		}

		if (qdf_str_eq(table[i].func, meta->func) &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}

/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock while printing (print may sleep) */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		/* NOTE(review): node may have been freed while the lock was
		 * dropped above; peek_next then walks a stale node — TODO
		 * confirm callers only run this when the list is quiescent
		 */
		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush any entries remaining in a partially-filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Expose the per-domain allocation list as <debugfs>/mem/list */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

/* Nothing to tear down; the root dir is removed by qdf_mem_debugfs_exit() */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */
593 static QDF_STATUS qdf_mem_debug_debugfs_init(void) 594 { 595 return QDF_STATUS_E_NOSUPPORT; 596 } 597 598 static QDF_STATUS qdf_mem_debug_debugfs_exit(void) 599 { 600 return QDF_STATUS_E_NOSUPPORT; 601 } 602 603 #endif /* MEMORY_DEBUG */ 604 605 606 static void qdf_mem_debugfs_exit(void) 607 { 608 debugfs_remove_recursive(qdf_mem_debugfs_root); 609 qdf_mem_debugfs_root = NULL; 610 } 611 612 static QDF_STATUS qdf_mem_debugfs_init(void) 613 { 614 struct dentry *qdf_debugfs_root = qdf_debugfs_get_root(); 615 616 if (!qdf_debugfs_root) 617 return QDF_STATUS_E_FAILURE; 618 619 qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root); 620 621 if (!qdf_mem_debugfs_root) 622 return QDF_STATUS_E_FAILURE; 623 624 625 debugfs_create_atomic_t("kmalloc", 626 S_IRUSR, 627 qdf_mem_debugfs_root, 628 &qdf_mem_stat.kmalloc); 629 630 debugfs_create_atomic_t("dma", 631 S_IRUSR, 632 qdf_mem_debugfs_root, 633 &qdf_mem_stat.dma); 634 635 debugfs_create_atomic_t("skb", 636 S_IRUSR, 637 qdf_mem_debugfs_root, 638 &qdf_mem_stat.skb); 639 640 return QDF_STATUS_SUCCESS; 641 } 642 643 #else /* WLAN_DEBUGFS */ 644 645 static inline void qdf_mem_dma_inc(qdf_size_t size) {} 646 static inline void qdf_mem_dma_dec(qdf_size_t size) {} 647 648 static QDF_STATUS qdf_mem_debugfs_init(void) 649 { 650 return QDF_STATUS_E_NOSUPPORT; 651 } 652 static void qdf_mem_debugfs_exit(void) {} 653 654 655 static QDF_STATUS qdf_mem_debug_debugfs_init(void) 656 { 657 return QDF_STATUS_E_NOSUPPORT; 658 } 659 660 static QDF_STATUS qdf_mem_debug_debugfs_exit(void) 661 { 662 return QDF_STATUS_E_NOSUPPORT; 663 } 664 665 #endif /* WLAN_DEBUGFS */ 666 667 /** 668 * __qdf_mempool_init() - Create and initialize memory pool 669 * 670 * @osdev: platform device object 671 * @pool_addr: address of the pool created 672 * @elem_cnt: no. 
of elements in pool 673 * @elem_size: size of each pool element in bytes 674 * @flags: flags 675 * 676 * return: Handle to memory pool or NULL if allocation failed 677 */ 678 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr, 679 int elem_cnt, size_t elem_size, u_int32_t flags) 680 { 681 __qdf_mempool_ctxt_t *new_pool = NULL; 682 u_int32_t align = L1_CACHE_BYTES; 683 unsigned long aligned_pool_mem; 684 int pool_id; 685 int i; 686 687 if (prealloc_disabled) { 688 /* TBD: We can maintain a list of pools in qdf_device_t 689 * to help debugging 690 * when pre-allocation is not enabled 691 */ 692 new_pool = (__qdf_mempool_ctxt_t *) 693 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); 694 if (!new_pool) 695 return QDF_STATUS_E_NOMEM; 696 697 memset(new_pool, 0, sizeof(*new_pool)); 698 /* TBD: define flags for zeroing buffers etc */ 699 new_pool->flags = flags; 700 new_pool->elem_size = elem_size; 701 new_pool->max_elem = elem_cnt; 702 *pool_addr = new_pool; 703 return 0; 704 } 705 706 for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) { 707 if (!osdev->mem_pool[pool_id]) 708 break; 709 } 710 711 if (pool_id == MAX_MEM_POOLS) 712 return -ENOMEM; 713 714 new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *) 715 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); 716 if (!new_pool) 717 return -ENOMEM; 718 719 memset(new_pool, 0, sizeof(*new_pool)); 720 /* TBD: define flags for zeroing buffers etc */ 721 new_pool->flags = flags; 722 new_pool->pool_id = pool_id; 723 724 /* Round up the element size to cacheline */ 725 new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES); 726 new_pool->mem_size = elem_cnt * new_pool->elem_size + 727 ((align)?(align - 1):0); 728 729 new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL); 730 if (!new_pool->pool_mem) { 731 /* TBD: Check if we need get_free_pages above */ 732 kfree(new_pool); 733 osdev->mem_pool[pool_id] = NULL; 734 return -ENOMEM; 735 } 736 737 spin_lock_init(&new_pool->lock); 738 739 
/* Initialize free list */ 740 aligned_pool_mem = (unsigned long)(new_pool->pool_mem) + 741 ((align) ? (unsigned long)(new_pool->pool_mem)%align:0); 742 STAILQ_INIT(&new_pool->free_list); 743 744 for (i = 0; i < elem_cnt; i++) 745 STAILQ_INSERT_TAIL(&(new_pool->free_list), 746 (mempool_elem_t *)(aligned_pool_mem + 747 (new_pool->elem_size * i)), mempool_entry); 748 749 750 new_pool->free_cnt = elem_cnt; 751 *pool_addr = new_pool; 752 return 0; 753 } 754 qdf_export_symbol(__qdf_mempool_init); 755 756 /** 757 * __qdf_mempool_destroy() - Destroy memory pool 758 * @osdev: platform device object 759 * @Handle: to memory pool 760 * 761 * Returns: none 762 */ 763 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool) 764 { 765 int pool_id = 0; 766 767 if (!pool) 768 return; 769 770 if (prealloc_disabled) { 771 kfree(pool); 772 return; 773 } 774 775 pool_id = pool->pool_id; 776 777 /* TBD: Check if free count matches elem_cnt if debug is enabled */ 778 kfree(pool->pool_mem); 779 kfree(pool); 780 osdev->mem_pool[pool_id] = NULL; 781 } 782 qdf_export_symbol(__qdf_mempool_destroy); 783 784 /** 785 * __qdf_mempool_alloc() - Allocate an element memory pool 786 * 787 * @osdev: platform device object 788 * @Handle: to memory pool 789 * 790 * Return: Pointer to the allocated element or NULL if the pool is empty 791 */ 792 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool) 793 { 794 void *buf = NULL; 795 796 if (!pool) 797 return NULL; 798 799 if (prealloc_disabled) 800 return qdf_mem_malloc(pool->elem_size); 801 802 spin_lock_bh(&pool->lock); 803 804 buf = STAILQ_FIRST(&pool->free_list); 805 if (buf) { 806 STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry); 807 pool->free_cnt--; 808 } 809 810 /* TBD: Update free count if debug is enabled */ 811 spin_unlock_bh(&pool->lock); 812 813 return buf; 814 } 815 qdf_export_symbol(__qdf_mempool_alloc); 816 817 /** 818 * __qdf_mempool_free() - Free a memory pool element 819 * @osdev: Platform device object 820 
* @pool: Handle to memory pool 821 * @buf: Element to be freed 822 * 823 * Returns: none 824 */ 825 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf) 826 { 827 if (!pool) 828 return; 829 830 831 if (prealloc_disabled) 832 return qdf_mem_free(buf); 833 834 spin_lock_bh(&pool->lock); 835 pool->free_cnt++; 836 837 STAILQ_INSERT_TAIL 838 (&pool->free_list, (mempool_elem_t *)buf, mempool_entry); 839 spin_unlock_bh(&pool->lock); 840 } 841 qdf_export_symbol(__qdf_mempool_free); 842 843 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC 844 /** 845 * qdf_mem_prealloc_get() - conditionally pre-allocate memory 846 * @size: the number of bytes to allocate 847 * 848 * If size if greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns 849 * a chunk of pre-allocated memory. If size if less than or equal to 850 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead. 851 * 852 * Return: NULL on failure, non-NULL on success 853 */ 854 static void *qdf_mem_prealloc_get(size_t size) 855 { 856 void *ptr; 857 858 if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD) 859 return NULL; 860 861 ptr = wcnss_prealloc_get(size); 862 if (!ptr) 863 return NULL; 864 865 memset(ptr, 0, size); 866 867 return ptr; 868 } 869 870 static inline bool qdf_mem_prealloc_put(void *ptr) 871 { 872 return wcnss_prealloc_put(ptr); 873 } 874 #else 875 static inline void *qdf_mem_prealloc_get(size_t size) 876 { 877 return NULL; 878 } 879 880 static inline bool qdf_mem_prealloc_put(void *ptr) 881 { 882 return false; 883 } 884 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */ 885 886 static int qdf_mem_malloc_flags(void) 887 { 888 if (in_interrupt() || irqs_disabled() || in_atomic()) 889 return GFP_ATOMIC; 890 891 return GFP_KERNEL; 892 } 893 894 /* External Function implementation */ 895 #ifdef MEMORY_DEBUG 896 897 /** 898 * qdf_mem_debug_init() - initialize qdf memory debug functionality 899 * 900 * Return: none 901 */ 902 static void qdf_mem_debug_init(void) 903 { 904 int i; 905 906 /* 
Initalizing the list with maximum size of 60000 */ 907 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) 908 qdf_list_create(&qdf_mem_domains[i], 60000); 909 qdf_spinlock_create(&qdf_mem_list_lock); 910 911 /* dma */ 912 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) 913 qdf_list_create(&qdf_mem_dma_domains[i], 0); 914 qdf_spinlock_create(&qdf_mem_dma_list_lock); 915 } 916 917 static uint32_t 918 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain, 919 qdf_list_t *mem_list) 920 { 921 if (qdf_list_empty(mem_list)) 922 return 0; 923 924 qdf_err("Memory leaks detected in %s domain!", 925 qdf_debug_domain_name(domain)); 926 qdf_mem_domain_print(mem_list, qdf_err_printer, NULL); 927 928 return mem_list->count; 929 } 930 931 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains) 932 { 933 uint32_t leak_count = 0; 934 int i; 935 936 /* detect and print leaks */ 937 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) 938 leak_count += qdf_mem_domain_check_for_leaks(i, domains + i); 939 940 if (leak_count) 941 panic("%u fatal memory leaks detected!", leak_count); 942 } 943 944 /** 945 * qdf_mem_debug_exit() - exit qdf memory debug functionality 946 * 947 * Return: none 948 */ 949 static void qdf_mem_debug_exit(void) 950 { 951 int i; 952 953 /* mem */ 954 qdf_mem_domain_set_check_for_leaks(qdf_mem_domains); 955 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) 956 qdf_list_destroy(qdf_mem_list_get(i)); 957 958 qdf_spinlock_destroy(&qdf_mem_list_lock); 959 960 /* dma */ 961 qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains); 962 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) 963 qdf_list_destroy(&qdf_mem_dma_domains[i]); 964 qdf_spinlock_destroy(&qdf_mem_dma_list_lock); 965 } 966 967 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line, 968 void *caller, uint32_t flag) 969 { 970 QDF_STATUS status; 971 enum qdf_debug_domain current_domain = qdf_debug_domain_get(); 972 qdf_list_t *mem_list = qdf_mem_list_get(current_domain); 973 struct qdf_mem_header 
*header; 974 void *ptr; 975 unsigned long start, duration; 976 977 if (!size || size > QDF_MEM_MAX_MALLOC) { 978 qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line); 979 return NULL; 980 } 981 982 ptr = qdf_mem_prealloc_get(size); 983 if (ptr) 984 return ptr; 985 986 if (!flag) 987 flag = qdf_mem_malloc_flags(); 988 989 start = qdf_mc_timer_get_system_time(); 990 header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag); 991 duration = qdf_mc_timer_get_system_time() - start; 992 993 if (duration > QDF_MEM_WARN_THRESHOLD) 994 qdf_warn("Malloc slept; %lums, %zuB @ %s:%d", 995 duration, size, func, line); 996 997 if (!header) { 998 qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line); 999 return NULL; 1000 } 1001 1002 qdf_mem_header_init(header, size, func, line, caller); 1003 qdf_mem_trailer_init(header); 1004 ptr = qdf_mem_get_ptr(header); 1005 1006 qdf_spin_lock_irqsave(&qdf_mem_list_lock); 1007 status = qdf_list_insert_front(mem_list, &header->node); 1008 qdf_spin_unlock_irqrestore(&qdf_mem_list_lock); 1009 if (QDF_IS_STATUS_ERROR(status)) 1010 qdf_err("Failed to insert memory header; status %d", status); 1011 1012 qdf_mem_kmalloc_inc(size); 1013 1014 return ptr; 1015 } 1016 qdf_export_symbol(qdf_mem_malloc_debug); 1017 1018 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line) 1019 { 1020 enum qdf_debug_domain current_domain = qdf_debug_domain_get(); 1021 struct qdf_mem_header *header; 1022 enum qdf_mem_validation_bitmap error_bitmap; 1023 1024 /* freeing a null pointer is valid */ 1025 if (qdf_unlikely(!ptr)) 1026 return; 1027 1028 if (qdf_mem_prealloc_put(ptr)) 1029 return; 1030 1031 if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header))) 1032 panic("Failed to free invalid memory location %pK", ptr); 1033 1034 qdf_talloc_assert_no_children_fl(ptr, func, line); 1035 1036 qdf_spin_lock_irqsave(&qdf_mem_list_lock); 1037 header = qdf_mem_get_header(ptr); 1038 error_bitmap = qdf_mem_header_validate(header, current_domain); 1039 error_bitmap |= 
qdf_mem_trailer_validate(header); 1040 1041 if (!error_bitmap) { 1042 header->freed = true; 1043 qdf_list_remove_node(qdf_mem_list_get(header->domain), 1044 &header->node); 1045 } 1046 qdf_spin_unlock_irqrestore(&qdf_mem_list_lock); 1047 1048 qdf_mem_header_assert_valid(header, current_domain, error_bitmap, 1049 func, line); 1050 1051 qdf_mem_kmalloc_dec(header->size); 1052 kfree(header); 1053 } 1054 qdf_export_symbol(qdf_mem_free_debug); 1055 1056 void qdf_mem_check_for_leaks(void) 1057 { 1058 enum qdf_debug_domain current_domain = qdf_debug_domain_get(); 1059 qdf_list_t *mem_list = qdf_mem_list_get(current_domain); 1060 qdf_list_t *dma_list = qdf_mem_dma_list(current_domain); 1061 uint32_t leaks_count = 0; 1062 1063 leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list); 1064 leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list); 1065 1066 if (leaks_count) 1067 panic("%u fatal memory leaks detected!", leaks_count); 1068 } 1069 1070 #else 1071 static void qdf_mem_debug_init(void) {} 1072 1073 static void qdf_mem_debug_exit(void) {} 1074 1075 void *qdf_mem_malloc_fl(size_t size, const char *func, uint32_t line) 1076 { 1077 void *ptr; 1078 1079 ptr = qdf_mem_prealloc_get(size); 1080 if (ptr) 1081 return ptr; 1082 1083 ptr = kzalloc(size, qdf_mem_malloc_flags()); 1084 if (!ptr) { 1085 qdf_nofl_err("Failed to malloc %zuB @ %s:%d", 1086 size, func, line); 1087 return NULL; 1088 } 1089 1090 qdf_mem_kmalloc_inc(ksize(ptr)); 1091 1092 return ptr; 1093 } 1094 qdf_export_symbol(qdf_mem_malloc_fl); 1095 1096 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line) 1097 { 1098 void *ptr; 1099 1100 ptr = qdf_mem_prealloc_get(size); 1101 if (ptr) 1102 return ptr; 1103 1104 ptr = kzalloc(size, GFP_ATOMIC); 1105 if (!ptr) { 1106 qdf_nofl_warn("Failed to malloc %zuB @ %s:%d", 1107 size, func, line); 1108 return NULL; 1109 } 1110 1111 qdf_mem_kmalloc_inc(ksize(ptr)); 1112 1113 return ptr; 1114 } 1115 
qdf_export_symbol(qdf_mem_malloc_atomic_fl); 1116 1117 /** 1118 * qdf_mem_free() - free QDF memory 1119 * @ptr: Pointer to the starting address of the memory to be free'd. 1120 * 1121 * This function will free the memory pointed to by 'ptr'. 1122 * 1123 * Return: None 1124 */ 1125 void qdf_mem_free(void *ptr) 1126 { 1127 if (!ptr) 1128 return; 1129 1130 if (qdf_mem_prealloc_put(ptr)) 1131 return; 1132 1133 qdf_mem_kmalloc_dec(ksize(ptr)); 1134 1135 kfree(ptr); 1136 } 1137 1138 qdf_export_symbol(qdf_mem_free); 1139 #endif 1140 1141 void *qdf_aligned_malloc_fl(qdf_size_t size, uint32_t ring_base_align, 1142 void **vaddr_unaligned, 1143 const char *func, uint32_t line) 1144 { 1145 void *vaddr_aligned; 1146 1147 *vaddr_unaligned = qdf_mem_malloc_fl(size, func, line); 1148 if (!*vaddr_unaligned) { 1149 qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line); 1150 return NULL; 1151 } 1152 1153 if ((unsigned long)(*vaddr_unaligned) % ring_base_align) { 1154 qdf_mem_free(*vaddr_unaligned); 1155 *vaddr_unaligned = qdf_mem_malloc_fl(size + ring_base_align - 1, 1156 func, line); 1157 if (!*vaddr_unaligned) { 1158 qdf_warn("Failed to alloc %zuB @ %s:%d", 1159 size, func, line); 1160 return NULL; 1161 } 1162 } 1163 1164 vaddr_aligned = (*vaddr_unaligned) + 1165 ((unsigned long)(*vaddr_unaligned) % ring_base_align); 1166 1167 return vaddr_aligned; 1168 } 1169 qdf_export_symbol(qdf_aligned_malloc_fl); 1170 1171 /** 1172 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory 1173 * @osdev: OS device handle pointer 1174 * @pages: Multi page information storage 1175 * @element_size: Each element size 1176 * @element_num: Total number of elements should be allocated 1177 * @memctxt: Memory context 1178 * @cacheable: Coherent memory or cacheable memory 1179 * 1180 * This function will allocate large size of memory over multiple pages. 
 * Large size of contiguous memory allocation will fail frequently, then
 * instead of allocate large memory by one shot, allocate through multiple, non
 * contiguous memory and combine pages when actual usage
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	/* elements never straddle a page boundary; zero means element_size
	 * is larger than a page and cannot be served by this allocator
	 */
	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	/* round the page count up to cover the remainder elements */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
							 PAGE_SIZE,
							 &dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* unwind: page_idx is the index of the page that failed, so free
	 * only the pages allocated before it, then the tracking array
	 */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
						dma_pages->page_v_addr_start,
						dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	/* leave the descriptor in a well-defined "empty" state on failure */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_alloc);

/**
 * qdf_mem_multi_pages_free() - free large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will free large size of memory over multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int page_idx;
	struct qdf_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			qdf_mem_free(pages->cacheable_pages[page_idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
						dma_pages->page_v_addr_start,
						dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

	/* reset descriptor so a double free becomes a harmless no-op loop */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_free);

/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: elements count should be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will make links for multi page allocated structure
 *
 * Return: 0 success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
		struct qdf_mem_multi_page_t *pages,
		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		/* each element's first word stores a pointer to the next
		 * element, forming a singly linked free list
		 */
		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				/* last element of this page links to the
				 * first element of the next page.
				 * NOTE(review): this reads index [i + 1],
				 * which is past the last page when i is the
				 * final page — presumably elem_count always
				 * makes the num_link break below fire first;
				 * confirm against callers
				 */
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	/* terminate the chain */
	if (c_elem)
		*c_elem = NULL;

	return 0;
}
qdf_export_symbol(qdf_mem_multi_page_link);

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: destination (may be NULL only when num_bytes is 0)
 * @src_addr: source (may be NULL only when num_bytes is 0)
 * @num_bytes: number of bytes to copy; 0 is a no-op
 *
 * Return: None
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	/* special case where dst_addr or src_addr can be NULL */
	if (!num_bytes)
		return;

	QDF_BUG(dst_addr);
	QDF_BUG(src_addr);
	if (!dst_addr || !src_addr)
		return;

	memcpy(dst_addr, src_addr, num_bytes);
}
qdf_export_symbol(qdf_mem_copy);

/**
 * qdf_mem_shared_mem_alloc() - allocate DMA memory shareable via sgtable
 * @osdev: OS device handle
 * @size: requested size in bytes (may be rounded up by the DMA layer)
 *
 * Allocates a qdf_shared_mem_t descriptor plus DMA-coherent backing
 * memory, zeroes it, records its physical address and builds a
 * scatter/gather table for it.
 *
 * Return: descriptor pointer, or NULL on any failure (everything
 * allocated so far is released)
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
{
	qdf_shared_mem_t *shared_mem;
	qdf_dma_addr_t dma_addr, paddr;
	int ret;

	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
	if (!shared_mem)
		return NULL;

	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
				size, qdf_mem_get_dma_addr_ptr(osdev,
						&shared_mem->mem_info));
	if (!shared_mem->vaddr) {
		qdf_err("Unable to allocate DMA memory for shared resource");
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	/* the DMA layer may round the size up; use the effective size */
	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);

	qdf_mem_zero(shared_mem->vaddr, size);
	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);

	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		qdf_err("Unable to get DMA sgtable");
		qdf_mem_free_consistent(osdev, osdev->dev,
					shared_mem->mem_info.size,
					shared_mem->vaddr,
					dma_addr,
					qdf_get_dma_mem_context(shared_mem,
								memctx));
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return shared_mem;
}

qdf_export_symbol(qdf_mem_shared_mem_alloc);

/**
 * qdf_mem_copy_toio() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Return: none
 */
void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((!dst_addr) || (!src_addr)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memcpy_toio(dst_addr, src_addr, num_bytes);
}

qdf_export_symbol(qdf_mem_copy_toio);

/**
 * qdf_mem_set_io() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @value: Byte set in memory
 * @num_bytes: Number of bytes to be set
 *
 * Return: None
 */
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (!ptr) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset_io(ptr, value, num_bytes);
}

qdf_export_symbol(qdf_mem_set_io);

/* NOTE: parameter order is (ptr, num_bytes, value) — not memset() order */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	QDF_BUG(ptr);
	if (!ptr)
		return;

	memset(ptr, value, num_bytes);
}
qdf_export_symbol(qdf_mem_set);

void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	/* special case where dst_addr or src_addr can be NULL */
	if (!num_bytes)
		return;

	QDF_BUG(dst_addr);
	QDF_BUG(src_addr);
	if (!dst_addr || !src_addr)
		return;

	memmove(dst_addr, src_addr, num_bytes);
}
qdf_export_symbol(qdf_mem_move);

/* NOTE(review): unlike qdf_mem_copy/qdf_mem_move there is no early return
 * after QDF_BUG here, so memcmp() still runs on NULL arguments (undefined
 * behavior) — confirm callers never pass NULL
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size)
{
	QDF_BUG(left);
	QDF_BUG(right);

	return memcmp(left, right, size);
}
qdf_export_symbol(qdf_mem_cmp);

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_dma_alloc() - allocates memory for dma
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size,
				      qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	/* on these targets the "DMA" address is simply the kernel virtual
	 * address of a normal allocation
	 */
	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}

#elif defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86)
#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	/* retry until the allocation lands at or above QCA8074_RAM_BASE;
	 * lower physical addresses are rejected on this platform
	 */
	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_err("%s failed , size: %zu!", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}

#else
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
#endif

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	/* counterpart of the qdf_mem_malloc()-based qdf_mem_dma_alloc() */
	qdf_mem_free(vaddr);
}
#else

static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
#endif

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device the memory is for
 * @size: usable size in bytes; must be 1..QDF_MEM_MAX_MALLOC
 * @paddr: out-param for the DMA address
 * @func: caller function name, stored in the debug header
 * @line: caller line number, stored in the debug header
 * @caller: caller return address, stored in the debug header
 *
 * Over-allocates by QDF_DMA_MEM_DEBUG_SIZE and places a tracking header
 * after the usable region (a trailer), then links it into the per-domain
 * DMA tracking list.
 *
 * Return: usable virtual address, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				  paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, func, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);

/**
 * qdf_mem_free_consistent_debug() - free tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device the memory was allocated for
 * @size: usable size passed at allocation time (used to find the trailer)
 * @vaddr: virtual address returned by the allocator; NULL is a no-op
 * @paddr: DMA address returned by the allocator
 * @memctx: memory context
 * @func: caller function name, used in diagnostics
 * @line: caller line number, used in diagnostics
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_talloc_assert_no_children_fl(vaddr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		header->freed = true;
qdf_list_remove_node(qdf_mem_dma_list(header->domain), 1665 &header->node); 1666 } 1667 qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock); 1668 1669 qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line); 1670 1671 qdf_mem_dma_dec(header->size); 1672 qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr); 1673 } 1674 qdf_export_symbol(qdf_mem_free_consistent_debug); 1675 1676 #else 1677 1678 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, 1679 qdf_size_t size, qdf_dma_addr_t *paddr) 1680 { 1681 void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr); 1682 1683 if (vaddr) 1684 qdf_mem_dma_inc(size); 1685 1686 return vaddr; 1687 } 1688 qdf_export_symbol(qdf_mem_alloc_consistent); 1689 1690 void qdf_mem_free_consistent(qdf_device_t osdev, void *dev, 1691 qdf_size_t size, void *vaddr, 1692 qdf_dma_addr_t paddr, qdf_dma_context_t memctx) 1693 { 1694 qdf_mem_dma_dec(size); 1695 qdf_mem_dma_free(dev, size, vaddr, paddr); 1696 } 1697 qdf_export_symbol(qdf_mem_free_consistent); 1698 1699 #endif /* MEMORY_DEBUG */ 1700 1701 void *qdf_aligned_mem_alloc_consistent_fl( 1702 qdf_device_t osdev, void *dev, qdf_size_t size, 1703 void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned, 1704 qdf_dma_addr_t *paddr_aligned, uint32_t ring_base_align, 1705 const char *func, uint32_t line) 1706 { 1707 void *vaddr_aligned; 1708 1709 *vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev, size, 1710 paddr_unaligned); 1711 if (!*vaddr_unaligned) { 1712 qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line); 1713 return NULL; 1714 } 1715 1716 if ((unsigned long)(*vaddr_unaligned) % ring_base_align) { 1717 qdf_mem_free_consistent(osdev, dev, size, *vaddr_unaligned, 1718 *paddr_unaligned, 0); 1719 *vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev, 1720 size + ring_base_align - 1, paddr_unaligned); 1721 if (!*vaddr_unaligned) { 1722 qdf_warn("Failed to alloc %zuB @ %s:%d", 1723 size, func, line); 1724 return NULL; 1725 } 1726 } 1727 1728 
vaddr_aligned = *vaddr_unaligned + 1729 ((unsigned long)(*vaddr_unaligned) % ring_base_align); 1730 *paddr_aligned = *paddr_unaligned + ((unsigned long)(vaddr_aligned) - 1731 (unsigned long)(*vaddr_unaligned)); 1732 1733 return vaddr_aligned; 1734 } 1735 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl); 1736 1737 /** 1738 * qdf_mem_dma_sync_single_for_device() - assign memory to device 1739 * @osdev: OS device handle 1740 * @bus_addr: dma address to give to the device 1741 * @size: Size of the memory block 1742 * @direction: direction data will be DMAed 1743 * 1744 * Assign memory to the remote device. 1745 * The cache lines are flushed to ram or invalidated as needed. 1746 * 1747 * Return: none 1748 */ 1749 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, 1750 qdf_dma_addr_t bus_addr, 1751 qdf_size_t size, 1752 enum dma_data_direction direction) 1753 { 1754 dma_sync_single_for_device(osdev->dev, bus_addr, size, direction); 1755 } 1756 qdf_export_symbol(qdf_mem_dma_sync_single_for_device); 1757 1758 /** 1759 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU 1760 * @osdev: OS device handle 1761 * @bus_addr: dma address to give to the cpu 1762 * @size: Size of the memory block 1763 * @direction: direction data will be DMAed 1764 * 1765 * Assign memory to the CPU. 
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);

/**
 * qdf_mem_init() - initialize qdf memory management facilities
 *
 * Sets up memory debug tracking, nbuf debug tracking and the debugfs
 * entries that expose them.
 *
 * Return: none
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);

/**
 * qdf_mem_exit() - tear down qdf memory management facilities
 *
 * Performs the teardown in the reverse order of qdf_mem_init().
 *
 * Return: none
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);

/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 *
 * @dst_addr: A six-byte array Ethernet address destination
 * @src_addr: A six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 *
 * Return: none
 */
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
{
	if ((!dst_addr) || (!src_addr)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	ether_addr_copy(dst_addr, src_addr);
}
qdf_export_symbol(qdf_ether_addr_copy);