1 /* 2 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all 7 * copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL 10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED 11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE 12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL 13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR 14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR 16 * PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /** 20 * DOC: qdf_mem 21 * This file provides OS dependent memory management APIs 22 */ 23 24 #include "qdf_debugfs.h" 25 #include "qdf_mem.h" 26 #include "qdf_nbuf.h" 27 #include "qdf_lock.h" 28 #include "qdf_mc_timer.h" 29 #include "qdf_module.h" 30 #include <qdf_trace.h> 31 #include "qdf_atomic.h" 32 #include "qdf_str.h" 33 #include "qdf_talloc.h" 34 #include <linux/debugfs.h> 35 #include <linux/seq_file.h> 36 #include <linux/string.h> 37 38 #if defined(CONFIG_CNSS) 39 #include <net/cnss.h> 40 #endif 41 42 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC 43 #include <net/cnss_prealloc.h> 44 #endif 45 46 /* Preprocessor Definitions and Constants */ 47 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */ 48 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */ 49 #define QDF_DEBUG_STRING_SIZE 512 50 51 #ifdef MEMORY_DEBUG 52 #include "qdf_debug_domain.h" 53 #include <qdf_list.h> 54 55 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT]; 56 static qdf_spinlock_t qdf_mem_list_lock; 57 58 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT]; 59 static qdf_spinlock_t qdf_mem_dma_list_lock; 60 61 static inline qdf_list_t 
*qdf_mem_list_get(enum qdf_debug_domain domain)
{
	/* tracked heap allocations for @domain */
	return &qdf_mem_domains[domain];
}

/* tracked DMA allocations for @domain */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}

/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @func: name of the function the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 *
 * For heap allocations this header is prepended to the user buffer and a
 * 64-bit cookie is appended after it (see qdf_mem_get_trailer()), so both
 * buffer under-runs and over-runs can be detected at free time.
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};

/* magic cookies stored in the header/trailer; a mismatch at free time
 * indicates an out-of-bounds write by this or a neighboring allocation
 */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

/* map a user pointer back to its debug header (sits immediately before) */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

/* DMA allocations keep the header *after* the buffer instead, so the
 * buffer itself keeps the alignment the DMA caller requested
 */
static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
}

/* address of the 64-bit trailer cookie following the user buffer */
static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

/* user-visible pointer of a tracked allocation (just past the header) */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information
 * (DMA allocations carry no trailer; header only)
 */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))

/* write the trailer cookie just past the user buffer */
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

/* populate the debug header for a freshly made allocation */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *func, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;

	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}

/* bitmap of distinct corruption/misuse conditions found during validation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};

/* check the trailer cookie; meaningful only if header->size is intact */
static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header *header)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;
	return error_bitmap;
}

/* validate every header field, accumulating all problems into one bitmap */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is a uint8_t: exactly 'true' means double free; any other
	 * non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_list_node_in_any_list(&header->node))
		error_bitmap |=
QDF_MEM_BAD_NODE;

	/* domain must be a valid enum value and must match the currently
	 * active domain; a mismatch indicates a cross-domain lifetime bug
	 */
	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}

/**
 * qdf_mem_header_assert_valid() - log every detected error, then panic
 * @header: the (possibly corrupt) header being reported on
 * @current_domain: the currently active memory debug domain
 * @error_bitmap: errors found by qdf_mem_header_validate() /
 *	qdf_mem_trailer_validate(); no-op when zero
 * @func: name of the caller requesting the check
 * @line: line number of the caller
 *
 * Return: None (does not return when error_bitmap is non-zero; panics)
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *func,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
}
#endif /* MEMORY_DEBUG */

/* when set (the default), qdf mempools fall back to plain heap allocation */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);

#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc: total kmalloc allocations
 * @dma: total dma allocations
 * @skb: total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

/* account @size bytes of heap allocation (exposed via debugfs "kmalloc") */
void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* account @size bytes of DMA allocation (exposed via debugfs "dma") */
static void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

/* account @size bytes of skb allocation (exposed via debugfs "skb") */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* un-account @size bytes of heap allocation */
void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* un-account @size bytes of DMA allocation */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

/* un-account @size bytes of skb allocation */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

#ifdef MEMORY_DEBUG
/* qdf_abstract_print adapter that routes output to the QDF error trace */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* qdf_abstract_print adapter that appends a line to a seq_file (@priv) */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}

/**
 * struct __qdf_mem_info - memory statistics
 * @func: the function which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count size total filename caller timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}

/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per de-duplicated allocation site, and additionally
 * accumulates every site into a single "WLAN_BUG_RCA" summary string
 * (bounded by qdf_scnprintf) which is emitted last for log scraping.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* a zero count marks the end of the populated entries */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].func,
		      table[i].line, table[i].caller,
		      table[i].time);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].func,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}

/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * An entry matches when function, line, size and caller are all equal;
 * matching entries are coalesced by bumping @count.
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* first empty slot: start a new entry for this site */
		if (!table[i].count) {
			qdf_str_lcopy(table[i].func, meta->func,
				      QDF_MEM_FUNC_NAME_SIZE);
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			table[i].time = meta->time;
			break;
		}

		if (qdf_str_eq(table[i].func, meta->func) &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}

/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* the lock is dropped around printing; only the de-dup table
		 * (a local copy) is touched outside the lock, never @meta
		 */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever remains in the partially filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	/* one sequence step per debug domain; stop past the last domain */
	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator (points at the current domain id)
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* expose the per-domain allocation list as <debugfs>/mem/list */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

/* nothing to tear down; the root dir removal cleans up "list" as well */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */
593 static QDF_STATUS qdf_mem_debug_debugfs_init(void) 594 { 595 return QDF_STATUS_E_NOSUPPORT; 596 } 597 598 static QDF_STATUS qdf_mem_debug_debugfs_exit(void) 599 { 600 return QDF_STATUS_E_NOSUPPORT; 601 } 602 603 #endif /* MEMORY_DEBUG */ 604 605 606 static void qdf_mem_debugfs_exit(void) 607 { 608 debugfs_remove_recursive(qdf_mem_debugfs_root); 609 qdf_mem_debugfs_root = NULL; 610 } 611 612 static QDF_STATUS qdf_mem_debugfs_init(void) 613 { 614 struct dentry *qdf_debugfs_root = qdf_debugfs_get_root(); 615 616 if (!qdf_debugfs_root) 617 return QDF_STATUS_E_FAILURE; 618 619 qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root); 620 621 if (!qdf_mem_debugfs_root) 622 return QDF_STATUS_E_FAILURE; 623 624 625 debugfs_create_atomic_t("kmalloc", 626 S_IRUSR, 627 qdf_mem_debugfs_root, 628 &qdf_mem_stat.kmalloc); 629 630 debugfs_create_atomic_t("dma", 631 S_IRUSR, 632 qdf_mem_debugfs_root, 633 &qdf_mem_stat.dma); 634 635 debugfs_create_atomic_t("skb", 636 S_IRUSR, 637 qdf_mem_debugfs_root, 638 &qdf_mem_stat.skb); 639 640 return QDF_STATUS_SUCCESS; 641 } 642 643 #else /* WLAN_DEBUGFS */ 644 645 static inline void qdf_mem_dma_inc(qdf_size_t size) {} 646 static inline void qdf_mem_dma_dec(qdf_size_t size) {} 647 648 static QDF_STATUS qdf_mem_debugfs_init(void) 649 { 650 return QDF_STATUS_E_NOSUPPORT; 651 } 652 static void qdf_mem_debugfs_exit(void) {} 653 654 655 static QDF_STATUS qdf_mem_debug_debugfs_init(void) 656 { 657 return QDF_STATUS_E_NOSUPPORT; 658 } 659 660 static QDF_STATUS qdf_mem_debug_debugfs_exit(void) 661 { 662 return QDF_STATUS_E_NOSUPPORT; 663 } 664 665 #endif /* WLAN_DEBUGFS */ 666 667 /** 668 * __qdf_mempool_init() - Create and initialize memory pool 669 * 670 * @osdev: platform device object 671 * @pool_addr: address of the pool created 672 * @elem_cnt: no. 
of elements in pool 673 * @elem_size: size of each pool element in bytes 674 * @flags: flags 675 * 676 * return: Handle to memory pool or NULL if allocation failed 677 */ 678 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr, 679 int elem_cnt, size_t elem_size, u_int32_t flags) 680 { 681 __qdf_mempool_ctxt_t *new_pool = NULL; 682 u_int32_t align = L1_CACHE_BYTES; 683 unsigned long aligned_pool_mem; 684 int pool_id; 685 int i; 686 687 if (prealloc_disabled) { 688 /* TBD: We can maintain a list of pools in qdf_device_t 689 * to help debugging 690 * when pre-allocation is not enabled 691 */ 692 new_pool = (__qdf_mempool_ctxt_t *) 693 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); 694 if (!new_pool) 695 return QDF_STATUS_E_NOMEM; 696 697 memset(new_pool, 0, sizeof(*new_pool)); 698 /* TBD: define flags for zeroing buffers etc */ 699 new_pool->flags = flags; 700 new_pool->elem_size = elem_size; 701 new_pool->max_elem = elem_cnt; 702 *pool_addr = new_pool; 703 return 0; 704 } 705 706 for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) { 707 if (!osdev->mem_pool[pool_id]) 708 break; 709 } 710 711 if (pool_id == MAX_MEM_POOLS) 712 return -ENOMEM; 713 714 new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *) 715 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL); 716 if (!new_pool) 717 return -ENOMEM; 718 719 memset(new_pool, 0, sizeof(*new_pool)); 720 /* TBD: define flags for zeroing buffers etc */ 721 new_pool->flags = flags; 722 new_pool->pool_id = pool_id; 723 724 /* Round up the element size to cacheline */ 725 new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES); 726 new_pool->mem_size = elem_cnt * new_pool->elem_size + 727 ((align)?(align - 1):0); 728 729 new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL); 730 if (!new_pool->pool_mem) { 731 /* TBD: Check if we need get_free_pages above */ 732 kfree(new_pool); 733 osdev->mem_pool[pool_id] = NULL; 734 return -ENOMEM; 735 } 736 737 spin_lock_init(&new_pool->lock); 738 739 
/* Initialize free list */ 740 aligned_pool_mem = (unsigned long)(new_pool->pool_mem) + 741 ((align) ? (unsigned long)(new_pool->pool_mem)%align:0); 742 STAILQ_INIT(&new_pool->free_list); 743 744 for (i = 0; i < elem_cnt; i++) 745 STAILQ_INSERT_TAIL(&(new_pool->free_list), 746 (mempool_elem_t *)(aligned_pool_mem + 747 (new_pool->elem_size * i)), mempool_entry); 748 749 750 new_pool->free_cnt = elem_cnt; 751 *pool_addr = new_pool; 752 return 0; 753 } 754 qdf_export_symbol(__qdf_mempool_init); 755 756 /** 757 * __qdf_mempool_destroy() - Destroy memory pool 758 * @osdev: platform device object 759 * @Handle: to memory pool 760 * 761 * Returns: none 762 */ 763 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool) 764 { 765 int pool_id = 0; 766 767 if (!pool) 768 return; 769 770 if (prealloc_disabled) { 771 kfree(pool); 772 return; 773 } 774 775 pool_id = pool->pool_id; 776 777 /* TBD: Check if free count matches elem_cnt if debug is enabled */ 778 kfree(pool->pool_mem); 779 kfree(pool); 780 osdev->mem_pool[pool_id] = NULL; 781 } 782 qdf_export_symbol(__qdf_mempool_destroy); 783 784 /** 785 * __qdf_mempool_alloc() - Allocate an element memory pool 786 * 787 * @osdev: platform device object 788 * @Handle: to memory pool 789 * 790 * Return: Pointer to the allocated element or NULL if the pool is empty 791 */ 792 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool) 793 { 794 void *buf = NULL; 795 796 if (!pool) 797 return NULL; 798 799 if (prealloc_disabled) 800 return qdf_mem_malloc(pool->elem_size); 801 802 spin_lock_bh(&pool->lock); 803 804 buf = STAILQ_FIRST(&pool->free_list); 805 if (buf) { 806 STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry); 807 pool->free_cnt--; 808 } 809 810 /* TBD: Update free count if debug is enabled */ 811 spin_unlock_bh(&pool->lock); 812 813 return buf; 814 } 815 qdf_export_symbol(__qdf_mempool_alloc); 816 817 /** 818 * __qdf_mempool_free() - Free a memory pool element 819 * @osdev: Platform device object 820 
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Returns: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
{
	if (!pool)
		return;


	/* element came from qdf_mem_malloc() when pools are disabled */
	if (prealloc_disabled)
		return qdf_mem_free(buf);

	spin_lock_bh(&pool->lock);
	pool->free_cnt++;

	STAILQ_INSERT_TAIL
		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
	spin_unlock_bh(&pool->lock);
}
qdf_export_symbol(__qdf_mempool_free);

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size if greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
 * a chunk of pre-allocated memory. If size if less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* preallocated chunks are recycled; zero like kzalloc would */
	memset(ptr, 0, size);

	return ptr;
}

/* return @ptr to the prealloc pool; false if @ptr was not preallocated */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */

/* pick GFP flags that are legal in the current execution context */
static int qdf_mem_malloc_flags(void)
{
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;

	return GFP_KERNEL;
}

/* External Function implementation */
#ifdef MEMORY_DEBUG

/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initializing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma lists are unbounded (max size 0) */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);
}

/* report (but do not panic on) leaks in one domain's tracking list;
 * returns the number of leaked allocations found
 */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}

/* check all domains in @domains; panics if any leak was found */
static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		QDF_DEBUG_PANIC("%u fatal memory leaks detected!",
				leak_count);
}

/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Runs a final leak check (panicking on leaks), then destroys the
 * tracking lists and their locks.
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}

void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct
qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* flag == 0 means "pick GFP flags for the current context" */
	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* time the allocation so unexpected sleeping can be flagged */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	/* account the real allocated size, including debug overhead */
	qdf_mem_kmalloc_inc(ksize(header));

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);

void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	/* a pointer this low cannot even hold its own debug header */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		QDF_DEBUG_PANIC("Failed to free invalid memory location %pK",
				ptr);

	qdf_talloc_assert_no_children_fl(ptr, func, line);

	/* validate and unlink under the lock so the header cannot be
	 * freed/reused by a racing thread while it is inspected
	 */
	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_list_get(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* panics (with full diagnostics) if anything was wrong */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    func, line);

	qdf_mem_kmalloc_dec(ksize(header));
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);

void qdf_mem_check_for_leaks(void)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
	uint32_t leaks_count = 0;

	/* check only the currently active domain (heap and dma lists) */
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);

	if (leaks_count)
		QDF_DEBUG_PANIC("%u fatal memory leaks detected!",
				leaks_count);
}

/**
 * qdf_mem_multi_pages_alloc_debug() - Debug version of
 * qdf_mem_multi_pages_alloc
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements should be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Caller of this allocator
 * @line: Line number of the caller
 * @caller: Return address of the caller
 *
 * This function will allocate large size of memory over multiple pages.
 * Large size of contiguous memory allocation will fail frequently, then
 * instead of allocate large memory by one shot, allocate through multiple, non
 * contiguous memory and combine pages when actual usage
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
				     struct qdf_mem_multi_page_t *pages,
				     size_t element_size, uint16_t element_num,
				     qdf_dma_context_t memctxt, bool cacheable,
				     const char *func, uint32_t line,
				     void *caller)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	/* zero when element_size > PAGE_SIZE: elements must fit one page */
	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	/* round the page count up so a partial last page is allocated */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage: one pointer per page */
		pages->cacheable_pages = qdf_mem_malloc_debug(
			pages->num_pages * sizeof(pages->cacheable_pages),
			func, line, caller, 0);
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
				PAGE_SIZE, func, line, caller, 0);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc_debug(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
			func, line, caller, 0);
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent_debug(
					osdev, osdev->dev, PAGE_SIZE,
					&dma_pages->page_p_addr,
					func, line, caller);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* unwind: free only the pages allocated before the failure
	 * (page_idx is the index of the page that failed)
	 */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free_debug(pages->cacheable_pages[i],
					   func, line);
		qdf_mem_free_debug(pages->cacheable_pages, func, line);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent_debug(
				osdev, osdev->dev,
				PAGE_SIZE, dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt, func, line);
			dma_pages++;
		}
		qdf_mem_free_debug(pages->dma_pages, func, line);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}

qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);

/**
 * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Caller of this allocator
 * @line: Line number of the caller
 *
 * This function will free large size of memory over multiple pages.
1195 * 1196 * Return: None 1197 */ 1198 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev, 1199 struct qdf_mem_multi_page_t *pages, 1200 qdf_dma_context_t memctxt, bool cacheable, 1201 const char *func, uint32_t line) 1202 { 1203 unsigned int page_idx; 1204 struct qdf_mem_dma_page_t *dma_pages; 1205 1206 if (cacheable) { 1207 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) 1208 qdf_mem_free_debug(pages->cacheable_pages[page_idx], 1209 func, line); 1210 qdf_mem_free_debug(pages->cacheable_pages, func, line); 1211 } else { 1212 dma_pages = pages->dma_pages; 1213 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { 1214 qdf_mem_free_consistent_debug( 1215 osdev, osdev->dev, PAGE_SIZE, 1216 dma_pages->page_v_addr_start, 1217 dma_pages->page_p_addr, memctxt, func, line); 1218 dma_pages++; 1219 } 1220 qdf_mem_free_debug(pages->dma_pages, func, line); 1221 } 1222 1223 pages->cacheable_pages = NULL; 1224 pages->dma_pages = NULL; 1225 pages->num_pages = 0; 1226 } 1227 1228 qdf_export_symbol(qdf_mem_multi_pages_free_debug); 1229 1230 #else 1231 static void qdf_mem_debug_init(void) {} 1232 1233 static void qdf_mem_debug_exit(void) {} 1234 1235 void *qdf_mem_malloc_fl(size_t size, const char *func, uint32_t line) 1236 { 1237 void *ptr; 1238 1239 if (!size || size > QDF_MEM_MAX_MALLOC) { 1240 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func, 1241 line); 1242 return NULL; 1243 } 1244 1245 ptr = qdf_mem_prealloc_get(size); 1246 if (ptr) 1247 return ptr; 1248 1249 ptr = kzalloc(size, qdf_mem_malloc_flags()); 1250 if (!ptr) { 1251 qdf_nofl_err("Failed to malloc %zuB @ %s:%d", 1252 size, func, line); 1253 return NULL; 1254 } 1255 1256 qdf_mem_kmalloc_inc(ksize(ptr)); 1257 1258 return ptr; 1259 } 1260 qdf_export_symbol(qdf_mem_malloc_fl); 1261 1262 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line) 1263 { 1264 void *ptr; 1265 1266 ptr = qdf_mem_prealloc_get(size); 1267 if (ptr) 1268 return ptr; 1269 1270 ptr = 
kzalloc(size, GFP_ATOMIC); 1271 if (!ptr) { 1272 qdf_nofl_warn("Failed to malloc %zuB @ %s:%d", 1273 size, func, line); 1274 return NULL; 1275 } 1276 1277 qdf_mem_kmalloc_inc(ksize(ptr)); 1278 1279 return ptr; 1280 } 1281 qdf_export_symbol(qdf_mem_malloc_atomic_fl); 1282 1283 /** 1284 * qdf_mem_free() - free QDF memory 1285 * @ptr: Pointer to the starting address of the memory to be free'd. 1286 * 1287 * This function will free the memory pointed to by 'ptr'. 1288 * 1289 * Return: None 1290 */ 1291 void qdf_mem_free(void *ptr) 1292 { 1293 if (!ptr) 1294 return; 1295 1296 if (qdf_mem_prealloc_put(ptr)) 1297 return; 1298 1299 qdf_mem_kmalloc_dec(ksize(ptr)); 1300 1301 kfree(ptr); 1302 } 1303 1304 qdf_export_symbol(qdf_mem_free); 1305 1306 /** 1307 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory 1308 * @osdev: OS device handle pointer 1309 * @pages: Multi page information storage 1310 * @element_size: Each element size 1311 * @element_num: Total number of elements should be allocated 1312 * @memctxt: Memory context 1313 * @cacheable: Coherent memory or cacheable memory 1314 * 1315 * This function will allocate large size of memory over multiple pages. 
1316 * Large size of contiguous memory allocation will fail frequently, then 1317 * instead of allocate large memory by one shot, allocate through multiple, non 1318 * contiguous memory and combine pages when actual usage 1319 * 1320 * Return: None 1321 */ 1322 void qdf_mem_multi_pages_alloc(qdf_device_t osdev, 1323 struct qdf_mem_multi_page_t *pages, 1324 size_t element_size, uint16_t element_num, 1325 qdf_dma_context_t memctxt, bool cacheable) 1326 { 1327 uint16_t page_idx; 1328 struct qdf_mem_dma_page_t *dma_pages; 1329 void **cacheable_pages = NULL; 1330 uint16_t i; 1331 1332 pages->num_element_per_page = PAGE_SIZE / element_size; 1333 if (!pages->num_element_per_page) { 1334 qdf_print("Invalid page %d or element size %d", 1335 (int)PAGE_SIZE, (int)element_size); 1336 goto out_fail; 1337 } 1338 1339 pages->num_pages = element_num / pages->num_element_per_page; 1340 if (element_num % pages->num_element_per_page) 1341 pages->num_pages++; 1342 1343 if (cacheable) { 1344 /* Pages information storage */ 1345 pages->cacheable_pages = qdf_mem_malloc( 1346 pages->num_pages * sizeof(pages->cacheable_pages)); 1347 if (!pages->cacheable_pages) 1348 goto out_fail; 1349 1350 cacheable_pages = pages->cacheable_pages; 1351 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { 1352 cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE); 1353 if (!cacheable_pages[page_idx]) 1354 goto page_alloc_fail; 1355 } 1356 pages->dma_pages = NULL; 1357 } else { 1358 pages->dma_pages = qdf_mem_malloc( 1359 pages->num_pages * sizeof(struct qdf_mem_dma_page_t)); 1360 if (!pages->dma_pages) 1361 goto out_fail; 1362 1363 dma_pages = pages->dma_pages; 1364 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { 1365 dma_pages->page_v_addr_start = 1366 qdf_mem_alloc_consistent(osdev, osdev->dev, 1367 PAGE_SIZE, 1368 &dma_pages->page_p_addr); 1369 if (!dma_pages->page_v_addr_start) { 1370 qdf_print("dmaable page alloc fail pi %d", 1371 page_idx); 1372 goto page_alloc_fail; 1373 } 
1374 dma_pages->page_v_addr_end = 1375 dma_pages->page_v_addr_start + PAGE_SIZE; 1376 dma_pages++; 1377 } 1378 pages->cacheable_pages = NULL; 1379 } 1380 return; 1381 1382 page_alloc_fail: 1383 if (cacheable) { 1384 for (i = 0; i < page_idx; i++) 1385 qdf_mem_free(pages->cacheable_pages[i]); 1386 qdf_mem_free(pages->cacheable_pages); 1387 } else { 1388 dma_pages = pages->dma_pages; 1389 for (i = 0; i < page_idx; i++) { 1390 qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE, 1391 dma_pages->page_v_addr_start, 1392 dma_pages->page_p_addr, memctxt); 1393 dma_pages++; 1394 } 1395 qdf_mem_free(pages->dma_pages); 1396 } 1397 1398 out_fail: 1399 pages->cacheable_pages = NULL; 1400 pages->dma_pages = NULL; 1401 pages->num_pages = 0; 1402 return; 1403 } 1404 qdf_export_symbol(qdf_mem_multi_pages_alloc); 1405 1406 /** 1407 * qdf_mem_multi_pages_free() - free large size of kernel memory 1408 * @osdev: OS device handle pointer 1409 * @pages: Multi page information storage 1410 * @memctxt: Memory context 1411 * @cacheable: Coherent memory or cacheable memory 1412 * 1413 * This function will free large size of memory over multiple pages. 
1414 * 1415 * Return: None 1416 */ 1417 void qdf_mem_multi_pages_free(qdf_device_t osdev, 1418 struct qdf_mem_multi_page_t *pages, 1419 qdf_dma_context_t memctxt, bool cacheable) 1420 { 1421 unsigned int page_idx; 1422 struct qdf_mem_dma_page_t *dma_pages; 1423 1424 if (cacheable) { 1425 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) 1426 qdf_mem_free(pages->cacheable_pages[page_idx]); 1427 qdf_mem_free(pages->cacheable_pages); 1428 } else { 1429 dma_pages = pages->dma_pages; 1430 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) { 1431 qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE, 1432 dma_pages->page_v_addr_start, 1433 dma_pages->page_p_addr, memctxt); 1434 dma_pages++; 1435 } 1436 qdf_mem_free(pages->dma_pages); 1437 } 1438 1439 pages->cacheable_pages = NULL; 1440 pages->dma_pages = NULL; 1441 pages->num_pages = 0; 1442 return; 1443 } 1444 qdf_export_symbol(qdf_mem_multi_pages_free); 1445 #endif 1446 1447 void *qdf_aligned_malloc_fl(uint32_t *size, 1448 void **vaddr_unaligned, 1449 qdf_dma_addr_t *paddr_unaligned, 1450 qdf_dma_addr_t *paddr_aligned, 1451 uint32_t align, 1452 const char *func, uint32_t line) 1453 { 1454 void *vaddr_aligned; 1455 uint32_t align_alloc_size; 1456 1457 *vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func, 1458 line); 1459 if (!*vaddr_unaligned) { 1460 qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line); 1461 return NULL; 1462 } 1463 1464 *paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned); 1465 1466 /* Re-allocate additional bytes to align base address only if 1467 * above allocation returns unaligned address. Reason for 1468 * trying exact size allocation above is, OS tries to allocate 1469 * blocks of size power-of-2 pages and then free extra pages. 1470 * e.g., of a ring size of 1MB, the allocation below will 1471 * request 1MB plus 7 bytes for alignment, which will cause a 1472 * 2MB block allocation,and that is failing sometimes due to 1473 * memory fragmentation. 
1474 */ 1475 if ((unsigned long)(*paddr_unaligned) & (align - 1)) { 1476 align_alloc_size = *size + align - 1; 1477 1478 qdf_mem_free(*vaddr_unaligned); 1479 *vaddr_unaligned = qdf_mem_malloc_fl( 1480 (qdf_size_t)align_alloc_size, func, line); 1481 if (!*vaddr_unaligned) { 1482 qdf_warn("Failed to alloc %uB @ %s:%d", 1483 align_alloc_size, func, line); 1484 return NULL; 1485 } 1486 1487 *paddr_unaligned = qdf_mem_virt_to_phys( 1488 *vaddr_unaligned); 1489 *size = align_alloc_size; 1490 } 1491 1492 *paddr_aligned = (qdf_dma_addr_t)qdf_align 1493 ((unsigned long)(*paddr_unaligned), align); 1494 1495 vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) + 1496 ((unsigned long)(*paddr_aligned) - 1497 (unsigned long)(*paddr_unaligned))); 1498 1499 return vaddr_aligned; 1500 } 1501 1502 qdf_export_symbol(qdf_aligned_malloc_fl); 1503 1504 /** 1505 * qdf_mem_multi_page_link() - Make links for multi page elements 1506 * @osdev: OS device handle pointer 1507 * @pages: Multi page information storage 1508 * @elem_size: Single element size 1509 * @elem_count: elements count should be linked 1510 * @cacheable: Coherent memory or cacheable memory 1511 * 1512 * This function will make links for multi page allocated structure 1513 * 1514 * Return: 0 success 1515 */ 1516 int qdf_mem_multi_page_link(qdf_device_t osdev, 1517 struct qdf_mem_multi_page_t *pages, 1518 uint32_t elem_size, uint32_t elem_count, uint8_t cacheable) 1519 { 1520 uint16_t i, i_int; 1521 void *page_info; 1522 void **c_elem = NULL; 1523 uint32_t num_link = 0; 1524 1525 for (i = 0; i < pages->num_pages; i++) { 1526 if (cacheable) 1527 page_info = pages->cacheable_pages[i]; 1528 else 1529 page_info = pages->dma_pages[i].page_v_addr_start; 1530 1531 if (!page_info) 1532 return -ENOMEM; 1533 1534 c_elem = (void **)page_info; 1535 for (i_int = 0; i_int < pages->num_element_per_page; i_int++) { 1536 if (i_int == (pages->num_element_per_page - 1)) { 1537 if (cacheable) 1538 *c_elem = pages-> 1539 cacheable_pages[i + 
1]; 1540 else 1541 *c_elem = pages-> 1542 dma_pages[i + 1]. 1543 page_v_addr_start; 1544 num_link++; 1545 break; 1546 } else { 1547 *c_elem = 1548 (void *)(((char *)c_elem) + elem_size); 1549 } 1550 num_link++; 1551 c_elem = (void **)*c_elem; 1552 1553 /* Last link established exit */ 1554 if (num_link == (elem_count - 1)) 1555 break; 1556 } 1557 } 1558 1559 if (c_elem) 1560 *c_elem = NULL; 1561 1562 return 0; 1563 } 1564 qdf_export_symbol(qdf_mem_multi_page_link); 1565 1566 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes) 1567 { 1568 /* special case where dst_addr or src_addr can be NULL */ 1569 if (!num_bytes) 1570 return; 1571 1572 QDF_BUG(dst_addr); 1573 QDF_BUG(src_addr); 1574 if (!dst_addr || !src_addr) 1575 return; 1576 1577 memcpy(dst_addr, src_addr, num_bytes); 1578 } 1579 qdf_export_symbol(qdf_mem_copy); 1580 1581 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size) 1582 { 1583 qdf_shared_mem_t *shared_mem; 1584 qdf_dma_addr_t dma_addr, paddr; 1585 int ret; 1586 1587 shared_mem = qdf_mem_malloc(sizeof(*shared_mem)); 1588 if (!shared_mem) 1589 return NULL; 1590 1591 shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, 1592 size, qdf_mem_get_dma_addr_ptr(osdev, 1593 &shared_mem->mem_info)); 1594 if (!shared_mem->vaddr) { 1595 qdf_err("Unable to allocate DMA memory for shared resource"); 1596 qdf_mem_free(shared_mem); 1597 return NULL; 1598 } 1599 1600 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size); 1601 size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info); 1602 1603 qdf_mem_zero(shared_mem->vaddr, size); 1604 dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info); 1605 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr); 1606 1607 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr); 1608 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable, 1609 shared_mem->vaddr, dma_addr, size); 1610 if (ret) { 1611 qdf_err("Unable to get DMA sgtable"); 1612 
qdf_mem_free_consistent(osdev, osdev->dev, 1613 shared_mem->mem_info.size, 1614 shared_mem->vaddr, 1615 dma_addr, 1616 qdf_get_dma_mem_context(shared_mem, 1617 memctx)); 1618 qdf_mem_free(shared_mem); 1619 return NULL; 1620 } 1621 1622 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable); 1623 1624 return shared_mem; 1625 } 1626 1627 qdf_export_symbol(qdf_mem_shared_mem_alloc); 1628 1629 /** 1630 * qdf_mem_copy_toio() - copy memory 1631 * @dst_addr: Pointer to destination memory location (to copy to) 1632 * @src_addr: Pointer to source memory location (to copy from) 1633 * @num_bytes: Number of bytes to copy. 1634 * 1635 * Return: none 1636 */ 1637 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes) 1638 { 1639 if (0 == num_bytes) { 1640 /* special case where dst_addr or src_addr can be NULL */ 1641 return; 1642 } 1643 1644 if ((!dst_addr) || (!src_addr)) { 1645 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, 1646 "%s called with NULL parameter, source:%pK destination:%pK", 1647 __func__, src_addr, dst_addr); 1648 QDF_ASSERT(0); 1649 return; 1650 } 1651 memcpy_toio(dst_addr, src_addr, num_bytes); 1652 } 1653 1654 qdf_export_symbol(qdf_mem_copy_toio); 1655 1656 /** 1657 * qdf_mem_set_io() - set (fill) memory with a specified byte value. 
 * @ptr: Pointer to memory that will be set
 * @value: Byte set in memory
 * @num_bytes: Number of bytes to be set
 *
 * Return: None
 */
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (!ptr) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset_io(ptr, value, num_bytes);
}

qdf_export_symbol(qdf_mem_set_io);

/**
 * qdf_mem_set() - fill memory with a byte value
 * @ptr: memory to fill; NULL triggers an assertion and an early return
 * @num_bytes: number of bytes to set
 * @value: byte value to write
 *
 * Note the (ptr, size, value) argument order differs from memset().
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	QDF_BUG(ptr);
	if (!ptr)
		return;

	memset(ptr, value, num_bytes);
}
qdf_export_symbol(qdf_mem_set);

/**
 * qdf_mem_move() - copy memory between possibly overlapping regions
 * @dst_addr: destination; may be NULL only when @num_bytes is 0
 * @src_addr: source; may be NULL only when @num_bytes is 0
 * @num_bytes: number of bytes to move
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	/* special case where dst_addr or src_addr can be NULL */
	if (!num_bytes)
		return;

	QDF_BUG(dst_addr);
	QDF_BUG(src_addr);
	if (!dst_addr || !src_addr)
		return;

	memmove(dst_addr, src_addr, num_bytes);
}
qdf_export_symbol(qdf_mem_move);

/**
 * qdf_mem_cmp() - compare two memory regions
 * @left: first region; must not be NULL (asserted)
 * @right: second region; must not be NULL (asserted)
 * @size: number of bytes to compare
 *
 * Return: memcmp() semantics — 0 if equal, <0/>0 otherwise
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size)
{
	QDF_BUG(left);
	QDF_BUG(right);

	return memcmp(left, right, size);
}
qdf_export_symbol(qdf_mem_cmp);

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_dma_alloc() - allocates memory for dma
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * On these targets plain kernel memory stands in for DMA memory; the
 * "physical" address reported is simply the virtual address.
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size,
				      qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}

#elif defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
	!defined(QCA_WIFI_QCN9000)

#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
/* x86 emulation platform: retry until the coherent buffer lands at or
 * above the emulated target's RAM base, since lower addresses are not
 * usable by the device model.
 */
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_err("%s failed , size: %zu!", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		/* Unsuitable address; release and try again */
		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}

#else
/* Default: real DMA-coherent allocation from the kernel DMA API */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
#endif

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* Counterpart of the qdf_mem_malloc()-backed qdf_mem_dma_alloc() above */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
#else

static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
#endif

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device for the DMA mapping
 * @size: requested size; must be 1..QDF_MEM_MAX_MALLOC
 * @paddr: out; DMA address of the buffer
 * @func: name of the calling function, for tracking/logs
 * @line: line number of the caller
 * @caller: return address of the caller
 *
 * Allocates @size plus room for a debug trailer, records the allocation
 * on the per-domain DMA tracking list, and returns the buffer start.
 *
 * Return: buffer virtual address, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				  paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, func, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);

/**
 * qdf_mem_free_consistent_debug() - free tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device the buffer was mapped for
 * @size: size passed at allocation time (trailer located relative to it)
 * @vaddr: buffer virtual address; NULL is a valid no-op
 * @paddr: DMA address of the buffer
 * @memctx: memory context (unused here)
 * @func: name of the calling function, for diagnostics
 * @line: line number of the caller
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_talloc_assert_no_children_fl(vaddr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		/* Only unlink when the header validated clean */
		header->freed = true;
		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);

#else

/**
 * qdf_mem_alloc_consistent() - allocate DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device for the DMA mapping
 * @size: requested size in bytes
 * @paddr: out; DMA address of the buffer
 *
 * Return: buffer virtual address, or NULL on failure
 */
void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, qdf_dma_addr_t *paddr)
{
	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);

	if (vaddr)
		qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent);

/**
 * qdf_mem_free_consistent() - free DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device the buffer was mapped for
 * @size: size of the buffer
 * @vaddr: buffer virtual address
 * @paddr: DMA address of the buffer
 * @memctx: memory context (unused here)
 *
 * Return: None
 */
void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);

#endif /* MEMORY_DEBUG */

/**
 * qdf_aligned_mem_alloc_consistent_fl() - allocate DMA-coherent memory
 * whose DMA address is aligned to @align
 * @osdev: OS device handle
 * @size: in/out; requested size, updated to the actually-allocated size
 * @vaddr_unaligned: out; virtual address of the underlying allocation
 * @paddr_unaligned: out; DMA address of the underlying allocation
 * @paddr_aligned: out; @paddr_unaligned rounded up to @align
 * @align: required alignment in bytes (assumed power of two)
 * @func: name of the calling function, for failure logs
 * @line: line number of the caller
 *
 * Return: aligned virtual address within the allocation, or NULL
 */
void *qdf_aligned_mem_alloc_consistent_fl(
	qdf_device_t osdev, uint32_t *size,
	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
	qdf_dma_addr_t *paddr_aligned, uint32_t align,
	const char *func, uint32_t line)
{
	void *vaddr_aligned;
	uint32_t align_alloc_size;

	*vaddr_unaligned = qdf_mem_alloc_consistent(
		osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %uB @ %s:%d",
			 *size, func, line);
		return NULL;
	}

	/* Re-allocate additional bytes to align base address only if
	 * above allocation returns unaligned address. Reason for
	 * trying exact size allocation above is, OS tries to allocate
	 * blocks of size power-of-2 pages and then free extra pages.
	 * e.g., of a ring size of 1MB, the allocation below will
	 * request 1MB plus 7 bytes for alignment, which will cause a
	 * 2MB block allocation,and that is failing sometimes due to
	 * memory fragmentation.
	 */
	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
		align_alloc_size = *size + align - 1;

		qdf_mem_free_consistent(osdev, osdev->dev, *size,
					*vaddr_unaligned,
					*paddr_unaligned, 0);

		*vaddr_unaligned = qdf_mem_alloc_consistent(
			osdev, osdev->dev, align_alloc_size,
			paddr_unaligned);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %uB @ %s:%d",
				 align_alloc_size, func, line);
			return NULL;
		}

		*size = align_alloc_size;
	}

	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
		(unsigned long)(*paddr_unaligned), align);

	/* Apply the same DMA-address offset to the virtual address */
	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
				 ((unsigned long)(*paddr_aligned) -
				  (unsigned long)(*paddr_unaligned)));

	return vaddr_aligned;
}
qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);

/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}
qdf_export_symbol(qdf_mem_dma_sync_single_for_device);

/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);

/**
 * qdf_mem_init() - initialize the qdf memory subsystem
 *
 * Sets up memory debug tracking, nbuf debug tracking and the memory
 * debugfs entries. Call once at driver load, before any qdf allocation.
 *
 * Return: None
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);

/**
 * qdf_mem_exit() - tear down the qdf memory subsystem
 *
 * Reverses qdf_mem_init() in the opposite order of initialization.
 *
 * Return: None
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);

/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 *
 * @dst_addr: A six-byte array Ethernet address destination
 * @src_addr: A six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 *
 * Return: none
 */
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
{
	if ((!dst_addr) || (!src_addr)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	ether_addr_copy(dst_addr, src_addr);
}
qdf_export_symbol(qdf_ether_addr_copy);