/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * This file provides OS dependent memory management APIs
 */

#include "qdf_debugfs.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "qdf_module.h"
#include <qdf_trace.h>
#include "qdf_atomic.h"
#include "qdf_str.h"
#include "qdf_talloc.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#ifdef CONFIG_MCL
#include <host_diag_core_event.h>
#else
#define host_log_low_resource_failure(code) do {} while (0)
#endif

#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include <net/cnss_prealloc.h>
#endif

#ifdef MEMORY_DEBUG
#include "qdf_debug_domain.h"
#include <qdf_list.h>

/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */
#define QDF_DEBUG_STRING_SIZE 512

static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;

static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}

/**
 * struct qdf_mem_header - memory object to debug
 * @node: list node linking this allocation into its debug domain list
 * @domain: the active memory domain at the time of allocation
 * @freed: flag set during free, used to detect double frees.
 *	Use uint8_t so we can detect corruption
 * @func: name of the function the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which the allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};

static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *)((uint8_t *)ptr + size);
}

static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))

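/*
 * Illustrative sketch (not driver code): with MEMORY_DEBUG enabled, a
 * regular allocation of @size bytes is laid out as
 *
 *	[struct qdf_mem_header][user buffer of @size bytes][WLAN_MEM_TRAILER]
 *
 * The pointer handed to callers is header + 1, and the helpers above
 * recover the guard regions from it:
 *
 *	void *ptr = qdf_mem_get_ptr(header);
 *	struct qdf_mem_header *hdr = qdf_mem_get_header(ptr); // hdr == header
 *	uint64_t *trailer = qdf_mem_get_trailer(hdr);         // guard word
 *
 * DMA allocations instead place the header only at the tail (see
 * QDF_DMA_MEM_DEBUG_SIZE), since prefixing it can trigger SMMU faults.
 */
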
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *func, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;

	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}

enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};

static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header *header)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;
	return error_bitmap;
}

static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_list_node_in_any_list(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}

static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *func,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
}
#endif /* MEMORY_DEBUG */

u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);

#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total kmalloc allocations
 * @dma:	total dma allocations
 * @skb:	total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

#ifdef MEMORY_DEBUG
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}

/**
 * struct __qdf_mem_info - memory statistics
 * @func: the function which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of the same type
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time to search, so choose an optimal value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count    size     total    filename     caller    timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}

/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].func,
		      table[i].line, table[i].caller,
		      table[i].time);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].func,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}

/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count) {
			qdf_str_lcopy(table[i].func, meta->func,
				      QDF_MEM_FUNC_NAME_SIZE);
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			table[i].time = meta->time;
			break;
		}

		if (qdf_str_eq(table[i].func, meta->func) &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}
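
/*
 * Illustrative sketch (not driver code): the table de-duplicates identical
 * allocation sites. Two leaked headers with the same func/line/size/caller
 * collapse into one row with count == 2, while a different site opens a new
 * row. meta_a and meta_b below are hypothetical struct qdf_mem_header
 * pointers taken from a domain list:
 *
 *	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE] = { 0 };
 *	bool full;
 *
 *	full = qdf_mem_meta_table_insert(table, meta_a); // row 0, count 1
 *	full = qdf_mem_meta_table_insert(table, meta_a); // row 0, count 2
 *	full = qdf_mem_meta_table_insert(table, meta_b); // row 1, count 1
 *
 * When @full becomes true the caller prints and zeroes the table before
 * continuing (see qdf_mem_domain_print() below).
 */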

/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after-free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	qdf_mem_meta_table_print(table, print, print_priv);
}
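
/*
 * Illustrative output (hypothetical allocation site, field widths per the
 * format string in qdf_mem_meta_table_print()):
 *
 *	--------------------------------------------------------------
 *	 count    size     total    filename     caller    timestamp
 *	--------------------------------------------------------------
 *	     3 x   512 =   1536B @ some_rx_attach:210   caller_fn+0x1c4 12345
 */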

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release the previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}
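
/*
 * Usage sketch (illustrative): the "list" node above can be read from
 * userspace; the exact parent path depends on where qdf_debugfs_get_root()
 * placed the qdf root under /sys/kernel/debug, e.g.
 *
 *	# cat /sys/kernel/debug/<qdf root>/mem/list
 *
 * which drives the seq_file iterator, one qdf_mem_seq_show() invocation per
 * debug domain.
 */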

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* MEMORY_DEBUG */


static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}

#else /* WLAN_DEBUGFS */

static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* WLAN_DEBUGFS */

/**
 * __qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, negative error code on failure
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags)
{
	__qdf_mempool_ctxt_t *new_pool = NULL;
	u_int32_t align = L1_CACHE_BYTES;
	unsigned long aligned_pool_mem;
	int pool_id;
	int i;

	if (prealloc_disabled) {
		/* TBD: We can maintain a list of pools in qdf_device_t
		 * to help debugging
		 * when pre-allocation is not enabled
		 */
		new_pool = (__qdf_mempool_ctxt_t *)
			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
		if (new_pool == NULL)
			return -ENOMEM;

		memset(new_pool, 0, sizeof(*new_pool));
		/* TBD: define flags for zeroing buffers etc */
		new_pool->flags = flags;
		new_pool->elem_size = elem_size;
		new_pool->max_elem = elem_cnt;
		*pool_addr = new_pool;
		return 0;
	}

	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
		if (osdev->mem_pool[pool_id] == NULL)
			break;
	}

	if (pool_id == MAX_MEM_POOLS)
		return -ENOMEM;

	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
	if (new_pool == NULL)
		return -ENOMEM;

	memset(new_pool, 0, sizeof(*new_pool));
	/* TBD: define flags for zeroing buffers etc */
	new_pool->flags = flags;
	new_pool->pool_id = pool_id;

	/* Round up the element size to cacheline */
	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
	new_pool->mem_size = elem_cnt * new_pool->elem_size +
				((align) ? (align - 1) : 0);

	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
	if (new_pool->pool_mem == NULL) {
		/* TBD: Check if we need get_free_pages above */
		kfree(new_pool);
		osdev->mem_pool[pool_id] = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&new_pool->lock);

	/* Initialize free list */
	aligned_pool_mem = roundup((unsigned long)new_pool->pool_mem, align);
	STAILQ_INIT(&new_pool->free_list);

	for (i = 0; i < elem_cnt; i++)
		STAILQ_INSERT_TAIL(&(new_pool->free_list),
			(mempool_elem_t *)(aligned_pool_mem +
			(new_pool->elem_size * i)), mempool_entry);


	new_pool->free_cnt = elem_cnt;
	*pool_addr = new_pool;
	return 0;
}
qdf_export_symbol(__qdf_mempool_init);

/**
 * __qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
{
	int pool_id = 0;

	if (!pool)
		return;

	if (prealloc_disabled) {
		kfree(pool);
		return;
	}

	pool_id = pool->pool_id;

	/* TBD: Check if free count matches elem_cnt if debug is enabled */
	kfree(pool->pool_mem);
	kfree(pool);
	osdev->mem_pool[pool_id] = NULL;
}
qdf_export_symbol(__qdf_mempool_destroy);

/**
 * __qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
{
	void *buf = NULL;

	if (!pool)
		return NULL;

	if (prealloc_disabled)
		return qdf_mem_malloc(pool->elem_size);

	spin_lock_bh(&pool->lock);

	buf = STAILQ_FIRST(&pool->free_list);
	if (buf != NULL) {
		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
		pool->free_cnt--;
	}

	/* TBD: Update free count if debug is enabled */
	spin_unlock_bh(&pool->lock);

	return buf;
}
qdf_export_symbol(__qdf_mempool_alloc);

/**
 * __qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
{
	if (!pool)
		return;

	if (prealloc_disabled)
		return qdf_mem_free(buf);

	spin_lock_bh(&pool->lock);
	pool->free_cnt++;

	STAILQ_INSERT_TAIL
		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
	spin_unlock_bh(&pool->lock);
}
qdf_export_symbol(__qdf_mempool_free);
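
/*
 * Usage sketch (illustrative, not driver code): a caller creates a pool of
 * fixed-size elements once, then alloc/free cycle through the cached free
 * list (or fall back to plain qdf_mem_malloc()/qdf_mem_free() when
 * prealloc_disabled is set). struct my_desc is a hypothetical element type:
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 128, sizeof(struct my_desc), 0))
 *		return -ENOMEM;
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem)
 *		__qdf_mempool_free(osdev, pool, elem);
 *
 *	__qdf_mempool_destroy(osdev, pool);
 */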

/**
 * qdf_mem_alloc_outline() - allocate QDF memory
 * @osdev: platform device object
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
qdf_export_symbol(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether the memory is corrupted or being double freed, and panics in
 * that case.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
qdf_export_symbol(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
qdf_export_symbol(qdf_mem_zero_outline);

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
 * a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	memset(ptr, 0, size);

	return ptr;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
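
/*
 * Illustrative flow (not driver code): large allocations try the wcnss
 * pre-allocated chunks first and fall back to the kernel heap; frees mirror
 * this by offering the pointer back to the pre-alloc pool before kfree().
 * This is the pattern the qdf_mem_malloc*/
/* qdf_mem_free implementations below follow:
 *
 *	void *ptr = qdf_mem_prealloc_get(size);      // NULL if size is below
 *	if (!ptr)                                    // threshold or pool empty
 *		ptr = kzalloc(size, qdf_mem_malloc_flags());
 *	...
 *	if (!qdf_mem_prealloc_put(ptr))              // false: not a prealloc
 *		kfree(ptr);                          // chunk, free normally
 */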

static int qdf_mem_malloc_flags(void)
{
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;

	return GFP_KERNEL;
}

/* External Function implementation */
#ifdef MEMORY_DEBUG

/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initializing the lists with a maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);
}

static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}

static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		panic("%u fatal memory leaks detected!", leak_count);
}

/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}

void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);
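
/*
 * Usage sketch (illustrative): with MEMORY_DEBUG enabled, the public
 * qdf_mem_malloc()/qdf_mem_free() macros (declared in qdf_mem.h) are
 * expected to route here with callsite information, roughly:
 *
 *	ptr = qdf_mem_malloc_debug(size, __func__, __LINE__,
 *				   QDF_RET_IP, 0);
 *	...
 *	qdf_mem_free_debug(ptr, __func__, __LINE__);
 *
 * The extra arguments populate the header fields that leak reports print.
 */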

void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_talloc_assert_no_children_fl(ptr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_list_get(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    func, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);

void qdf_mem_check_for_leaks(void)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
	uint32_t leaks_count = 0;

	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);

	if (leaks_count)
		panic("%u fatal memory leaks detected!", leaks_count);
}
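
/*
 * Usage sketch (illustrative): callers that switch debug domains can verify
 * the outgoing domain is empty before moving on, so leaks are attributed to
 * the phase that created them:
 *
 *	qdf_mem_check_for_leaks();      // panics if the current domain leaks
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 * qdf_debug_domain_set() and the domain ids come from qdf_debug_domain.h.
 */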

#else
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}

void *qdf_mem_malloc_fl(size_t size, const char *func, uint32_t line)
{
	void *ptr;

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, qdf_mem_malloc_flags());
	if (!ptr) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	qdf_mem_kmalloc_inc(ksize(ptr));

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_fl);

void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
{
	void *ptr;

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, GFP_ATOMIC);
	if (!ptr) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	qdf_mem_kmalloc_inc(ksize(ptr));

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_atomic_fl);

/**
 * qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void qdf_mem_free(void *ptr)
{
	if (!ptr)
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	qdf_mem_kmalloc_dec(ksize(ptr));

	kfree(ptr);
}

qdf_export_symbol(qdf_mem_free);
#endif

void *qdf_aligned_malloc_fl(qdf_size_t size, uint32_t ring_base_align,
			    void **vaddr_unaligned,
			    const char *func, uint32_t line)
{
	void *vaddr_aligned;

	*vaddr_unaligned = qdf_mem_malloc_fl(size, func, line);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
		qdf_mem_free(*vaddr_unaligned);
		*vaddr_unaligned = qdf_mem_malloc_fl(size + ring_base_align - 1,
						     func, line);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %zuB @ %s:%d",
				 size, func, line);
			return NULL;
		}
	}

	vaddr_aligned = (void *)roundup((unsigned long)(*vaddr_unaligned),
					ring_base_align);

	return vaddr_aligned;
}
qdf_export_symbol(qdf_aligned_malloc_fl);
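
/*
 * Illustrative example (not driver code): allocating a ring base aligned to
 * 8 bytes. The unaligned pointer is what must eventually be handed back to
 * qdf_mem_free(); ring_size is a hypothetical caller-supplied byte count:
 *
 *	void *unaligned;
 *	void *ring = qdf_aligned_malloc_fl(ring_size, 8, &unaligned,
 *					   __func__, __LINE__);
 *	if (!ring)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(unaligned);
 */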

/**
 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements to be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will allocate a large amount of memory over multiple pages.
 * Since large contiguous allocations fail frequently, instead of allocating
 * one large block in a single shot, allocate multiple non-contiguous pages
 * and combine them at the point of actual usage.
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			qdf_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				qdf_print("cacheable page alloc fail, pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages) {
			qdf_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
					 PAGE_SIZE,
					&dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_alloc);
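
/*
 * Usage sketch (illustrative): carving 1024 descriptors of 64 bytes out of
 * individually allocated pages, then releasing them with the matching free.
 * With cacheable == true the elements live in kmalloc'd pages; otherwise
 * they come from qdf_mem_alloc_consistent() per page:
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */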

/**
 * qdf_mem_multi_pages_free() - free large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will free a large amount of memory over multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int page_idx;
	struct qdf_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			qdf_mem_free(pages->cacheable_pages[page_idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
qdf_export_symbol(qdf_mem_multi_pages_free);

/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: Number of elements to be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will make links across the multi-page allocated structure.
 *
 * Return: 0 on success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
		struct qdf_mem_multi_page_t *pages,
		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	if (c_elem)
		*c_elem = NULL;

	return 0;
}
qdf_export_symbol(qdf_mem_multi_page_link);
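
/*
 * Illustrative sketch (not driver code): after linking, the first word of
 * each element points at the next element (crossing page boundaries), so a
 * caller can treat the pages as a single singly linked free list:
 *
 *	void **elem = (void **)pages->cacheable_pages[0];  // list head
 *
 *	while (elem) {
 *		// hand the element out as a descriptor, or just walk on:
 *		elem = (void **)*elem;
 *	}
 *
 * The final element's link is set to NULL by qdf_mem_multi_page_link().
 */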

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C.  Note this function does not specifically handle overlapping
 * source and destination memory locations.  Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results.  Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memcpy(dst_addr, src_addr, num_bytes);
}
qdf_export_symbol(qdf_mem_copy);

qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
{
	qdf_shared_mem_t *shared_mem;
	qdf_dma_addr_t dma_addr, paddr;
	int ret;

	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
	if (!shared_mem) {
		qdf_err("Unable to allocate memory for shared resource struct");
		return NULL;
	}

	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
				size, qdf_mem_get_dma_addr_ptr(osdev,
						&shared_mem->mem_info));
	if (!shared_mem->vaddr) {
		qdf_err("Unable to allocate DMA memory for shared resource");
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);

	qdf_mem_zero(shared_mem->vaddr, size);
	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);

	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		qdf_err("Unable to get DMA sgtable");
		qdf_mem_free_consistent(osdev, osdev->dev,
					shared_mem->mem_info.size,
					shared_mem->vaddr,
					dma_addr,
					qdf_get_dma_mem_context(shared_mem,
								memctx));
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return shared_mem;
}

qdf_export_symbol(qdf_mem_shared_mem_alloc);

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where ptr can be NULL */
		return;
	}

	if (ptr == NULL) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, 0, num_bytes);
}
qdf_export_symbol(qdf_mem_zero);

/**
 * qdf_mem_copy_toio() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Return: none
 */
void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((!dst_addr) || (!src_addr)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memcpy_toio(dst_addr, src_addr, num_bytes);
}

qdf_export_symbol(qdf_mem_copy_toio);

/**
 * qdf_mem_set_io() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * Return: None
 */
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (!ptr) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset_io(ptr, value, num_bytes);
}

qdf_export_symbol(qdf_mem_set_io);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (ptr == NULL) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, value, num_bytes);
}
qdf_export_symbol(qdf_mem_set);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C.  Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memmove(dst_addr, src_addr, num_bytes);
}
qdf_export_symbol(qdf_mem_move);

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_dma_alloc() - allocates memory for dma
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size,
				      qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}

#elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_err("%s failed, size: %zu!", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}

#else
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
#endif

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
#else

static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
#endif

#ifdef MEMORY_DEBUG
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers; this function will init
	 * the header structure at the tail.
	 * Prefixing the header into a DMA buffer causes SMMU faults, so
	 * do not prefix headers into DMA buffers.
	 */
	qdf_mem_header_init(header, size, func, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);

void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_talloc_assert_no_children_fl(vaddr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers; this function will retrieve
	 * the header structure at the tail.
	 * Prefixing the header into a DMA buffer causes SMMU faults, so
	 * do not prefix headers into DMA buffers.
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);

#else

void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, qdf_dma_addr_t *paddr)
{
	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);

	if (vaddr)
		qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent);

void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);

#endif /* MEMORY_DEBUG */

void *qdf_aligned_mem_alloc_consistent_fl(
	qdf_device_t osdev, void *dev, qdf_size_t size,
	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
	qdf_dma_addr_t *paddr_aligned, uint32_t ring_base_align,
	const char *func, uint32_t line)
{
	void *vaddr_aligned;

	*vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev, size,
						    paddr_unaligned);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	if ((unsigned long)(*vaddr_unaligned) % ring_base_align) {
		qdf_mem_free_consistent(osdev, dev, size, *vaddr_unaligned,
					*paddr_unaligned, 0);
		*vaddr_unaligned = qdf_mem_alloc_consistent(osdev, dev,
				size + ring_base_align - 1, paddr_unaligned);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %zuB @ %s:%d",
				 size, func, line);
			return NULL;
		}
	}

	vaddr_aligned = (void *)roundup((unsigned long)(*vaddr_unaligned),
					ring_base_align);
	*paddr_aligned = *paddr_unaligned + ((unsigned long)(vaddr_aligned) -
		 (unsigned long)(*vaddr_unaligned));

	return vaddr_aligned;
}
qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);

/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}
qdf_export_symbol(qdf_mem_dma_sync_single_for_device);

/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
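
/*
 * Usage sketch (illustrative): handing a buffer to the device before DMA
 * and reclaiming it for the CPU afterwards. buf_paddr and len are the
 * hypothetical bus address and size of an already-mapped buffer; the
 * directions are the standard linux/dma-direction.h values:
 *
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, len,
 *					   DMA_TO_DEVICE);
 *	// ... device DMAs from the buffer ...
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, len,
 *					DMA_FROM_DEVICE);
 */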

void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);

void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);

/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 * @dst_addr: A six-byte array Ethernet address destination
 * @src_addr: A six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 *
 * Return: none
 */
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
{
	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	ether_addr_copy(dst_addr, src_addr);
}
qdf_export_symbol(qdf_ether_addr_copy);
1945