1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_str.h"
32 #include "qdf_talloc.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 #include <qdf_list.h>
37 
38 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
39 #include <net/cnss_prealloc.h>
40 #endif
41 
42 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
43 static bool mem_debug_disabled;
44 qdf_declare_param(mem_debug_disabled, bool);
45 qdf_export_symbol(mem_debug_disabled);
46 #endif
47 
48 #ifdef MEMORY_DEBUG
49 static bool is_initial_mem_debug_disabled;
50 #endif
51 
52 /* Preprocessor Definitions and Constants */
53 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
54 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
55 #define QDF_DEBUG_STRING_SIZE 512
56 
57 /**
58  * struct __qdf_mem_stat - qdf memory statistics
59  * @kmalloc: total kmalloc allocations
60  * @dma: total dma allocations
61  * @skb: total skb allocations
62  * @skb_total: total skb allocations in host driver
63  * @dp_tx_skb: total Tx skb allocations in datapath
64  * @dp_rx_skb: total Rx skb allocations in datapath
65  * @skb_mem_max: high watermark for skb allocations
66  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
67  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
68  * @dp_tx_skb_count: DP Tx buffer count
69  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
70  * @dp_rx_skb_count: DP Rx buffer count
71  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
72  * @tx_descs_outstanding: Current pending Tx descs count
73  * @tx_descs_max: High watermark for pending Tx descs count
74  */
75 static struct __qdf_mem_stat {
76 	qdf_atomic_t kmalloc;
77 	qdf_atomic_t dma;
78 	qdf_atomic_t skb;
79 	qdf_atomic_t skb_total;
80 	qdf_atomic_t dp_tx_skb;
81 	qdf_atomic_t dp_rx_skb;
82 	int32_t skb_mem_max;
83 	int32_t dp_tx_skb_mem_max;
84 	int32_t dp_rx_skb_mem_max;
85 	qdf_atomic_t dp_tx_skb_count;
86 	int32_t dp_tx_skb_count_max;
87 	qdf_atomic_t dp_rx_skb_count;
88 	int32_t dp_rx_skb_count_max;
89 	qdf_atomic_t tx_descs_outstanding;
90 	int32_t tx_descs_max;
91 } qdf_mem_stat;
92 
93 #ifdef MEMORY_DEBUG
94 #include "qdf_debug_domain.h"
95 
96 enum list_type {
97 	LIST_TYPE_MEM = 0,
98 	LIST_TYPE_DMA = 1,
99 	LIST_TYPE_NBUF = 2,
100 	LIST_TYPE_MAX,
101 };
102 
103 /**
104  * struct major_alloc_priv - private data registered with the debugfs entry
105  *                   created to list the major allocations
106  * @type:            type of the list to be parsed
107  * @threshold:       configured by the user by overwriting the respective
108  *                   debugfs entry. Used to list the functions which requested
109  *                   memory/dma allocations more than threshold number of times.
110  */
111 struct major_alloc_priv {
112 	enum list_type type;
113 	uint32_t threshold;
114 };
115 
116 static struct major_alloc_priv mem_priv = {
117 	/* List type set to mem */
118 	LIST_TYPE_MEM,
119 	/* initial threshold to list APIs which allocates mem >= 50 times */
120 	50
121 };
122 
123 static struct major_alloc_priv dma_priv = {
124 	/* List type set to DMA */
125 	LIST_TYPE_DMA,
126 	/* initial threshold to list APIs which allocates dma >= 50 times */
127 	50
128 };
129 
130 static struct major_alloc_priv nbuf_priv = {
131 	/* List type set to NBUF */
132 	LIST_TYPE_NBUF,
133 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
134 	50
135 };
136 
137 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
138 static qdf_spinlock_t qdf_mem_list_lock;
139 
140 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
141 static qdf_spinlock_t qdf_mem_dma_list_lock;
142 
143 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
144 {
145 	return &qdf_mem_domains[domain];
146 }
147 
148 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
149 {
150 	return &qdf_mem_dma_domains[domain];
151 }
152 
153 /**
154  * struct qdf_mem_header - memory object to debug
155  * @node: list node for the debug domain's allocation list
156  * @domain: the active memory domain at time of allocation
157  * @freed: flag set during free, used to detect double frees
158  *	Use uint8_t so we can detect corruption
159  * @func: name of the function the allocation was made from
160  * @line: line number of the file the allocation was made from
161  * @size: size of the allocation in bytes
162  * @caller: return address of the caller that requested the allocation
163  * @header: a known value, used to detect out-of-bounds access
164  * @time: timestamp at which allocation was made
165  */
166 struct qdf_mem_header {
167 	qdf_list_node_t node;
168 	enum qdf_debug_domain domain;
169 	uint8_t freed;
170 	char func[QDF_MEM_FUNC_NAME_SIZE];
171 	uint32_t line;
172 	uint32_t size;
173 	void *caller;
174 	uint64_t header;
175 	uint64_t time;
176 };
177 
178 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
179 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
180 
181 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
182 {
183 	return (struct qdf_mem_header *)ptr - 1;
184 }
185 
186 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
187 							    qdf_size_t size)
188 {
189 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
190 }
191 
192 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
193 {
194 	return (uint64_t *)((void *)(header + 1) + header->size);
195 }
196 
197 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
198 {
199 	return (void *)(header + 1);
200 }
201 
202 /* number of bytes needed for the qdf memory debug information */
203 #define QDF_MEM_DEBUG_SIZE \
204 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
205 
206 /* number of bytes needed for the qdf dma memory debug information */
207 #define QDF_DMA_MEM_DEBUG_SIZE \
208 	(sizeof(struct qdf_mem_header))
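
/*
 * Layout of a debug heap allocation, as produced by the allocation path
 * below (a sketch derived from the definitions above; DMA debug allocations
 * instead place the header after the payload and carry no trailer):
 *
 *	+-----------------------+------------------+-------------------+
 *	| struct qdf_mem_header | caller's payload | WLAN_MEM_TRAILER  |
 *	+-----------------------+------------------+-------------------+
 *	^                       ^
 *	kzalloc() result        pointer handed to the caller
 *	                        (qdf_mem_get_ptr()/qdf_mem_get_header())
 */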
209 
210 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
211 {
212 	QDF_BUG(header);
213 	if (!header)
214 		return;
215 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
216 }
217 
218 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
219 				const char *func, uint32_t line, void *caller)
220 {
221 	QDF_BUG(header);
222 	if (!header)
223 		return;
224 
225 	header->domain = qdf_debug_domain_get();
226 	header->freed = false;
227 
228 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
229 
230 	header->line = line;
231 	header->size = size;
232 	header->caller = caller;
233 	header->header = WLAN_MEM_HEADER;
234 	header->time = qdf_get_log_timestamp();
235 }
236 
237 enum qdf_mem_validation_bitmap {
238 	QDF_MEM_BAD_HEADER = 1 << 0,
239 	QDF_MEM_BAD_TRAILER = 1 << 1,
240 	QDF_MEM_BAD_SIZE = 1 << 2,
241 	QDF_MEM_DOUBLE_FREE = 1 << 3,
242 	QDF_MEM_BAD_FREED = 1 << 4,
243 	QDF_MEM_BAD_NODE = 1 << 5,
244 	QDF_MEM_BAD_DOMAIN = 1 << 6,
245 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
246 };
247 
248 static enum qdf_mem_validation_bitmap
249 qdf_mem_trailer_validate(struct qdf_mem_header *header)
250 {
251 	enum qdf_mem_validation_bitmap error_bitmap = 0;
252 
253 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
254 		error_bitmap |= QDF_MEM_BAD_TRAILER;
255 	return error_bitmap;
256 }
257 
258 static enum qdf_mem_validation_bitmap
259 qdf_mem_header_validate(struct qdf_mem_header *header,
260 			enum qdf_debug_domain domain)
261 {
262 	enum qdf_mem_validation_bitmap error_bitmap = 0;
263 
264 	if (header->header != WLAN_MEM_HEADER)
265 		error_bitmap |= QDF_MEM_BAD_HEADER;
266 
267 	if (header->size > QDF_MEM_MAX_MALLOC)
268 		error_bitmap |= QDF_MEM_BAD_SIZE;
269 
270 	if (header->freed == true)
271 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
272 	else if (header->freed)
273 		error_bitmap |= QDF_MEM_BAD_FREED;
274 
275 	if (!qdf_list_node_in_any_list(&header->node))
276 		error_bitmap |= QDF_MEM_BAD_NODE;
277 
278 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
279 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
280 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
281 	else if (header->domain != domain)
282 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
283 
284 	return error_bitmap;
285 }
286 
287 static void
288 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
289 			    enum qdf_debug_domain current_domain,
290 			    enum qdf_mem_validation_bitmap error_bitmap,
291 			    const char *func,
292 			    uint32_t line)
293 {
294 	if (!error_bitmap)
295 		return;
296 
297 	if (error_bitmap & QDF_MEM_BAD_HEADER)
298 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
299 			header->header, WLAN_MEM_HEADER);
300 
301 	if (error_bitmap & QDF_MEM_BAD_SIZE)
302 		qdf_err("Corrupted memory size %u (expected < %d)",
303 			header->size, QDF_MEM_MAX_MALLOC);
304 
305 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
306 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
307 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
308 
309 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
310 		qdf_err("Memory has previously been freed");
311 
312 	if (error_bitmap & QDF_MEM_BAD_FREED)
313 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
314 
315 	if (error_bitmap & QDF_MEM_BAD_NODE)
316 		qdf_err("Corrupted memory header node or double free");
317 
318 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
319 		qdf_err("Corrupted memory domain 0x%x", header->domain);
320 
321 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
322 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
323 			qdf_debug_domain_name(header->domain), header->domain,
324 			qdf_debug_domain_name(current_domain), current_domain);
325 
326 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
327 }
328 #endif /* MEMORY_DEBUG */
329 
330 u_int8_t prealloc_disabled = 1;
331 qdf_declare_param(prealloc_disabled, byte);
332 qdf_export_symbol(prealloc_disabled);
333 
334 #if defined WLAN_DEBUGFS
335 
336 /* Debugfs root directory for qdf_mem */
337 static struct dentry *qdf_mem_debugfs_root;
338 
339 #ifdef MEMORY_DEBUG
340 static int qdf_err_printer(void *priv, const char *fmt, ...)
341 {
342 	va_list args;
343 
344 	va_start(args, fmt);
345 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
346 	va_end(args);
347 
348 	return 0;
349 }
350 
351 static int seq_printf_printer(void *priv, const char *fmt, ...)
352 {
353 	struct seq_file *file = priv;
354 	va_list args;
355 
356 	va_start(args, fmt);
357 	seq_vprintf(file, fmt, args);
358 	seq_puts(file, "\n");
359 	va_end(args);
360 
361 	return 0;
362 }
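
/*
 * qdf_err_printer() and seq_printf_printer() both follow the
 * qdf_abstract_print signature, which lets the same table-dump logic target
 * either the kernel log or a debugfs seq_file. A caller-supplied adapter has
 * the same shape; a minimal sketch (the buffer type and field names here are
 * hypothetical, not part of this file):
 *
 *	static int my_buf_printer(void *priv, const char *fmt, ...)
 *	{
 *		struct my_print_buf *buf = priv;
 *		va_list args;
 *
 *		va_start(args, fmt);
 *		buf->len += vscnprintf(buf->data + buf->len,
 *				       sizeof(buf->data) - buf->len, fmt, args);
 *		va_end(args);
 *
 *		return 0;
 *	}
 */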
363 
364 /**
365  * struct __qdf_mem_info - memory statistics
366  * @func: the function which allocated memory
367  * @line: the line at which allocation happened
368  * @size: the size of allocation
369  * @caller: Address of the caller function
370  * @count: how many allocations of same type
371  * @time: timestamp at which allocation happened
372  */
373 struct __qdf_mem_info {
374 	char func[QDF_MEM_FUNC_NAME_SIZE];
375 	uint32_t line;
376 	uint32_t size;
377 	void *caller;
378 	uint32_t count;
379 	uint64_t time;
380 };
381 
382 /*
383  * The table depth defines the de-duplication proximity scope.
384  * A deeper table takes more time, so choose any optimum value.
385  */
386 #define QDF_MEM_STAT_TABLE_SIZE 8
387 
388 /**
389  * qdf_mem_debug_print_header() - memory debug header print logic
390  * @print: the print adapter function
391  * @print_priv: the private data to be consumed by @print
392  * @threshold: the threshold value set by user to list top allocations
393  *
394  * Return: None
395  */
396 static void qdf_mem_debug_print_header(qdf_abstract_print print,
397 				       void *print_priv,
398 				       uint32_t threshold)
399 {
400 	if (threshold)
401 		print(print_priv, "APIs that requested allocations >= %u times",
402 		      threshold);
403 	print(print_priv,
404 	      "--------------------------------------------------------------");
405 	print(print_priv,
406 	      " count    size     total    filename     caller    timestamp");
407 	print(print_priv,
408 	      "--------------------------------------------------------------");
409 }
410 
411 /**
412  * qdf_mem_meta_table_print() - memory metadata table print logic
413  * @table: the memory metadata table to print
414  * @print: the print adapter function
415  * @print_priv: the private data to be consumed by @print
416  * @threshold: the threshold value set by user to list top allocations
417  *
418  * Return: None
419  */
420 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
421 				     qdf_abstract_print print,
422 				     void *print_priv,
423 				     uint32_t threshold)
424 {
425 	int i;
426 	char debug_str[QDF_DEBUG_STRING_SIZE];
427 	size_t len = 0;
428 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
429 
430 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
431 			     "%s", debug_prefix);
432 
433 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
434 		if (!table[i].count)
435 			break;
436 
437 		print(print_priv,
438 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
439 		      table[i].count,
440 		      table[i].size,
441 		      table[i].count * table[i].size,
442 		      table[i].func,
443 		      table[i].line, table[i].caller,
444 		      table[i].time);
445 		len += qdf_scnprintf(debug_str + len,
446 				     sizeof(debug_str) - len,
447 				     " @ %s:%u %pS",
448 				     table[i].func,
449 				     table[i].line,
450 				     table[i].caller);
451 	}
452 	print(print_priv, "%s", debug_str);
453 }
454 
455 /**
456  * qdf_print_major_alloc() - memory metadata table print logic
457  * @table: the memory metadata table to print
458  * @print: the print adapter function
459  * @print_priv: the private data to be consumed by @print
460  * @threshold: the threshold value set by user to list top allocations
461  *
462  * Return: None
463  */
464 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
465 				  qdf_abstract_print print,
466 				  void *print_priv,
467 				  uint32_t threshold)
468 {
469 	int i;
470 
471 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 		if (!table[i].count)
473 			break;
474 		if (table[i].count >= threshold)
475 			print(print_priv,
476 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
477 			      table[i].count,
478 			      table[i].size,
479 			      table[i].count * table[i].size,
480 			      table[i].func,
481 			      table[i].line, table[i].caller,
482 			      table[i].time);
483 	}
484 }
485 
486 /**
487  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
488  * @table: the memory metadata table to insert into
489  * @meta: the memory metadata to insert
490  *
491  * Return: true if the table is full after inserting, false otherwise
492  */
493 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
494 				      struct qdf_mem_header *meta)
495 {
496 	int i;
497 
498 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
499 		if (!table[i].count) {
500 			qdf_str_lcopy(table[i].func, meta->func,
501 				      QDF_MEM_FUNC_NAME_SIZE);
502 			table[i].line = meta->line;
503 			table[i].size = meta->size;
504 			table[i].count = 1;
505 			table[i].caller = meta->caller;
506 			table[i].time = meta->time;
507 			break;
508 		}
509 
510 		if (qdf_str_eq(table[i].func, meta->func) &&
511 		    table[i].line == meta->line &&
512 		    table[i].size == meta->size &&
513 		    table[i].caller == meta->caller) {
514 			table[i].count++;
515 			break;
516 		}
517 	}
518 
519 	/* return true if the table is now full */
520 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
521 }
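
/*
 * De-duplication example (illustrative): two live allocations made from the
 * same call site (same func, line, size and return address) collapse into a
 * single row whose count becomes 2, while an allocation that differs in any
 * of those fields takes its own row. When all QDF_MEM_STAT_TABLE_SIZE rows
 * are in use, the function reports the table as full so the caller can print
 * and reset it before continuing the walk.
 */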
522 
523 /**
524  * qdf_mem_domain_print() - output agnostic memory domain print logic
525  * @domain: the memory domain to print
526  * @print: the print adapter function
527  * @print_priv: the private data to be consumed by @print
528  * @threshold: the threshold value set by user to list top allocations
529  * @mem_print: pointer to function which prints the memory allocation data
530  *
531  * Return: None
532  */
533 static void qdf_mem_domain_print(qdf_list_t *domain,
534 				 qdf_abstract_print print,
535 				 void *print_priv,
536 				 uint32_t threshold,
537 				 void (*mem_print)(struct __qdf_mem_info *,
538 						   qdf_abstract_print,
539 						   void *, uint32_t))
540 {
541 	QDF_STATUS status;
542 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
543 	qdf_list_node_t *node;
544 
545 	qdf_mem_zero(table, sizeof(table));
546 	qdf_mem_debug_print_header(print, print_priv, threshold);
547 
548 	/* hold lock while inserting to avoid use-after-free of the metadata */
549 	qdf_spin_lock(&qdf_mem_list_lock);
550 	status = qdf_list_peek_front(domain, &node);
551 	while (QDF_IS_STATUS_SUCCESS(status)) {
552 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
553 		bool is_full = qdf_mem_meta_table_insert(table, meta);
554 
555 		qdf_spin_unlock(&qdf_mem_list_lock);
556 
557 		if (is_full) {
558 			(*mem_print)(table, print, print_priv, threshold);
559 			qdf_mem_zero(table, sizeof(table));
560 		}
561 
562 		qdf_spin_lock(&qdf_mem_list_lock);
563 		status = qdf_list_peek_next(domain, node, &node);
564 	}
565 	qdf_spin_unlock(&qdf_mem_list_lock);
566 
567 	(*mem_print)(table, print, print_priv, threshold);
568 }
569 
570 /**
571  * qdf_mem_seq_start() - sequential callback to start
572  * @seq: seq_file handle
573  * @pos: The start position of the sequence
574  *
575  * Return: iterator pointer, or NULL if iteration is complete
576  */
577 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
578 {
579 	enum qdf_debug_domain domain = *pos;
580 
581 	if (!qdf_debug_domain_valid(domain))
582 		return NULL;
583 
584 	/* just use the current position as our iterator */
585 	return pos;
586 }
587 
588 /**
589  * qdf_mem_seq_next() - next sequential callback
590  * @seq: seq_file handle
591  * @v: the current iterator
592  * @pos: the current position
593  *
594  * Get the next node and release previous node.
595  *
596  * Return: iterator pointer, or NULL if iteration is complete
597  */
598 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
599 {
600 	++*pos;
601 
602 	return qdf_mem_seq_start(seq, pos);
603 }
604 
605 /**
606  * qdf_mem_seq_stop() - stop sequential callback
607  * @seq: seq_file handle
608  * @v: current iterator
609  *
610  * Return: None
611  */
612 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
613 
614 /**
615  * qdf_mem_seq_show() - print sequential callback
616  * @seq: seq_file handle
617  * @v: current iterator
618  *
619  * Return: 0 - success
620  */
621 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
622 {
623 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
624 
625 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
626 		   qdf_debug_domain_name(domain_id), domain_id);
627 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
628 			     seq_printf_printer,
629 			     seq,
630 			     0,
631 			     qdf_mem_meta_table_print);
632 
633 	return 0;
634 }
635 
636 /* sequential file operation table */
637 static const struct seq_operations qdf_mem_seq_ops = {
638 	.start = qdf_mem_seq_start,
639 	.next  = qdf_mem_seq_next,
640 	.stop  = qdf_mem_seq_stop,
641 	.show  = qdf_mem_seq_show,
642 };
643 
644 
645 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
646 {
647 	return seq_open(file, &qdf_mem_seq_ops);
648 }
649 
650 /**
651  * qdf_major_alloc_show() - print sequential callback
652  * @seq: seq_file handle
653  * @v: current iterator
654  *
655  * Return: 0 - success
656  */
657 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
658 {
659 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
660 	struct major_alloc_priv *priv;
661 	qdf_list_t *list;
662 
663 	priv = (struct major_alloc_priv *)seq->private;
664 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
665 		   qdf_debug_domain_name(domain_id), domain_id);
666 
667 	switch (priv->type) {
668 	case LIST_TYPE_MEM:
669 		list = qdf_mem_list_get(domain_id);
670 		break;
671 	case LIST_TYPE_DMA:
672 		list = qdf_mem_dma_list(domain_id);
673 		break;
674 	default:
675 		list = NULL;
676 		break;
677 	}
678 
679 	if (list)
680 		qdf_mem_domain_print(list,
681 				     seq_printf_printer,
682 				     seq,
683 				     priv->threshold,
684 				     qdf_print_major_alloc);
685 
686 	return 0;
687 }
688 
689 /* sequential file operation table created to track major allocs */
690 static const struct seq_operations qdf_major_allocs_seq_ops = {
691 	.start = qdf_mem_seq_start,
692 	.next = qdf_mem_seq_next,
693 	.stop = qdf_mem_seq_stop,
694 	.show = qdf_major_alloc_show,
695 };
696 
697 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
698 {
699 	void *private = inode->i_private;
700 	struct seq_file *seq;
701 	int rc;
702 
703 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
704 	if (rc == 0) {
705 		seq = file->private_data;
706 		seq->private = private;
707 	}
708 	return rc;
709 }
710 
711 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
712 					     const char __user *user_buf,
713 					     size_t count,
714 					     loff_t *pos)
715 {
716 	char buf[32];
717 	ssize_t buf_size;
718 	uint32_t threshold;
719 	struct seq_file *seq = file->private_data;
720 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
721 
722 	buf_size = min(count, (sizeof(buf) - 1));
723 	if (buf_size <= 0)
724 		return 0;
725 	if (copy_from_user(buf, user_buf, buf_size))
726 		return -EFAULT;
727 	buf[buf_size] = '\0';
728 	if (!kstrtou32(buf, 10, &threshold))
729 		priv->threshold = threshold;
730 	return buf_size;
731 }
732 
733 /**
734  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
735  * @threshold: the threshold value set by user to list top allocations
736  * @print: the print adapter function
737  * @print_priv: the private data to be consumed by @print
738  * @mem_print: pointer to function which prints the memory allocation data
739  *
740  * Return: None
741  */
742 static void
743 qdf_print_major_nbuf_allocs(uint32_t threshold,
744 			    qdf_abstract_print print,
745 			    void *print_priv,
746 			    void (*mem_print)(struct __qdf_mem_info *,
747 					      qdf_abstract_print,
748 					      void *, uint32_t))
749 {
750 	uint32_t nbuf_iter;
751 	unsigned long irq_flag = 0;
752 	QDF_NBUF_TRACK *p_node;
753 	QDF_NBUF_TRACK *p_prev;
754 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
755 	struct qdf_mem_header meta;
756 	bool is_full;
757 
758 	qdf_mem_zero(table, sizeof(table));
759 	qdf_mem_debug_print_header(print, print_priv, threshold);
760 
761 	if (is_initial_mem_debug_disabled)
762 		return;
763 
764 	qdf_rl_info("major nbuf print with threshold %u", threshold);
765 
766 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
767 	     nbuf_iter++) {
768 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
769 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
770 		while (p_node) {
771 			meta.line = p_node->line_num;
772 			meta.size = p_node->size;
773 			meta.caller = NULL;
774 			meta.time = p_node->time;
775 			qdf_str_lcopy(meta.func, p_node->func_name,
776 				      QDF_MEM_FUNC_NAME_SIZE);
777 
778 			is_full = qdf_mem_meta_table_insert(table, &meta);
779 
780 			if (is_full) {
781 				(*mem_print)(table, print,
782 					     print_priv, threshold);
783 				qdf_mem_zero(table, sizeof(table));
784 			}
785 
786 			p_prev = p_node;
787 			p_node = p_node->p_next;
788 		}
789 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
790 	}
791 
792 	(*mem_print)(table, print, print_priv, threshold);
793 
794 	qdf_rl_info("major nbuf print end");
795 }
796 
797 /**
798  * qdf_major_nbuf_alloc_show() - print sequential callback
799  * @seq: seq_file handle
800  * @v: current iterator
801  *
802  * Return: 0 - success
803  */
804 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
805 {
806 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
807 
808 	if (!priv) {
809 		qdf_err("priv is null");
810 		return -EINVAL;
811 	}
812 
813 	qdf_print_major_nbuf_allocs(priv->threshold,
814 				    seq_printf_printer,
815 				    seq,
816 				    qdf_print_major_alloc);
817 
818 	return 0;
819 }
820 
821 /**
822  * qdf_nbuf_seq_start() - sequential callback to start
823  * @seq: seq_file handle
824  * @pos: The start position of the sequence
825  *
826  * Return: iterator pointer, or NULL if iteration is complete
827  */
828 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
829 {
830 	enum qdf_debug_domain domain = *pos;
831 
832 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
833 		return NULL;
834 
835 	return pos;
836 }
837 
838 /**
839  * qdf_nbuf_seq_next() - next sequential callback
840  * @seq: seq_file handle
841  * @v: the current iterator
842  * @pos: the current position
843  *
844  * Get the next node and release previous node.
845  *
846  * Return: iterator pointer, or NULL if iteration is complete
847  */
848 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
849 {
850 	++*pos;
851 
852 	return qdf_nbuf_seq_start(seq, pos);
853 }
854 
855 /**
856  * qdf_nbuf_seq_stop() - stop sequential callback
857  * @seq: seq_file handle
858  * @v: current iterator
859  *
860  * Return: None
861  */
862 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
863 
864 /* sequential file operation table created to track major skb allocs */
865 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
866 	.start = qdf_nbuf_seq_start,
867 	.next = qdf_nbuf_seq_next,
868 	.stop = qdf_nbuf_seq_stop,
869 	.show = qdf_major_nbuf_alloc_show,
870 };
871 
872 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
873 {
874 	void *private = inode->i_private;
875 	struct seq_file *seq;
876 	int rc;
877 
878 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
879 	if (rc == 0) {
880 		seq = file->private_data;
881 		seq->private = private;
882 	}
883 	return rc;
884 }
885 
886 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
887 						  const char __user *user_buf,
888 						  size_t count,
889 						  loff_t *pos)
890 {
891 	char buf[32];
892 	ssize_t buf_size;
893 	uint32_t threshold;
894 	struct seq_file *seq = file->private_data;
895 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
896 
897 	buf_size = min(count, (sizeof(buf) - 1));
898 	if (buf_size <= 0)
899 		return 0;
900 	if (copy_from_user(buf, user_buf, buf_size))
901 		return -EFAULT;
902 	buf[buf_size] = '\0';
903 	if (!kstrtou32(buf, 10, &threshold))
904 		priv->threshold = threshold;
905 	return buf_size;
906 }
907 
908 /* file operation table for listing major allocs */
909 static const struct file_operations fops_qdf_major_allocs = {
910 	.owner = THIS_MODULE,
911 	.open = qdf_major_allocs_open,
912 	.read = seq_read,
913 	.llseek = seq_lseek,
914 	.release = seq_release,
915 	.write = qdf_major_alloc_set_threshold,
916 };
917 
918 /* debugfs file operation table */
919 static const struct file_operations fops_qdf_mem_debugfs = {
920 	.owner = THIS_MODULE,
921 	.open = qdf_mem_debugfs_open,
922 	.read = seq_read,
923 	.llseek = seq_lseek,
924 	.release = seq_release,
925 };
926 
927 /* file operation table for listing major allocs */
928 static const struct file_operations fops_qdf_nbuf_major_allocs = {
929 	.owner = THIS_MODULE,
930 	.open = qdf_major_nbuf_allocs_open,
931 	.read = seq_read,
932 	.llseek = seq_lseek,
933 	.release = seq_release,
934 	.write = qdf_major_nbuf_alloc_set_threshold,
935 };
936 
937 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
938 {
939 	if (is_initial_mem_debug_disabled)
940 		return QDF_STATUS_SUCCESS;
941 
942 	if (!qdf_mem_debugfs_root)
943 		return QDF_STATUS_E_FAILURE;
944 
945 	debugfs_create_file("list",
946 			    S_IRUSR,
947 			    qdf_mem_debugfs_root,
948 			    NULL,
949 			    &fops_qdf_mem_debugfs);
950 
951 	debugfs_create_file("major_mem_allocs",
952 			    0600,
953 			    qdf_mem_debugfs_root,
954 			    &mem_priv,
955 			    &fops_qdf_major_allocs);
956 
957 	debugfs_create_file("major_dma_allocs",
958 			    0600,
959 			    qdf_mem_debugfs_root,
960 			    &dma_priv,
961 			    &fops_qdf_major_allocs);
962 
963 	debugfs_create_file("major_nbuf_allocs",
964 			    0600,
965 			    qdf_mem_debugfs_root,
966 			    &nbuf_priv,
967 			    &fops_qdf_nbuf_major_allocs);
968 
969 	return QDF_STATUS_SUCCESS;
970 }
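
/*
 * The entries created above appear under the "mem" directory set up in
 * qdf_mem_debugfs_init(). Illustrative interaction from user space (the
 * debugfs mount point and qdf root directory are platform dependent and
 * assumed here, hence the elided prefix):
 *
 *	cat .../mem/list                     # dump all tracked allocations
 *	echo 100 > .../mem/major_mem_allocs  # raise the reporting threshold
 *	cat .../mem/major_mem_allocs         # call sites with >= 100 allocs
 */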
971 
972 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
973 {
974 	return QDF_STATUS_SUCCESS;
975 }
976 
977 #else /* MEMORY_DEBUG */
978 
979 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
980 {
981 	return QDF_STATUS_E_NOSUPPORT;
982 }
983 
984 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
985 {
986 	return QDF_STATUS_E_NOSUPPORT;
987 }
988 
989 #endif /* MEMORY_DEBUG */
990 
991 
992 static void qdf_mem_debugfs_exit(void)
993 {
994 	debugfs_remove_recursive(qdf_mem_debugfs_root);
995 	qdf_mem_debugfs_root = NULL;
996 }
997 
998 static QDF_STATUS qdf_mem_debugfs_init(void)
999 {
1000 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1001 
1002 	if (!qdf_debugfs_root)
1003 		return QDF_STATUS_E_FAILURE;
1004 
1005 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1006 
1007 	if (!qdf_mem_debugfs_root)
1008 		return QDF_STATUS_E_FAILURE;
1009 
1010 
1011 	debugfs_create_atomic_t("kmalloc",
1012 				S_IRUSR,
1013 				qdf_mem_debugfs_root,
1014 				&qdf_mem_stat.kmalloc);
1015 
1016 	debugfs_create_atomic_t("dma",
1017 				S_IRUSR,
1018 				qdf_mem_debugfs_root,
1019 				&qdf_mem_stat.dma);
1020 
1021 	debugfs_create_atomic_t("skb",
1022 				S_IRUSR,
1023 				qdf_mem_debugfs_root,
1024 				&qdf_mem_stat.skb);
1025 
1026 	return QDF_STATUS_SUCCESS;
1027 }
1028 
1029 #else /* WLAN_DEBUGFS */
1030 
1031 static QDF_STATUS qdf_mem_debugfs_init(void)
1032 {
1033 	return QDF_STATUS_E_NOSUPPORT;
1034 }
1035 static void qdf_mem_debugfs_exit(void) {}
1036 
1037 
1038 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1039 {
1040 	return QDF_STATUS_E_NOSUPPORT;
1041 }
1042 
1043 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1044 {
1045 	return QDF_STATUS_E_NOSUPPORT;
1046 }
1047 
1048 #endif /* WLAN_DEBUGFS */
1049 
1050 void qdf_mem_kmalloc_inc(qdf_size_t size)
1051 {
1052 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1053 }
1054 
1055 static void qdf_mem_dma_inc(qdf_size_t size)
1056 {
1057 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1058 }
1059 
1060 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1061 void qdf_mem_skb_inc(qdf_size_t size)
1062 {
1063 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1064 }
1065 
1066 void qdf_mem_skb_dec(qdf_size_t size)
1067 {
1068 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1069 }
1070 
1071 void qdf_mem_skb_total_inc(qdf_size_t size)
1072 {
1073 	int32_t skb_mem_max = 0;
1074 
1075 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1076 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1077 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1078 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1079 }
1080 
1081 void qdf_mem_skb_total_dec(qdf_size_t size)
1082 {
1083 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1084 }
1085 
1086 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1087 {
1088 	int32_t curr_dp_tx_skb_mem_max = 0;
1089 
1090 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1091 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1092 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1093 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1094 }
1095 
1096 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1097 {
1098 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1099 }
1100 
1101 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1102 {
1103 	int32_t curr_dp_rx_skb_mem_max = 0;
1104 
1105 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1106 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1107 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1108 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1109 }
1110 
1111 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1112 {
1113 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1114 }
1115 
1116 void qdf_mem_dp_tx_skb_cnt_inc(void)
1117 {
1118 	int32_t curr_dp_tx_skb_count_max = 0;
1119 
1120 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1121 	curr_dp_tx_skb_count_max =
1122 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1123 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1124 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1125 }
1126 
1127 void qdf_mem_dp_tx_skb_cnt_dec(void)
1128 {
1129 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1130 }
1131 
1132 void qdf_mem_dp_rx_skb_cnt_inc(void)
1133 {
1134 	int32_t curr_dp_rx_skb_count_max = 0;
1135 
1136 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1137 	curr_dp_rx_skb_count_max =
1138 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1139 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1140 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1141 }
1142 
1143 void qdf_mem_dp_rx_skb_cnt_dec(void)
1144 {
1145 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1146 }
1147 #endif
1148 
1149 void qdf_mem_kmalloc_dec(qdf_size_t size)
1150 {
1151 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1152 }
1153 
1154 static inline void qdf_mem_dma_dec(qdf_size_t size)
1155 {
1156 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1157 }
1158 
1159 /**
1160  * __qdf_mempool_init() - Create and initialize memory pool
1161  *
1162  * @osdev: platform device object
1163  * @pool_addr: address of the pool created
1164  * @elem_cnt: no. of elements in pool
1165  * @elem_size: size of each pool element in bytes
1166  * @flags: flags
1167  *
1168  * Return: 0 on success, error code on failure (pool handle returned via @pool_addr)
1169  */
1170 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1171 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1172 {
1173 	__qdf_mempool_ctxt_t *new_pool = NULL;
1174 	u_int32_t align = L1_CACHE_BYTES;
1175 	unsigned long aligned_pool_mem;
1176 	int pool_id;
1177 	int i;
1178 
1179 	if (prealloc_disabled) {
1180 		/* TBD: We can maintain a list of pools in qdf_device_t
1181 		 * to help debugging
1182 		 * when pre-allocation is not enabled
1183 		 */
1184 		new_pool = (__qdf_mempool_ctxt_t *)
1185 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1186 		if (!new_pool)
1187 			return -ENOMEM;
1188 
1189 		memset(new_pool, 0, sizeof(*new_pool));
1190 		/* TBD: define flags for zeroing buffers etc */
1191 		new_pool->flags = flags;
1192 		new_pool->elem_size = elem_size;
1193 		new_pool->max_elem = elem_cnt;
1194 		*pool_addr = new_pool;
1195 		return 0;
1196 	}
1197 
1198 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1199 		if (!osdev->mem_pool[pool_id])
1200 			break;
1201 	}
1202 
1203 	if (pool_id == MAX_MEM_POOLS)
1204 		return -ENOMEM;
1205 
1206 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1207 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1208 	if (!new_pool)
1209 		return -ENOMEM;
1210 
1211 	memset(new_pool, 0, sizeof(*new_pool));
1212 	/* TBD: define flags for zeroing buffers etc */
1213 	new_pool->flags = flags;
1214 	new_pool->pool_id = pool_id;
1215 
1216 	/* Round up the element size to cacheline */
1217 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1218 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1219 				((align)?(align - 1):0);
1220 
1221 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1222 	if (!new_pool->pool_mem) {
1223 			/* TBD: Check if we need get_free_pages above */
1224 		kfree(new_pool);
1225 		osdev->mem_pool[pool_id] = NULL;
1226 		return -ENOMEM;
1227 	}
1228 
1229 	spin_lock_init(&new_pool->lock);
1230 
1231 	/* Initialize free list */
1232 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1233 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1234 	STAILQ_INIT(&new_pool->free_list);
1235 
1236 	for (i = 0; i < elem_cnt; i++)
1237 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1238 			(mempool_elem_t *)(aligned_pool_mem +
1239 			(new_pool->elem_size * i)), mempool_entry);
1240 
1241 
1242 	new_pool->free_cnt = elem_cnt;
1243 	*pool_addr = new_pool;
1244 	return 0;
1245 }
1246 qdf_export_symbol(__qdf_mempool_init);
1247 
1248 /**
1249  * __qdf_mempool_destroy() - Destroy memory pool
1250  * @osdev: platform device object
1251  * @pool: handle to the memory pool
1252  *
1253  * Returns: none
1254  */
1255 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1256 {
1257 	int pool_id = 0;
1258 
1259 	if (!pool)
1260 		return;
1261 
1262 	if (prealloc_disabled) {
1263 		kfree(pool);
1264 		return;
1265 	}
1266 
1267 	pool_id = pool->pool_id;
1268 
1269 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1270 	kfree(pool->pool_mem);
1271 	kfree(pool);
1272 	osdev->mem_pool[pool_id] = NULL;
1273 }
1274 qdf_export_symbol(__qdf_mempool_destroy);
1275 
1276 /**
1277  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1278  *
1279  * @osdev: platform device object
1280  * @pool: handle to the memory pool
1281  *
1282  * Return: Pointer to the allocated element or NULL if the pool is empty
1283  */
1284 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1285 {
1286 	void *buf = NULL;
1287 
1288 	if (!pool)
1289 		return NULL;
1290 
1291 	if (prealloc_disabled)
1292 		return  qdf_mem_malloc(pool->elem_size);
1293 
1294 	spin_lock_bh(&pool->lock);
1295 
1296 	buf = STAILQ_FIRST(&pool->free_list);
1297 	if (buf) {
1298 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1299 		pool->free_cnt--;
1300 	}
1301 
1302 	/* TBD: Update free count if debug is enabled */
1303 	spin_unlock_bh(&pool->lock);
1304 
1305 	return buf;
1306 }
1307 qdf_export_symbol(__qdf_mempool_alloc);
1308 
1309 /**
1310  * __qdf_mempool_free() - Free a memory pool element
1311  * @osdev: Platform device object
1312  * @pool: Handle to memory pool
1313  * @buf: Element to be freed
1314  *
1315  * Returns: none
1316  */
1317 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1318 {
1319 	if (!pool)
1320 		return;
1321 
1322 
1323 	if (prealloc_disabled)
1324 		return qdf_mem_free(buf);
1325 
1326 	spin_lock_bh(&pool->lock);
1327 	pool->free_cnt++;
1328 
1329 	STAILQ_INSERT_TAIL
1330 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1331 	spin_unlock_bh(&pool->lock);
1332 }
1333 qdf_export_symbol(__qdf_mempool_free);
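
/*
 * Usage sketch for the pool APIs above (illustrative only; "osdev" and
 * "struct my_elem" are assumed to exist in the caller and are not defined
 * in this file):
 *
 *	__qdf_mempool_t pool;
 *	struct my_elem *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(*elem), 0))
 *		return -ENOMEM;
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		... use elem ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	__qdf_mempool_destroy(osdev, pool);
 */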
1334 
1335 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
1336 /**
1337  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1338  * @size: the number of bytes to allocate
1339  *
1340  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1341  * a chunk of pre-allocated memory. If size is less than or equal to
1342  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1343  *
1344  * Return: NULL on failure, non-NULL on success
1345  */
1346 static void *qdf_mem_prealloc_get(size_t size)
1347 {
1348 	void *ptr;
1349 
1350 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1351 		return NULL;
1352 
1353 	ptr = wcnss_prealloc_get(size);
1354 	if (!ptr)
1355 		return NULL;
1356 
1357 	memset(ptr, 0, size);
1358 
1359 	return ptr;
1360 }
1361 
1362 static inline bool qdf_mem_prealloc_put(void *ptr)
1363 {
1364 	return wcnss_prealloc_put(ptr);
1365 }
1366 #else
1367 static inline void *qdf_mem_prealloc_get(size_t size)
1368 {
1369 	return NULL;
1370 }
1371 
1372 static inline bool qdf_mem_prealloc_put(void *ptr)
1373 {
1374 	return false;
1375 }
1376 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
1377 
1378 static int qdf_mem_malloc_flags(void)
1379 {
1380 	if (in_interrupt() || irqs_disabled() || in_atomic())
1381 		return GFP_ATOMIC;
1382 
1383 	return GFP_KERNEL;
1384 }
1385 
1386 /* External Function implementation */
1387 #ifdef MEMORY_DEBUG
1388 /**
1389  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1390  *
1391  * Return: value of mem_debug_disabled qdf module argument
1392  */
1393 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1394 bool qdf_mem_debug_config_get(void)
1395 {
1396 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1397 	return false;
1398 }
1399 #else
1400 bool qdf_mem_debug_config_get(void)
1401 {
1402 	return mem_debug_disabled;
1403 }
1404 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1405 
1406 /**
1407  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1408  *
1409  * Return: none
1410  */
1411 static void qdf_mem_debug_init(void)
1412 {
1413 	int i;
1414 
1415 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1416 
1417 	if (is_initial_mem_debug_disabled)
1418 		return;
1419 
1420 	/* Initializing the list with a maximum size of 60000 */
1421 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1422 		qdf_list_create(&qdf_mem_domains[i], 60000);
1423 	qdf_spinlock_create(&qdf_mem_list_lock);
1424 
1425 	/* dma */
1426 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1427 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1428 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1429 }
1430 
1431 static uint32_t
1432 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1433 			       qdf_list_t *mem_list)
1434 {
1435 	if (is_initial_mem_debug_disabled)
1436 		return 0;
1437 
1438 	if (qdf_list_empty(mem_list))
1439 		return 0;
1440 
1441 	qdf_err("Memory leaks detected in %s domain!",
1442 		qdf_debug_domain_name(domain));
1443 	qdf_mem_domain_print(mem_list,
1444 			     qdf_err_printer,
1445 			     NULL,
1446 			     0,
1447 			     qdf_mem_meta_table_print);
1448 
1449 	return mem_list->count;
1450 }
1451 
1452 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1453 {
1454 	uint32_t leak_count = 0;
1455 	int i;
1456 
1457 	if (is_initial_mem_debug_disabled)
1458 		return;
1459 
1460 	/* detect and print leaks */
1461 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1462 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1463 
1464 	if (leak_count)
1465 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1466 				   leak_count);
1467 }
1468 
1469 /**
1470  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1471  *
1472  * Return: none
1473  */
1474 static void qdf_mem_debug_exit(void)
1475 {
1476 	int i;
1477 
1478 	if (is_initial_mem_debug_disabled)
1479 		return;
1480 
1481 	/* mem */
1482 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1483 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1484 		qdf_list_destroy(qdf_mem_list_get(i));
1485 
1486 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1487 
1488 	/* dma */
1489 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1490 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1491 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1492 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1493 }
1494 
1495 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1496 			   void *caller, uint32_t flag)
1497 {
1498 	QDF_STATUS status;
1499 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1500 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1501 	struct qdf_mem_header *header;
1502 	void *ptr;
1503 	unsigned long start, duration;
1504 
1505 	if (is_initial_mem_debug_disabled)
1506 		return __qdf_mem_malloc(size, func, line);
1507 
1508 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1509 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1510 		return NULL;
1511 	}
1512 
1513 	ptr = qdf_mem_prealloc_get(size);
1514 	if (ptr)
1515 		return ptr;
1516 
1517 	if (!flag)
1518 		flag = qdf_mem_malloc_flags();
1519 
1520 	start = qdf_mc_timer_get_system_time();
1521 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1522 	duration = qdf_mc_timer_get_system_time() - start;
1523 
1524 	if (duration > QDF_MEM_WARN_THRESHOLD)
1525 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1526 			 duration, size, func, line);
1527 
1528 	if (!header) {
1529 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1530 		return NULL;
1531 	}
1532 
1533 	qdf_mem_header_init(header, size, func, line, caller);
1534 	qdf_mem_trailer_init(header);
1535 	ptr = qdf_mem_get_ptr(header);
1536 
1537 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1538 	status = qdf_list_insert_front(mem_list, &header->node);
1539 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1540 	if (QDF_IS_STATUS_ERROR(status))
1541 		qdf_err("Failed to insert memory header; status %d", status);
1542 
1543 	qdf_mem_kmalloc_inc(ksize(header));
1544 
1545 	return ptr;
1546 }
1547 qdf_export_symbol(qdf_mem_malloc_debug);
1548 
1549 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1550 {
1551 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1552 	struct qdf_mem_header *header;
1553 	enum qdf_mem_validation_bitmap error_bitmap;
1554 
1555 	if (is_initial_mem_debug_disabled) {
1556 		__qdf_mem_free(ptr);
1557 		return;
1558 	}
1559 
1560 	/* freeing a null pointer is valid */
1561 	if (qdf_unlikely(!ptr))
1562 		return;
1563 
1564 	if (qdf_mem_prealloc_put(ptr))
1565 		return;
1566 
1567 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1568 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1569 				   ptr);
1570 
1571 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1572 
1573 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1574 	header = qdf_mem_get_header(ptr);
1575 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1576 	error_bitmap |= qdf_mem_trailer_validate(header);
1577 
1578 	if (!error_bitmap) {
1579 		header->freed = true;
1580 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1581 				     &header->node);
1582 	}
1583 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1584 
1585 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1586 				    func, line);
1587 
1588 	qdf_mem_kmalloc_dec(ksize(header));
1589 	kfree(header);
1590 }
1591 qdf_export_symbol(qdf_mem_free_debug);
1592 
1593 void qdf_mem_check_for_leaks(void)
1594 {
1595 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1596 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1597 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1598 	uint32_t leaks_count = 0;
1599 
1600 	if (is_initial_mem_debug_disabled)
1601 		return;
1602 
1603 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1604 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1605 
1606 	if (leaks_count)
1607 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1608 				   leaks_count);
1609 }
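
/*
 * Note: qdf_mem_check_for_leaks() inspects only the currently active debug
 * domain; it is typically invoked just before a domain transition (an
 * assumption based on how the per-domain lists are maintained here) so that
 * anything still tracked is reported, and QDF_MEMDEBUG_PANIC fires, before
 * the domain changes.
 */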
1610 
1611 /**
1612  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1613  * qdf_mem_multi_pages_alloc
1614  * @osdev: OS device handle pointer
1615  * @pages: Multi page information storage
1616  * @element_size: Each element size
1617  * @element_num: Total number of elements should be allocated
1618  * @memctxt: Memory context
1619  * @cacheable: Coherent memory or cacheable memory
1620  * @func: Caller of this allocator
1621  * @line: Line number of the caller
1622  * @caller: Return address of the caller
1623  *
1624  * This function allocates a large amount of memory across multiple pages.
1625  * Large contiguous allocations fail frequently, so instead of allocating
1626  * the whole buffer in one shot, allocate multiple non-contiguous pages and
1627  * combine them at the point of use.
1628  *
1629  * Return: None
1630  */
1631 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1632 				     struct qdf_mem_multi_page_t *pages,
1633 				     size_t element_size, uint16_t element_num,
1634 				     qdf_dma_context_t memctxt, bool cacheable,
1635 				     const char *func, uint32_t line,
1636 				     void *caller)
1637 {
1638 	uint16_t page_idx;
1639 	struct qdf_mem_dma_page_t *dma_pages;
1640 	void **cacheable_pages = NULL;
1641 	uint16_t i;
1642 
1643 	if (!pages->page_size)
1644 		pages->page_size = qdf_page_size;
1645 
1646 	pages->num_element_per_page = pages->page_size / element_size;
1647 	if (!pages->num_element_per_page) {
1648 		qdf_print("Invalid page %d or element size %d",
1649 			  (int)pages->page_size, (int)element_size);
1650 		goto out_fail;
1651 	}
1652 
1653 	pages->num_pages = element_num / pages->num_element_per_page;
1654 	if (element_num % pages->num_element_per_page)
1655 		pages->num_pages++;
1656 
1657 	if (cacheable) {
1658 		/* Pages information storage */
1659 		pages->cacheable_pages = qdf_mem_malloc_debug(
1660 			pages->num_pages * sizeof(pages->cacheable_pages),
1661 			func, line, caller, 0);
1662 		if (!pages->cacheable_pages)
1663 			goto out_fail;
1664 
1665 		cacheable_pages = pages->cacheable_pages;
1666 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1667 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1668 				pages->page_size, func, line, caller, 0);
1669 			if (!cacheable_pages[page_idx])
1670 				goto page_alloc_fail;
1671 		}
1672 		pages->dma_pages = NULL;
1673 	} else {
1674 		pages->dma_pages = qdf_mem_malloc_debug(
1675 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1676 			func, line, caller, 0);
1677 		if (!pages->dma_pages)
1678 			goto out_fail;
1679 
1680 		dma_pages = pages->dma_pages;
1681 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1682 			dma_pages->page_v_addr_start =
1683 				qdf_mem_alloc_consistent_debug(
1684 					osdev, osdev->dev, pages->page_size,
1685 					&dma_pages->page_p_addr,
1686 					func, line, caller);
1687 			if (!dma_pages->page_v_addr_start) {
1688 				qdf_print("dmaable page alloc fail pi %d",
1689 					  page_idx);
1690 				goto page_alloc_fail;
1691 			}
1692 			dma_pages->page_v_addr_end =
1693 				dma_pages->page_v_addr_start + pages->page_size;
1694 			dma_pages++;
1695 		}
1696 		pages->cacheable_pages = NULL;
1697 	}
1698 	return;
1699 
1700 page_alloc_fail:
1701 	if (cacheable) {
1702 		for (i = 0; i < page_idx; i++)
1703 			qdf_mem_free_debug(pages->cacheable_pages[i],
1704 					   func, line);
1705 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1706 	} else {
1707 		dma_pages = pages->dma_pages;
1708 		for (i = 0; i < page_idx; i++) {
1709 			qdf_mem_free_consistent_debug(
1710 				osdev, osdev->dev,
1711 				pages->page_size, dma_pages->page_v_addr_start,
1712 				dma_pages->page_p_addr, memctxt, func, line);
1713 			dma_pages++;
1714 		}
1715 		qdf_mem_free_debug(pages->dma_pages, func, line);
1716 	}
1717 
1718 out_fail:
1719 	pages->cacheable_pages = NULL;
1720 	pages->dma_pages = NULL;
1721 	pages->num_pages = 0;
1722 }
1723 
1724 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
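
/*
 * Worked example of the paging arithmetic above (numbers are illustrative
 * and assume a 4096-byte qdf_page_size): element_size = 128 gives
 * num_element_per_page = 4096 / 128 = 32, so element_num = 100 requires
 * 100 / 32 = 3 full pages plus one partial page, i.e. num_pages = 4.
 */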
1725 
1726 /**
1727  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1728  * @osdev: OS device handle pointer
1729  * @pages: Multi page information storage
1730  * @memctxt: Memory context
1731  * @cacheable: Coherent memory or cacheable memory
1732  * @func: Caller of this allocator
1733  * @line: Line number of the caller
1734  *
1735  * This function frees memory that was allocated across multiple pages.
1736  *
1737  * Return: None
1738  */
1739 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1740 				    struct qdf_mem_multi_page_t *pages,
1741 				    qdf_dma_context_t memctxt, bool cacheable,
1742 				    const char *func, uint32_t line)
1743 {
1744 	unsigned int page_idx;
1745 	struct qdf_mem_dma_page_t *dma_pages;
1746 
1747 	if (!pages->page_size)
1748 		pages->page_size = qdf_page_size;
1749 
1750 	if (cacheable) {
1751 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1752 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1753 					   func, line);
1754 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1755 	} else {
1756 		dma_pages = pages->dma_pages;
1757 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1758 			qdf_mem_free_consistent_debug(
1759 				osdev, osdev->dev, pages->page_size,
1760 				dma_pages->page_v_addr_start,
1761 				dma_pages->page_p_addr, memctxt, func, line);
1762 			dma_pages++;
1763 		}
1764 		qdf_mem_free_debug(pages->dma_pages, func, line);
1765 	}
1766 
1767 	pages->cacheable_pages = NULL;
1768 	pages->dma_pages = NULL;
1769 	pages->num_pages = 0;
1770 }
1771 
1772 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1773 
1774 #else
1775 static void qdf_mem_debug_init(void) {}
1776 
1777 static void qdf_mem_debug_exit(void) {}
1778 
1779 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1780 {
1781 	void *ptr;
1782 
1783 	ptr = qdf_mem_prealloc_get(size);
1784 	if (ptr)
1785 		return ptr;
1786 
1787 	ptr = kzalloc(size, GFP_ATOMIC);
1788 	if (!ptr) {
1789 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1790 			      size, func, line);
1791 		return NULL;
1792 	}
1793 
1794 	qdf_mem_kmalloc_inc(ksize(ptr));
1795 
1796 	return ptr;
1797 }
1798 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1799 
1800 /**
1801  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1802  * @osdev: OS device handle pointer
1803  * @pages: Multi page information storage
1804  * @element_size: Each element size
1805  * @element_num: Total number of elements should be allocated
1806  * @memctxt: Memory context
1807  * @cacheable: Coherent memory or cacheable memory
1808  *
1809  * This function allocates a large amount of memory across multiple pages.
1810  * Large contiguous allocations fail frequently, so instead of allocating
1811  * the whole buffer in one shot, allocate multiple non-contiguous pages and
1812  * combine them at the point of use.
1813  *
1814  * Return: None
1815  */
1816 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1817 			       struct qdf_mem_multi_page_t *pages,
1818 			       size_t element_size, uint16_t element_num,
1819 			       qdf_dma_context_t memctxt, bool cacheable)
1820 {
1821 	uint16_t page_idx;
1822 	struct qdf_mem_dma_page_t *dma_pages;
1823 	void **cacheable_pages = NULL;
1824 	uint16_t i;
1825 
1826 	if (!pages->page_size)
1827 		pages->page_size = qdf_page_size;
1828 
1829 	pages->num_element_per_page = pages->page_size / element_size;
1830 	if (!pages->num_element_per_page) {
1831 		qdf_print("Invalid page %d or element size %d",
1832 			  (int)pages->page_size, (int)element_size);
1833 		goto out_fail;
1834 	}
1835 
1836 	pages->num_pages = element_num / pages->num_element_per_page;
1837 	if (element_num % pages->num_element_per_page)
1838 		pages->num_pages++;
1839 
1840 	if (cacheable) {
1841 		/* Pages information storage */
1842 		pages->cacheable_pages = qdf_mem_malloc(
1843 			pages->num_pages * sizeof(pages->cacheable_pages));
1844 		if (!pages->cacheable_pages)
1845 			goto out_fail;
1846 
1847 		cacheable_pages = pages->cacheable_pages;
1848 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1849 			cacheable_pages[page_idx] =
1850 				qdf_mem_malloc(pages->page_size);
1851 			if (!cacheable_pages[page_idx])
1852 				goto page_alloc_fail;
1853 		}
1854 		pages->dma_pages = NULL;
1855 	} else {
1856 		pages->dma_pages = qdf_mem_malloc(
1857 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1858 		if (!pages->dma_pages)
1859 			goto out_fail;
1860 
1861 		dma_pages = pages->dma_pages;
1862 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1863 			dma_pages->page_v_addr_start =
1864 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1865 					 pages->page_size,
1866 					&dma_pages->page_p_addr);
1867 			if (!dma_pages->page_v_addr_start) {
1868 				qdf_print("dmaable page alloc fail pi %d",
1869 					page_idx);
1870 				goto page_alloc_fail;
1871 			}
1872 			dma_pages->page_v_addr_end =
1873 				dma_pages->page_v_addr_start + pages->page_size;
1874 			dma_pages++;
1875 		}
1876 		pages->cacheable_pages = NULL;
1877 	}
1878 	return;
1879 
1880 page_alloc_fail:
1881 	if (cacheable) {
1882 		for (i = 0; i < page_idx; i++)
1883 			qdf_mem_free(pages->cacheable_pages[i]);
1884 		qdf_mem_free(pages->cacheable_pages);
1885 	} else {
1886 		dma_pages = pages->dma_pages;
1887 		for (i = 0; i < page_idx; i++) {
1888 			qdf_mem_free_consistent(
1889 				osdev, osdev->dev, pages->page_size,
1890 				dma_pages->page_v_addr_start,
1891 				dma_pages->page_p_addr, memctxt);
1892 			dma_pages++;
1893 		}
1894 		qdf_mem_free(pages->dma_pages);
1895 	}
1896 
1897 out_fail:
1898 	pages->cacheable_pages = NULL;
1899 	pages->dma_pages = NULL;
1900 	pages->num_pages = 0;
1901 	return;
1902 }
1903 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1904 
/**
 * qdf_mem_multi_pages_free() - free large amounts of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function frees memory that was allocated across multiple pages by
 * qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
1916 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1917 			      struct qdf_mem_multi_page_t *pages,
1918 			      qdf_dma_context_t memctxt, bool cacheable)
1919 {
1920 	unsigned int page_idx;
1921 	struct qdf_mem_dma_page_t *dma_pages;
1922 
1923 	if (!pages->page_size)
1924 		pages->page_size = qdf_page_size;
1925 
1926 	if (cacheable) {
1927 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1928 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1929 		qdf_mem_free(pages->cacheable_pages);
1930 	} else {
1931 		dma_pages = pages->dma_pages;
1932 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1933 			qdf_mem_free_consistent(
1934 				osdev, osdev->dev, pages->page_size,
1935 				dma_pages->page_v_addr_start,
1936 				dma_pages->page_p_addr, memctxt);
1937 			dma_pages++;
1938 		}
1939 		qdf_mem_free(pages->dma_pages);
1940 	}
1941 
1942 	pages->cacheable_pages = NULL;
1943 	pages->dma_pages = NULL;
1944 	pages->num_pages = 0;
1945 	return;
1946 }
1947 qdf_export_symbol(qdf_mem_multi_pages_free);
1948 #endif
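
/*
 * Illustrative usage sketch (comment only): allocating a descriptor pool as
 * multiple non-contiguous cacheable pages and releasing it again. The element
 * type, element count and "osdev" variable are assumptions for this example.
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_tx_desc),
 *				  1024, 0, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// Elements are carved out of pages.cacheable_pages[0..num_pages - 1],
 *	// pages.num_element_per_page elements per page.
 *
 *	qdf_mem_multi_pages_free(osdev, &pages, 0, true);
 */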
1949 
1950 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
1951 			      bool cacheable)
1952 {
1953 	unsigned int page_idx;
1954 	struct qdf_mem_dma_page_t *dma_pages;
1955 
1956 	if (!pages->page_size)
1957 		pages->page_size = qdf_page_size;
1958 
1959 	if (cacheable) {
1960 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1961 			qdf_mem_zero(pages->cacheable_pages[page_idx],
1962 				     pages->page_size);
1963 	} else {
1964 		dma_pages = pages->dma_pages;
1965 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1966 			qdf_mem_zero(dma_pages->page_v_addr_start,
1967 				     pages->page_size);
1968 			dma_pages++;
1969 		}
1970 	}
1971 }
1972 
1973 qdf_export_symbol(qdf_mem_multi_pages_zero);
1974 
1975 void __qdf_mem_free(void *ptr)
1976 {
1977 	if (!ptr)
1978 		return;
1979 
1980 	if (qdf_mem_prealloc_put(ptr))
1981 		return;
1982 
1983 	qdf_mem_kmalloc_dec(ksize(ptr));
1984 
1985 	kfree(ptr);
1986 }
1987 
1988 qdf_export_symbol(__qdf_mem_free);
1989 
1990 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
1991 {
1992 	void *ptr;
1993 
1994 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1995 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1996 			     line);
1997 		return NULL;
1998 	}
1999 
2000 	ptr = qdf_mem_prealloc_get(size);
2001 	if (ptr)
2002 		return ptr;
2003 
2004 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2005 	if (!ptr)
2006 		return NULL;
2007 
2008 	qdf_mem_kmalloc_inc(ksize(ptr));
2009 
2010 	return ptr;
2011 }
2012 
2013 qdf_export_symbol(__qdf_mem_malloc);
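
/*
 * Illustrative usage sketch (comment only): allocations are zero-initialized
 * and rejected if they are 0 bytes or larger than QDF_MEM_MAX_MALLOC. Callers
 * normally use a qdf_mem_malloc() style wrapper rather than calling the __*
 * function directly; the table size below is an arbitrary example value.
 *
 *	uint32_t *table;
 *
 *	table = __qdf_mem_malloc(256 * sizeof(*table), __func__, __LINE__);
 *	if (!table)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// table[0..255] are already zero here.
 *
 *	__qdf_mem_free(table);
 */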
2014 
void *qdf_aligned_malloc_fl(uint32_t *size,
			    void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line)
2021 {
2022 	void *vaddr_aligned;
2023 	uint32_t align_alloc_size;
2024 
2025 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2026 			line);
2027 	if (!*vaddr_unaligned) {
2028 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2029 		return NULL;
2030 	}
2031 
2032 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2033 
	/* Re-allocate with additional bytes only if the allocation above
	 * returned an unaligned address. The reason for trying the exact
	 * size first is that the OS allocates blocks of power-of-2 pages
	 * and then frees the extra pages. E.g., for a ring size of 1MB,
	 * requesting 1MB plus 7 alignment bytes up front would force a
	 * 2MB block allocation, which sometimes fails due to memory
	 * fragmentation.
	 */
2043 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2044 		align_alloc_size = *size + align - 1;
2045 
2046 		qdf_mem_free(*vaddr_unaligned);
2047 		*vaddr_unaligned = qdf_mem_malloc_fl(
2048 				(qdf_size_t)align_alloc_size, func, line);
2049 		if (!*vaddr_unaligned) {
2050 			qdf_warn("Failed to alloc %uB @ %s:%d",
2051 				 align_alloc_size, func, line);
2052 			return NULL;
2053 		}
2054 
2055 		*paddr_unaligned = qdf_mem_virt_to_phys(
2056 				*vaddr_unaligned);
2057 		*size = align_alloc_size;
2058 	}
2059 
2060 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2061 		((unsigned long)(*paddr_unaligned), align);
2062 
2063 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2064 			((unsigned long)(*paddr_aligned) -
2065 			 (unsigned long)(*paddr_unaligned)));
2066 
2067 	return vaddr_aligned;
2068 }
2069 
2070 qdf_export_symbol(qdf_aligned_malloc_fl);
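
/*
 * Worked example of the alignment math above (the addresses are made up for
 * illustration): with *size = 4096 and align = 8, suppose the first
 * allocation returns *paddr_unaligned = 0x5ffff13. Since 0x5ffff13 & 0x7 is
 * non-zero, the buffer is re-allocated with align_alloc_size = 4096 + 7 =
 * 4103 bytes. If the new physical base is 0x6000005, then:
 *
 *	*paddr_aligned = qdf_align(0x6000005, 8) = 0x6000008
 *	vaddr_aligned  = *vaddr_unaligned + (0x6000008 - 0x6000005)
 *		       = *vaddr_unaligned + 3
 *
 * i.e. the returned virtual pointer is offset by exactly the number of bytes
 * needed to round the physical address up to the requested alignment.
 */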
2071 
/**
 * qdf_mem_multi_page_link() - link the elements of a multi-page allocation
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Size of a single element
 * @elem_count: Number of elements to link
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function links the elements of a multi-page allocation into a singly
 * linked list, storing each "next" pointer in the first word of the element.
 *
 * Return: 0 on success, -ENOMEM if a page pointer is missing
 */
2084 int qdf_mem_multi_page_link(qdf_device_t osdev,
2085 		struct qdf_mem_multi_page_t *pages,
2086 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2087 {
2088 	uint16_t i, i_int;
2089 	void *page_info;
2090 	void **c_elem = NULL;
2091 	uint32_t num_link = 0;
2092 
2093 	for (i = 0; i < pages->num_pages; i++) {
2094 		if (cacheable)
2095 			page_info = pages->cacheable_pages[i];
2096 		else
2097 			page_info = pages->dma_pages[i].page_v_addr_start;
2098 
2099 		if (!page_info)
2100 			return -ENOMEM;
2101 
2102 		c_elem = (void **)page_info;
2103 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2104 			if (i_int == (pages->num_element_per_page - 1)) {
2105 				if (cacheable)
2106 					*c_elem = pages->
2107 						cacheable_pages[i + 1];
2108 				else
2109 					*c_elem = pages->
2110 						dma_pages[i + 1].
2111 							page_v_addr_start;
2112 				num_link++;
2113 				break;
2114 			} else {
2115 				*c_elem =
2116 					(void *)(((char *)c_elem) + elem_size);
2117 			}
2118 			num_link++;
2119 			c_elem = (void **)*c_elem;
2120 
2121 			/* Last link established exit */
2122 			if (num_link == (elem_count - 1))
2123 				break;
2124 		}
2125 	}
2126 
2127 	if (c_elem)
2128 		*c_elem = NULL;
2129 
2130 	return 0;
2131 }
2132 qdf_export_symbol(qdf_mem_multi_page_link);
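
/*
 * Illustrative usage sketch (comment only): after qdf_mem_multi_pages_alloc()
 * the elements inside each page are not connected; qdf_mem_multi_page_link()
 * chains them into a singly linked free list whose "next" pointer is stored
 * in the first word of each element. The descriptor type and counts are
 * assumptions for this example.
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *	void **freelist;
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_desc),
 *				  512, 0, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (qdf_mem_multi_page_link(osdev, &pages, sizeof(struct my_desc),
 *				    512, true))
 *		goto fail;
 *
 *	// The free list head is the first element of the first page.
 *	freelist = (void **)pages.cacheable_pages[0];
 */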
2133 
2134 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2135 {
2136 	/* special case where dst_addr or src_addr can be NULL */
2137 	if (!num_bytes)
2138 		return;
2139 
2140 	QDF_BUG(dst_addr);
2141 	QDF_BUG(src_addr);
2142 	if (!dst_addr || !src_addr)
2143 		return;
2144 
2145 	memcpy(dst_addr, src_addr, num_bytes);
2146 }
2147 qdf_export_symbol(qdf_mem_copy);
2148 
2149 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2150 {
2151 	qdf_shared_mem_t *shared_mem;
2152 	qdf_dma_addr_t dma_addr, paddr;
2153 	int ret;
2154 
2155 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2156 	if (!shared_mem)
2157 		return NULL;
2158 
2159 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2160 				size, qdf_mem_get_dma_addr_ptr(osdev,
2161 						&shared_mem->mem_info));
2162 	if (!shared_mem->vaddr) {
2163 		qdf_err("Unable to allocate DMA memory for shared resource");
2164 		qdf_mem_free(shared_mem);
2165 		return NULL;
2166 	}
2167 
2168 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2169 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2170 
2171 	qdf_mem_zero(shared_mem->vaddr, size);
2172 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2173 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2174 
2175 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2176 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2177 				      shared_mem->vaddr, dma_addr, size);
2178 	if (ret) {
2179 		qdf_err("Unable to get DMA sgtable");
2180 		qdf_mem_free_consistent(osdev, osdev->dev,
2181 					shared_mem->mem_info.size,
2182 					shared_mem->vaddr,
2183 					dma_addr,
2184 					qdf_get_dma_mem_context(shared_mem,
2185 								memctx));
2186 		qdf_mem_free(shared_mem);
2187 		return NULL;
2188 	}
2189 
2190 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2191 
2192 	return shared_mem;
2193 }
2194 
2195 qdf_export_symbol(qdf_mem_shared_mem_alloc);
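
/*
 * Illustrative usage sketch (comment only): allocate shared (DMA-coherent)
 * memory together with its scatter-gather table, then release it. A matching
 * qdf_mem_shared_mem_free() helper is assumed to exist in the QDF headers;
 * the size below is an arbitrary example value.
 *
 *	qdf_shared_mem_t *shm;
 *
 *	shm = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shm)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// shm->vaddr is zeroed; shm->mem_info holds the DMA/physical
 *	// addresses and the (possibly rounded-up) size; shm->sgtable
 *	// describes the region for remote subsystems.
 *
 *	qdf_mem_shared_mem_free(osdev, shm);
 */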
2196 
/**
 * qdf_mem_copy_toio() - copy memory to an I/O-mapped region
 * @dst_addr: Pointer to the destination I/O memory location (to copy to)
 * @src_addr: Pointer to the source memory location (to copy from)
 * @num_bytes: Number of bytes to copy
 *
 * Return: none
 */
2205 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2206 {
	/* special case where dst_addr or src_addr can be NULL */
	if (!num_bytes)
		return;
2211 
2212 	if ((!dst_addr) || (!src_addr)) {
2213 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2214 			  "%s called with NULL parameter, source:%pK destination:%pK",
2215 			  __func__, src_addr, dst_addr);
2216 		QDF_ASSERT(0);
2217 		return;
2218 	}
2219 	memcpy_toio(dst_addr, src_addr, num_bytes);
2220 }
2221 
2222 qdf_export_symbol(qdf_mem_copy_toio);
2223 
/**
 * qdf_mem_set_io() - set (fill) I/O memory with a specified byte value
 * @ptr: Pointer to the I/O memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte value to set in memory
 *
 * Return: None
 */
2232 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2233 {
2234 	if (!ptr) {
2235 		qdf_print("%s called with NULL parameter ptr", __func__);
2236 		return;
2237 	}
2238 	memset_io(ptr, value, num_bytes);
2239 }
2240 
2241 qdf_export_symbol(qdf_mem_set_io);
2242 
2243 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2244 {
2245 	QDF_BUG(ptr);
2246 	if (!ptr)
2247 		return;
2248 
2249 	memset(ptr, value, num_bytes);
2250 }
2251 qdf_export_symbol(qdf_mem_set);
2252 
2253 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2254 {
2255 	/* special case where dst_addr or src_addr can be NULL */
2256 	if (!num_bytes)
2257 		return;
2258 
2259 	QDF_BUG(dst_addr);
2260 	QDF_BUG(src_addr);
2261 	if (!dst_addr || !src_addr)
2262 		return;
2263 
2264 	memmove(dst_addr, src_addr, num_bytes);
2265 }
2266 qdf_export_symbol(qdf_mem_move);
2267 
2268 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2269 {
2270 	QDF_BUG(left);
2271 	QDF_BUG(right);
2272 
2273 	return memcmp(left, right, size);
2274 }
2275 qdf_export_symbol(qdf_mem_cmp);
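
/*
 * Illustrative usage sketch (comment only): note that qdf_mem_set() takes the
 * byte count *before* the fill value, which is the opposite argument order
 * from memset(). The buffers below are hypothetical.
 *
 *	uint8_t a[32], b[32];
 *
 *	qdf_mem_set(a, sizeof(a), 0xff);   // count before value, unlike memset()
 *	qdf_mem_copy(b, a, sizeof(a));     // NULL dst/src tolerated only when count is 0
 *	if (!qdf_mem_cmp(a, b, sizeof(a)))
 *		;                          // memcmp() semantics: 0 means equal
 */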
2276 
2277 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2278 /**
2279  * qdf_mem_dma_alloc() - allocates memory for dma
2280  * @osdev: OS device handle
2281  * @dev: Pointer to device handle
2282  * @size: Size to be allocated
2283  * @phy_addr: Physical address
2284  *
 * Return: pointer to the allocated memory, or NULL if the allocation fails
2286  */
2287 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2288 				      qdf_size_t size,
2289 				      qdf_dma_addr_t *phy_addr)
2290 {
2291 	void *vaddr;
2292 
2293 	vaddr = qdf_mem_malloc(size);
2294 	*phy_addr = ((uintptr_t) vaddr);
2295 	/* using this type conversion to suppress "cast from pointer to integer
2296 	 * of different size" warning on some platforms
2297 	 */
2298 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2299 	return vaddr;
2300 }
2301 
2302 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2303 	!defined(QCA_WIFI_QCN9000)
2304 
2305 #define QCA8074_RAM_BASE 0x50000000
2306 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2307 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2308 			qdf_dma_addr_t *phy_addr)
2309 {
2310 	void *vaddr = NULL;
2311 	int i;
2312 
2313 	*phy_addr = 0;
2314 
2315 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2316 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2317 					   qdf_mem_malloc_flags());
2318 
2319 		if (!vaddr) {
			qdf_err("%s failed, size: %zu!", __func__, size);
2321 			return NULL;
2322 		}
2323 
2324 		if (*phy_addr >= QCA8074_RAM_BASE)
2325 			return vaddr;
2326 
2327 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2328 	}
2329 
2330 	return NULL;
2331 }
2332 
2333 #else
2334 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2335 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2336 {
2337 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2338 }
2339 #endif
2340 
2341 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2342 static inline void
2343 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2344 {
2345 	qdf_mem_free(vaddr);
2346 }
2347 #else
2348 
2349 static inline void
2350 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2351 {
2352 	dma_free_coherent(dev, size, vaddr, paddr);
2353 }
2354 #endif
2355 
2356 #ifdef MEMORY_DEBUG
2357 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2358 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2359 				     const char *func, uint32_t line,
2360 				     void *caller)
2361 {
2362 	QDF_STATUS status;
2363 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2364 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2365 	struct qdf_mem_header *header;
2366 	void *vaddr;
2367 
2368 	if (is_initial_mem_debug_disabled)
2369 		return __qdf_mem_alloc_consistent(osdev, dev,
2370 						  size, paddr,
2371 						  func, line);
2372 
2373 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2374 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2375 		return NULL;
2376 	}
2377 
2378 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2379 				   paddr);
2380 
2381 	if (!vaddr) {
2382 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2383 		return NULL;
2384 	}
2385 
2386 	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add a trailer; this call initializes the
	 * header structure at the tail of the buffer. Prefixing the header
	 * to a DMA buffer causes SMMU faults, so the header is never placed
	 * in front of DMA buffers.
	 */
2392 	qdf_mem_header_init(header, size, func, line, caller);
2393 
2394 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2395 	status = qdf_list_insert_front(mem_list, &header->node);
2396 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2397 	if (QDF_IS_STATUS_ERROR(status))
2398 		qdf_err("Failed to insert memory header; status %d", status);
2399 
2400 	qdf_mem_dma_inc(size);
2401 
2402 	return vaddr;
2403 }
2404 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2405 
2406 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2407 				   qdf_size_t size, void *vaddr,
2408 				   qdf_dma_addr_t paddr,
2409 				   qdf_dma_context_t memctx,
2410 				   const char *func, uint32_t line)
2411 {
2412 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2413 	struct qdf_mem_header *header;
2414 	enum qdf_mem_validation_bitmap error_bitmap;
2415 
2416 	if (is_initial_mem_debug_disabled) {
2417 		__qdf_mem_free_consistent(
2418 					  osdev, dev,
2419 					  size, vaddr,
2420 					  paddr, memctx);
2421 		return;
2422 	}
2423 
2424 	/* freeing a null pointer is valid */
2425 	if (qdf_unlikely(!vaddr))
2426 		return;
2427 
2428 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2429 
2430 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add a trailer; this call retrieves the
	 * header structure from the tail of the buffer. Prefixing the header
	 * to a DMA buffer causes SMMU faults, so the header is never placed
	 * in front of DMA buffers.
	 */
2436 	header = qdf_mem_dma_get_header(vaddr, size);
2437 	error_bitmap = qdf_mem_header_validate(header, domain);
2438 	if (!error_bitmap) {
2439 		header->freed = true;
2440 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2441 				     &header->node);
2442 	}
2443 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2444 
2445 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2446 
2447 	qdf_mem_dma_dec(header->size);
2448 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2449 }
2450 qdf_export_symbol(qdf_mem_free_consistent_debug);
2451 #endif /* MEMORY_DEBUG */
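
/*
 * Layout sketch for the MEMORY_DEBUG DMA tracking above (informational
 * comment only): the debug header is stored as a trailer because prefixing it
 * would shift the DMA address handed to hardware and can trigger SMMU faults.
 *
 *	vaddr                                vaddr + size
 *	|<-------------- size -------------->|<- QDF_DMA_MEM_DEBUG_SIZE ->|
 *	+------------------------------------+-----------------------------+
 *	|     caller-visible DMA buffer      |    struct qdf_mem_header    |
 *	+------------------------------------+-----------------------------+
 *
 * qdf_mem_dma_get_header(vaddr, size) returns a pointer to this trailer so
 * that alloc and free can validate it against the active debug domain list.
 */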
2452 
2453 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2454 			       qdf_size_t size, void *vaddr,
2455 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2456 {
2457 	qdf_mem_dma_dec(size);
2458 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2459 }
2460 
2461 qdf_export_symbol(__qdf_mem_free_consistent);
2462 
2463 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2464 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2465 				 const char *func, uint32_t line)
2466 {
2467 	void *vaddr;
2468 
2469 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2470 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2471 			     size, func, line);
2472 		return NULL;
2473 	}
2474 
2475 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2476 
2477 	if (vaddr)
2478 		qdf_mem_dma_inc(size);
2479 
2480 	return vaddr;
2481 }
2482 
2483 qdf_export_symbol(__qdf_mem_alloc_consistent);
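
/*
 * Illustrative usage sketch (comment only): callers normally go through the
 * qdf_mem_alloc_consistent()/qdf_mem_free_consistent() wrappers, which map to
 * the *_debug or __* variants depending on MEMORY_DEBUG. The size below is an
 * arbitrary example value.
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, 2048, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// Program "paddr" into the hardware; access the memory via "vaddr".
 *
 *	qdf_mem_free_consistent(osdev, osdev->dev, 2048, vaddr, paddr, 0);
 */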
2484 
2485 void *qdf_aligned_mem_alloc_consistent_fl(
2486 	qdf_device_t osdev, uint32_t *size,
2487 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2488 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2489 	const char *func, uint32_t line)
2490 {
2491 	void *vaddr_aligned;
2492 	uint32_t align_alloc_size;
2493 
2494 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2495 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2496 	if (!*vaddr_unaligned) {
2497 		qdf_warn("Failed to alloc %uB @ %s:%d",
2498 			 *size, func, line);
2499 		return NULL;
2500 	}
2501 
	/* Re-allocate with additional bytes only if the allocation above
	 * returned an unaligned address. The reason for trying the exact
	 * size first is that the OS allocates blocks of power-of-2 pages
	 * and then frees the extra pages. E.g., for a ring size of 1MB,
	 * requesting 1MB plus 7 alignment bytes up front would force a
	 * 2MB block allocation, which sometimes fails due to memory
	 * fragmentation.
	 */
2511 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2512 		align_alloc_size = *size + align - 1;
2513 
2514 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2515 					*vaddr_unaligned,
2516 					*paddr_unaligned, 0);
2517 
2518 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2519 				osdev, osdev->dev, align_alloc_size,
2520 				paddr_unaligned);
2521 		if (!*vaddr_unaligned) {
2522 			qdf_warn("Failed to alloc %uB @ %s:%d",
2523 				 align_alloc_size, func, line);
2524 			return NULL;
2525 		}
2526 
2527 		*size = align_alloc_size;
2528 	}
2529 
2530 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2531 			(unsigned long)(*paddr_unaligned), align);
2532 
2533 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2534 				 ((unsigned long)(*paddr_aligned) -
2535 				  (unsigned long)(*paddr_unaligned)));
2536 
2537 	return vaddr_aligned;
2538 }
2539 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
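
/*
 * Illustrative usage sketch (comment only): hardware rings typically require
 * the *physical* base address to be aligned, so the aligned physical address
 * and the matching virtual pointer are returned separately while the
 * unaligned originals must be kept for the eventual free. The ring size and
 * alignment are example assumptions.
 *
 *	uint32_t ring_size = 8192;	// may be enlarged by the call
 *	void *ring_vaddr_unaligned;
 *	qdf_dma_addr_t ring_paddr_unaligned, ring_paddr_aligned;
 *	void *ring_vaddr;
 *
 *	ring_vaddr = qdf_aligned_mem_alloc_consistent_fl(osdev, &ring_size,
 *			&ring_vaddr_unaligned, &ring_paddr_unaligned,
 *			&ring_paddr_aligned, 8, __func__, __LINE__);
 *	if (!ring_vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// ... later ...
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size,
 *				ring_vaddr_unaligned, ring_paddr_unaligned, 0);
 */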
2540 
/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to the device
 * @osdev: OS device handle
 * @bus_addr: DMA address to hand over to the device
 * @size: Size of the memory block
 * @direction: direction in which the data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to RAM or invalidated as needed.
 *
 * Return: none
 */
2553 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2554 					qdf_dma_addr_t bus_addr,
2555 					qdf_size_t size,
2556 					enum dma_data_direction direction)
2557 {
2558 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2559 }
2560 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2561 
/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to the CPU
 * @osdev: OS device handle
 * @bus_addr: DMA address to hand back to the CPU
 * @size: Size of the memory block
 * @direction: direction in which the data was DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
2573 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2574 				     qdf_dma_addr_t bus_addr,
2575 				     qdf_size_t size,
2576 				     enum dma_data_direction direction)
2577 {
2578 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2579 }
2580 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
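
/*
 * Illustrative usage sketch (comment only): typical cache ownership hand-offs
 * for streaming DMA buffers. The addresses, lengths and directions are
 * example assumptions.
 *
 *	// Tx: the CPU has written the frame; flush it before the device reads.
 *	qdf_mem_dma_sync_single_for_device(osdev, tx_paddr, tx_len,
 *					   DMA_TO_DEVICE);
 *
 *	// Rx: the device has written the frame; sync before the CPU reads it.
 *	qdf_mem_dma_sync_single_for_cpu(osdev, rx_paddr, rx_len,
 *					DMA_FROM_DEVICE);
 */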
2581 
2582 void qdf_mem_init(void)
2583 {
2584 	qdf_mem_debug_init();
2585 	qdf_net_buf_debug_init();
2586 	qdf_frag_debug_init();
2587 	qdf_mem_debugfs_init();
2588 	qdf_mem_debug_debugfs_init();
2589 }
2590 qdf_export_symbol(qdf_mem_init);
2591 
2592 void qdf_mem_exit(void)
2593 {
2594 	qdf_mem_debug_debugfs_exit();
2595 	qdf_mem_debugfs_exit();
2596 	qdf_frag_debug_exit();
2597 	qdf_net_buf_debug_exit();
2598 	qdf_mem_debug_exit();
2599 }
2600 qdf_export_symbol(qdf_mem_exit);
2601 
/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 *
 * @dst_addr: Destination six-byte Ethernet address array
 * @src_addr: Source six-byte Ethernet address array
 *
 * Please note: dst_addr and src_addr must both be aligned to u16.
 *
 * Return: none
 */
2612 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2613 {
2614 	if ((!dst_addr) || (!src_addr)) {
2615 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2616 			  "%s called with NULL parameter, source:%pK destination:%pK",
2617 			  __func__, src_addr, dst_addr);
2618 		QDF_ASSERT(0);
2619 		return;
2620 	}
2621 	ether_addr_copy(dst_addr, src_addr);
2622 }
2623 qdf_export_symbol(qdf_ether_addr_copy);
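
/*
 * Illustrative usage sketch (comment only): both addresses must be 16-bit
 * aligned because ether_addr_copy() may copy in 16-bit words. The peer
 * structure and field names below are hypothetical; __aligned(2) is one way
 * the alignment is commonly guaranteed.
 *
 *	struct my_peer {
 *		uint8_t mac[QDF_MAC_ADDR_SIZE] __aligned(2);
 *	};
 *
 *	qdf_ether_addr_copy(peer->mac, hdr->src_mac);
 */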
2624 
2625 int32_t qdf_dma_mem_stats_read(void)
2626 {
2627 	return qdf_atomic_read(&qdf_mem_stat.dma);
2628 }
2629 
2630 qdf_export_symbol(qdf_dma_mem_stats_read);
2631 
2632 int32_t qdf_heap_mem_stats_read(void)
2633 {
2634 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2635 }
2636 
2637 qdf_export_symbol(qdf_heap_mem_stats_read);
2638 
2639 int32_t qdf_skb_mem_stats_read(void)
2640 {
2641 	return qdf_atomic_read(&qdf_mem_stat.skb);
2642 }
2643 
2644 qdf_export_symbol(qdf_skb_mem_stats_read);
2645 
2646 int32_t qdf_skb_total_mem_stats_read(void)
2647 {
2648 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2649 }
2650 
2651 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2652 
2653 int32_t qdf_skb_max_mem_stats_read(void)
2654 {
2655 	return qdf_mem_stat.skb_mem_max;
2656 }
2657 
2658 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2659 
2660 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2661 {
2662 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2663 }
2664 
2665 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2666 
2667 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2668 {
2669 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2670 }
2671 
2672 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2673 
2674 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2675 {
2676 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2677 }
2678 
2679 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2680 
2681 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2682 {
2683 	return qdf_mem_stat.dp_tx_skb_count_max;
2684 }
2685 
2686 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2687 
2688 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2689 {
2690 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2691 }
2692 
2693 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2694 
2695 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2696 {
2697 	return qdf_mem_stat.dp_rx_skb_count_max;
2698 }
2699 
2700 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2701 
2702 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2703 {
2704 	return qdf_mem_stat.dp_tx_skb_mem_max;
2705 }
2706 
2707 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2708 
2709 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2710 {
2711 	return qdf_mem_stat.dp_rx_skb_mem_max;
2712 }
2713 
2714 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2715 
2716 int32_t qdf_mem_tx_desc_cnt_read(void)
2717 {
2718 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2719 }
2720 
2721 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2722 
2723 int32_t qdf_mem_tx_desc_max_read(void)
2724 {
2725 	return qdf_mem_stat.tx_descs_max;
2726 }
2727 
2728 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2729 
2730 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2731 				int32_t tx_descs_max)
2732 {
2733 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2734 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2735 }
2736 
2737 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2738 
2739 void qdf_mem_stats_init(void)
2740 {
2741 	qdf_mem_stat.skb_mem_max = 0;
2742 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
2743 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
2744 	qdf_mem_stat.dp_tx_skb_count_max = 0;
2745 	qdf_mem_stat.dp_rx_skb_count_max = 0;
2746 	qdf_mem_stat.tx_descs_max = 0;
2747 }
2748 
2749 qdf_export_symbol(qdf_mem_stats_init);
2750 
2751