xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision a64d8a0dbea74a9757ced8bd24c04bf658d196c7)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_str.h"
32 #include "qdf_talloc.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 #include <qdf_list.h>
37 
38 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
39 #include <net/cnss_prealloc.h>
40 #endif
41 
42 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
43 static bool mem_debug_disabled;
44 qdf_declare_param(mem_debug_disabled, bool);
45 qdf_export_symbol(mem_debug_disabled);
46 #endif
47 
48 #ifdef MEMORY_DEBUG
49 static bool is_initial_mem_debug_disabled;
50 #endif
51 
52 /* Preprocessor Definitions and Constants */
53 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MiB */
54 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
55 #define QDF_DEBUG_STRING_SIZE 512
56 
57 /**
58  * struct __qdf_mem_stat - qdf memory statistics
59  * @kmalloc: total kmalloc allocations
60  * @dma: total dma allocations
61  * @skb: total skb allocations
62  * @skb_total: total skb allocations in host driver
63  * @dp_tx_skb: total Tx skb allocations in datapath
64  * @dp_rx_skb: total Rx skb allocations in datapath
65  * @skb_mem_max: high watermark for skb allocations
66  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
67  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
68  * @dp_tx_skb_count: DP Tx buffer count
69  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
70  * @dp_rx_skb_count: DP Rx buffer count
71  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
72  * @tx_descs_outstanding: Current pending Tx descs count
73  * @tx_descs_max: High watermark for pending Tx descs count
74  */
75 static struct __qdf_mem_stat {
76 	qdf_atomic_t kmalloc;
77 	qdf_atomic_t dma;
78 	qdf_atomic_t skb;
79 	qdf_atomic_t skb_total;
80 	qdf_atomic_t dp_tx_skb;
81 	qdf_atomic_t dp_rx_skb;
82 	int32_t skb_mem_max;
83 	int32_t dp_tx_skb_mem_max;
84 	int32_t dp_rx_skb_mem_max;
85 	qdf_atomic_t dp_tx_skb_count;
86 	int32_t dp_tx_skb_count_max;
87 	qdf_atomic_t dp_rx_skb_count;
88 	int32_t dp_rx_skb_count_max;
89 	qdf_atomic_t tx_descs_outstanding;
90 	int32_t tx_descs_max;
91 } qdf_mem_stat;
92 
93 #ifdef MEMORY_DEBUG
94 #include "qdf_debug_domain.h"
95 
96 enum list_type {
97 	LIST_TYPE_MEM = 0,
98 	LIST_TYPE_DMA = 1,
99 	LIST_TYPE_NBUF = 2,
100 	LIST_TYPE_MAX,
101 };
102 
103 /**
104  * struct major_alloc_priv - private data registered to the debugfs entry
105  *                   created to list major allocations
106  * @type:            type of the list to be parsed
107  * @threshold:       configured by the user by overwriting the respective
108  *                   debugfs sys entry. This lists the functions which requested
109  *                   memory/dma allocations more than threshold number of times.
110  */
111 struct major_alloc_priv {
112 	enum list_type type;
113 	uint32_t threshold;
114 };
115 
116 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
117 static qdf_spinlock_t qdf_mem_list_lock;
118 
119 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
120 static qdf_spinlock_t qdf_mem_dma_list_lock;
121 
122 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
123 {
124 	return &qdf_mem_domains[domain];
125 }
126 
127 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
128 {
129 	return &qdf_mem_dma_domains[domain];
130 }
131 
132 /**
133  * struct qdf_mem_header - memory object to debug
134  * @node: node to the list
135  * @domain: the active memory domain at time of allocation
136  * @freed: flag set during free, used to detect double frees
137  *	Use uint8_t so we can detect corruption
138  * @func: name of the function the allocation was made from
139  * @line: line number of the file the allocation was made from
140  * @size: size of the allocation in bytes
141  * @caller: Caller of the function for which memory is allocated
142  * @header: a known value, used to detect out-of-bounds access
143  * @time: timestamp at which allocation was made
144  */
145 struct qdf_mem_header {
146 	qdf_list_node_t node;
147 	enum qdf_debug_domain domain;
148 	uint8_t freed;
149 	char func[QDF_MEM_FUNC_NAME_SIZE];
150 	uint32_t line;
151 	uint32_t size;
152 	void *caller;
153 	uint64_t header;
154 	uint64_t time;
155 };
156 
157 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
158 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
159 
160 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
161 {
162 	return (struct qdf_mem_header *)ptr - 1;
163 }
164 
165 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
166 							    qdf_size_t size)
167 {
168 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
169 }
170 
171 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
172 {
173 	return (uint64_t *)((void *)(header + 1) + header->size);
174 }
175 
176 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
177 {
178 	return (void *)(header + 1);
179 }
180 
181 /* number of bytes needed for the qdf memory debug information */
182 #define QDF_MEM_DEBUG_SIZE \
183 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
184 
185 /* number of bytes needed for the qdf dma memory debug information */
186 #define QDF_DMA_MEM_DEBUG_SIZE \
187 	(sizeof(struct qdf_mem_header))
188 
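/*
 * Illustrative layout of a debug allocation (a sketch derived from the
 * accessors above; it assumes the compiler inserts no extra padding
 * between the regions):
 *
 *   kzalloc(size + QDF_MEM_DEBUG_SIZE) returns
 *   +------------------------+-------------------+----------------------+
 *   | struct qdf_mem_header  | caller's payload  | WLAN_MEM_TRAILER     |
 *   | (node, func, line ...) | (size bytes)      | (sizeof(uint64_t))   |
 *   +------------------------+-------------------+----------------------+
 *   ^header                  ^qdf_mem_get_ptr()  ^qdf_mem_get_trailer()
 *
 * qdf_mem_get_header(ptr) steps back one header from the pointer handed
 * to callers, so validation can run on free without any extra lookup.
 * DMA allocations instead place the header after the buffer
 * (qdf_mem_dma_get_header()) and carry no trailer.
 */
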
189 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
190 {
191 	QDF_BUG(header);
192 	if (!header)
193 		return;
194 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
195 }
196 
197 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
198 				const char *func, uint32_t line, void *caller)
199 {
200 	QDF_BUG(header);
201 	if (!header)
202 		return;
203 
204 	header->domain = qdf_debug_domain_get();
205 	header->freed = false;
206 
207 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
208 
209 	header->line = line;
210 	header->size = size;
211 	header->caller = caller;
212 	header->header = WLAN_MEM_HEADER;
213 	header->time = qdf_get_log_timestamp();
214 }
215 
216 enum qdf_mem_validation_bitmap {
217 	QDF_MEM_BAD_HEADER = 1 << 0,
218 	QDF_MEM_BAD_TRAILER = 1 << 1,
219 	QDF_MEM_BAD_SIZE = 1 << 2,
220 	QDF_MEM_DOUBLE_FREE = 1 << 3,
221 	QDF_MEM_BAD_FREED = 1 << 4,
222 	QDF_MEM_BAD_NODE = 1 << 5,
223 	QDF_MEM_BAD_DOMAIN = 1 << 6,
224 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
225 };
226 
227 static enum qdf_mem_validation_bitmap
228 qdf_mem_trailer_validate(struct qdf_mem_header *header)
229 {
230 	enum qdf_mem_validation_bitmap error_bitmap = 0;
231 
232 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
233 		error_bitmap |= QDF_MEM_BAD_TRAILER;
234 	return error_bitmap;
235 }
236 
237 static enum qdf_mem_validation_bitmap
238 qdf_mem_header_validate(struct qdf_mem_header *header,
239 			enum qdf_debug_domain domain)
240 {
241 	enum qdf_mem_validation_bitmap error_bitmap = 0;
242 
243 	if (header->header != WLAN_MEM_HEADER)
244 		error_bitmap |= QDF_MEM_BAD_HEADER;
245 
246 	if (header->size > QDF_MEM_MAX_MALLOC)
247 		error_bitmap |= QDF_MEM_BAD_SIZE;
248 
249 	if (header->freed == true)
250 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
251 	else if (header->freed)
252 		error_bitmap |= QDF_MEM_BAD_FREED;
253 
254 	if (!qdf_list_node_in_any_list(&header->node))
255 		error_bitmap |= QDF_MEM_BAD_NODE;
256 
257 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
258 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
259 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
260 	else if (header->domain != domain)
261 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
262 
263 	return error_bitmap;
264 }
265 
266 static void
267 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
268 			    enum qdf_debug_domain current_domain,
269 			    enum qdf_mem_validation_bitmap error_bitmap,
270 			    const char *func,
271 			    uint32_t line)
272 {
273 	if (!error_bitmap)
274 		return;
275 
276 	if (error_bitmap & QDF_MEM_BAD_HEADER)
277 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
278 			header->header, WLAN_MEM_HEADER);
279 
280 	if (error_bitmap & QDF_MEM_BAD_SIZE)
281 		qdf_err("Corrupted memory size %u (expected < %d)",
282 			header->size, QDF_MEM_MAX_MALLOC);
283 
284 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
285 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
286 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
287 
288 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
289 		qdf_err("Memory has previously been freed");
290 
291 	if (error_bitmap & QDF_MEM_BAD_FREED)
292 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
293 
294 	if (error_bitmap & QDF_MEM_BAD_NODE)
295 		qdf_err("Corrupted memory header node or double free");
296 
297 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
298 		qdf_err("Corrupted memory domain 0x%x", header->domain);
299 
300 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
301 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
302 			qdf_debug_domain_name(header->domain), header->domain,
303 			qdf_debug_domain_name(current_domain), current_domain);
304 
305 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
306 }
307 
308 /**
309  * struct __qdf_mem_info - memory statistics
310  * @func: the function which allocated memory
311  * @line: the line at which allocation happened
312  * @size: the size of allocation
313  * @caller: Address of the caller function
314  * @count: how many allocations of same type
315  * @time: timestamp at which allocation happened
316  */
317 struct __qdf_mem_info {
318 	char func[QDF_MEM_FUNC_NAME_SIZE];
319 	uint32_t line;
320 	uint32_t size;
321 	void *caller;
322 	uint32_t count;
323 	uint64_t time;
324 };
325 
326 /*
327  * The table depth defines the de-duplication proximity scope.
328  * A deeper table takes more time, so choose an optimal value.
329  */
330 #define QDF_MEM_STAT_TABLE_SIZE 8
331 
332 /**
333  * qdf_mem_debug_print_header() - memory debug header print logic
334  * @print: the print adapter function
335  * @print_priv: the private data to be consumed by @print
336  * @threshold: the threshold value set by user to list top allocations
337  *
338  * Return: None
339  */
340 static void qdf_mem_debug_print_header(qdf_abstract_print print,
341 				       void *print_priv,
342 				       uint32_t threshold)
343 {
344 	if (threshold)
345 		print(print_priv, "APIs requested allocations >= %u times",
346 		      threshold);
347 	print(print_priv,
348 	      "--------------------------------------------------------------");
349 	print(print_priv,
350 	      " count    size     total    filename     caller    timestamp");
351 	print(print_priv,
352 	      "--------------------------------------------------------------");
353 }
354 
355 /**
356  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
357  * @table: the memory metadata table to insert into
358  * @meta: the memory metadata to insert
359  *
360  * Return: true if the table is full after inserting, false otherwise
361  */
362 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
363 				      struct qdf_mem_header *meta)
364 {
365 	int i;
366 
367 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
368 		if (!table[i].count) {
369 			qdf_str_lcopy(table[i].func, meta->func,
370 				      QDF_MEM_FUNC_NAME_SIZE);
371 			table[i].line = meta->line;
372 			table[i].size = meta->size;
373 			table[i].count = 1;
374 			table[i].caller = meta->caller;
375 			table[i].time = meta->time;
376 			break;
377 		}
378 
379 		if (qdf_str_eq(table[i].func, meta->func) &&
380 		    table[i].line == meta->line &&
381 		    table[i].size == meta->size &&
382 		    table[i].caller == meta->caller) {
383 			table[i].count++;
384 			break;
385 		}
386 	}
387 
388 	/* return true if the table is now full */
389 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
390 }
391 
392 /**
393  * qdf_mem_domain_print() - output agnostic memory domain print logic
394  * @domain: the memory domain to print
395  * @print: the print adapter function
396  * @print_priv: the private data to be consumed by @print
397  * @threshold: the threshold value set by the user to list top allocations
398  * @mem_print: pointer to function which prints the memory allocation data
399  *
400  * Return: None
401  */
402 static void qdf_mem_domain_print(qdf_list_t *domain,
403 				 qdf_abstract_print print,
404 				 void *print_priv,
405 				 uint32_t threshold,
406 				 void (*mem_print)(struct __qdf_mem_info *,
407 						   qdf_abstract_print,
408 						   void *, uint32_t))
409 {
410 	QDF_STATUS status;
411 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
412 	qdf_list_node_t *node;
413 
414 	qdf_mem_zero(table, sizeof(table));
415 	qdf_mem_debug_print_header(print, print_priv, threshold);
416 
417 	/* hold lock while inserting to avoid use-after-free of the metadata */
418 	qdf_spin_lock(&qdf_mem_list_lock);
419 	status = qdf_list_peek_front(domain, &node);
420 	while (QDF_IS_STATUS_SUCCESS(status)) {
421 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
422 		bool is_full = qdf_mem_meta_table_insert(table, meta);
423 
424 		qdf_spin_unlock(&qdf_mem_list_lock);
425 
426 		if (is_full) {
427 			(*mem_print)(table, print, print_priv, threshold);
428 			qdf_mem_zero(table, sizeof(table));
429 		}
430 
431 		qdf_spin_lock(&qdf_mem_list_lock);
432 		status = qdf_list_peek_next(domain, node, &node);
433 	}
434 	qdf_spin_unlock(&qdf_mem_list_lock);
435 
436 	(*mem_print)(table, print, print_priv, threshold);
437 }
438 
439 /**
440  * qdf_mem_meta_table_print() - memory metadata table print logic
441  * @table: the memory metadata table to print
442  * @print: the print adapter function
443  * @print_priv: the private data to be consumed by @print
444  * @threshold: the threshold value set by user to list top allocations
445  *
446  * Return: None
447  */
448 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
449 				     qdf_abstract_print print,
450 				     void *print_priv,
451 				     uint32_t threshold)
452 {
453 	int i;
454 	char debug_str[QDF_DEBUG_STRING_SIZE];
455 	size_t len = 0;
456 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
457 
458 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
459 			     "%s", debug_prefix);
460 
461 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
462 		if (!table[i].count)
463 			break;
464 
465 		print(print_priv,
466 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
467 		      table[i].count,
468 		      table[i].size,
469 		      table[i].count * table[i].size,
470 		      table[i].func,
471 		      table[i].line, table[i].caller,
472 		      table[i].time);
473 		len += qdf_scnprintf(debug_str + len,
474 				     sizeof(debug_str) - len,
475 				     " @ %s:%u %pS",
476 				     table[i].func,
477 				     table[i].line,
478 				     table[i].caller);
479 	}
480 	print(print_priv, "%s", debug_str);
481 }
482 
483 static int qdf_err_printer(void *priv, const char *fmt, ...)
484 {
485 	va_list args;
486 
487 	va_start(args, fmt);
488 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
489 	va_end(args);
490 
491 	return 0;
492 }
493 
494 #endif /* MEMORY_DEBUG */
495 
496 bool prealloc_disabled = 1;
497 qdf_declare_param(prealloc_disabled, bool);
498 qdf_export_symbol(prealloc_disabled);
499 
500 /**
501  * qdf_prealloc_disabled_config_get() - Get the user configuration of
502  *                                       prealloc_disabled
503  *
504  * Return: value of prealloc_disabled qdf module argument
505  */
506 bool qdf_prealloc_disabled_config_get(void)
507 {
508 	return prealloc_disabled;
509 }
510 
511 qdf_export_symbol(qdf_prealloc_disabled_config_get);
512 
513 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
514 /**
515  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
516  * @str_value: value of the module param
517  *
518  * This function will set qdf module param prealloc_disabled
519  *
520  * Return: QDF_STATUS_SUCCESS on Success
521  */
522 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
523 {
524 	QDF_STATUS status;
525 
526 	status = qdf_bool_parse(str_value, &prealloc_disabled);
527 	return status;
528 }
529 #endif
530 
531 #if defined WLAN_DEBUGFS
532 
533 /* Debugfs root directory for qdf_mem */
534 static struct dentry *qdf_mem_debugfs_root;
535 
536 #ifdef MEMORY_DEBUG
537 static int seq_printf_printer(void *priv, const char *fmt, ...)
538 {
539 	struct seq_file *file = priv;
540 	va_list args;
541 
542 	va_start(args, fmt);
543 	seq_vprintf(file, fmt, args);
544 	seq_puts(file, "\n");
545 	va_end(args);
546 
547 	return 0;
548 }
549 
550 /**
551  * qdf_print_major_alloc() - memory metadata table print logic
552  * @table: the memory metadata table to print
553  * @print: the print adapter function
554  * @print_priv: the private data to be consumed by @print
555  * @threshold: the threshold value set by the user to list top allocations
556  *
557  * Return: None
558  */
559 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
560 				  qdf_abstract_print print,
561 				  void *print_priv,
562 				  uint32_t threshold)
563 {
564 	int i;
565 
566 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
567 		if (!table[i].count)
568 			break;
569 		if (table[i].count >= threshold)
570 			print(print_priv,
571 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
572 			      table[i].count,
573 			      table[i].size,
574 			      table[i].count * table[i].size,
575 			      table[i].func,
576 			      table[i].line, table[i].caller,
577 			      table[i].time);
578 	}
579 }
580 
581 /**
582  * qdf_mem_seq_start() - sequential callback to start
583  * @seq: seq_file handle
584  * @pos: The start position of the sequence
585  *
586  * Return: iterator pointer, or NULL if iteration is complete
587  */
588 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
589 {
590 	enum qdf_debug_domain domain = *pos;
591 
592 	if (!qdf_debug_domain_valid(domain))
593 		return NULL;
594 
595 	/* just use the current position as our iterator */
596 	return pos;
597 }
598 
599 /**
600  * qdf_mem_seq_next() - next sequential callback
601  * @seq: seq_file handle
602  * @v: the current iterator
603  * @pos: the current position
604  *
605  * Get the next node and release previous node.
606  *
607  * Return: iterator pointer, or NULL if iteration is complete
608  */
609 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
610 {
611 	++*pos;
612 
613 	return qdf_mem_seq_start(seq, pos);
614 }
615 
616 /**
617  * qdf_mem_seq_stop() - stop sequential callback
618  * @seq: seq_file handle
619  * @v: current iterator
620  *
621  * Return: None
622  */
623 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
624 
625 /**
626  * qdf_mem_seq_show() - print sequential callback
627  * @seq: seq_file handle
628  * @v: current iterator
629  *
630  * Return: 0 - success
631  */
632 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
633 {
634 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
635 
636 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
637 		   qdf_debug_domain_name(domain_id), domain_id);
638 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
639 			     seq_printf_printer,
640 			     seq,
641 			     0,
642 			     qdf_mem_meta_table_print);
643 
644 	return 0;
645 }
646 
647 /* sequential file operation table */
648 static const struct seq_operations qdf_mem_seq_ops = {
649 	.start = qdf_mem_seq_start,
650 	.next  = qdf_mem_seq_next,
651 	.stop  = qdf_mem_seq_stop,
652 	.show  = qdf_mem_seq_show,
653 };
654 
655 
656 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
657 {
658 	return seq_open(file, &qdf_mem_seq_ops);
659 }
660 
661 /**
662  * qdf_major_alloc_show() - print sequential callback
663  * @seq: seq_file handle
664  * @v: current iterator
665  *
666  * Return: 0 - success
667  */
668 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
669 {
670 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
671 	struct major_alloc_priv *priv;
672 	qdf_list_t *list;
673 
674 	priv = (struct major_alloc_priv *)seq->private;
675 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
676 		   qdf_debug_domain_name(domain_id), domain_id);
677 
678 	switch (priv->type) {
679 	case LIST_TYPE_MEM:
680 		list = qdf_mem_list_get(domain_id);
681 		break;
682 	case LIST_TYPE_DMA:
683 		list = qdf_mem_dma_list(domain_id);
684 		break;
685 	default:
686 		list = NULL;
687 		break;
688 	}
689 
690 	if (list)
691 		qdf_mem_domain_print(list,
692 				     seq_printf_printer,
693 				     seq,
694 				     priv->threshold,
695 				     qdf_print_major_alloc);
696 
697 	return 0;
698 }
699 
700 /* sequential file operation table created to track major allocs */
701 static const struct seq_operations qdf_major_allocs_seq_ops = {
702 	.start = qdf_mem_seq_start,
703 	.next = qdf_mem_seq_next,
704 	.stop = qdf_mem_seq_stop,
705 	.show = qdf_major_alloc_show,
706 };
707 
708 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
709 {
710 	void *private = inode->i_private;
711 	struct seq_file *seq;
712 	int rc;
713 
714 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
715 	if (rc == 0) {
716 		seq = file->private_data;
717 		seq->private = private;
718 	}
719 	return rc;
720 }
721 
722 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
723 					     const char __user *user_buf,
724 					     size_t count,
725 					     loff_t *pos)
726 {
727 	char buf[32];
728 	ssize_t buf_size;
729 	uint32_t threshold;
730 	struct seq_file *seq = file->private_data;
731 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
732 
733 	buf_size = min(count, (sizeof(buf) - 1));
734 	if (buf_size <= 0)
735 		return 0;
736 	if (copy_from_user(buf, user_buf, buf_size))
737 		return -EFAULT;
738 	buf[buf_size] = '\0';
739 	if (!kstrtou32(buf, 10, &threshold))
740 		priv->threshold = threshold;
741 	return buf_size;
742 }
743 
744 /**
745  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
746  * @threshold: the threshold value set by the user to list top allocations
747  * @print: the print adapter function
748  * @print_priv: the private data to be consumed by @print
749  * @mem_print: pointer to function which prints the memory allocation data
750  *
751  * Return: None
752  */
753 static void
754 qdf_print_major_nbuf_allocs(uint32_t threshold,
755 			    qdf_abstract_print print,
756 			    void *print_priv,
757 			    void (*mem_print)(struct __qdf_mem_info *,
758 					      qdf_abstract_print,
759 					      void *, uint32_t))
760 {
761 	uint32_t nbuf_iter;
762 	unsigned long irq_flag = 0;
763 	QDF_NBUF_TRACK *p_node;
764 	QDF_NBUF_TRACK *p_prev;
765 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
766 	struct qdf_mem_header meta;
767 	bool is_full;
768 
769 	qdf_mem_zero(table, sizeof(table));
770 	qdf_mem_debug_print_header(print, print_priv, threshold);
771 
772 	if (is_initial_mem_debug_disabled)
773 		return;
774 
775 	qdf_rl_info("major nbuf print with threshold %u", threshold);
776 
777 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
778 	     nbuf_iter++) {
779 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
780 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
781 		while (p_node) {
782 			meta.line = p_node->line_num;
783 			meta.size = p_node->size;
784 			meta.caller = NULL;
785 			meta.time = p_node->time;
786 			qdf_str_lcopy(meta.func, p_node->func_name,
787 				      QDF_MEM_FUNC_NAME_SIZE);
788 
789 			is_full = qdf_mem_meta_table_insert(table, &meta);
790 
791 			if (is_full) {
792 				(*mem_print)(table, print,
793 					     print_priv, threshold);
794 				qdf_mem_zero(table, sizeof(table));
795 			}
796 
797 			p_prev = p_node;
798 			p_node = p_node->p_next;
799 		}
800 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
801 	}
802 
803 	(*mem_print)(table, print, print_priv, threshold);
804 
805 	qdf_rl_info("major nbuf print end");
806 }
807 
808 /**
809  * qdf_major_nbuf_alloc_show() - print sequential callback
810  * @seq: seq_file handle
811  * @v: current iterator
812  *
813  * Return: 0 - success
814  */
815 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
816 {
817 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
818 
819 	if (!priv) {
820 		qdf_err("priv is null");
821 		return -EINVAL;
822 	}
823 
824 	qdf_print_major_nbuf_allocs(priv->threshold,
825 				    seq_printf_printer,
826 				    seq,
827 				    qdf_print_major_alloc);
828 
829 	return 0;
830 }
831 
832 /**
833  * qdf_nbuf_seq_start() - sequential callback to start
834  * @seq: seq_file handle
835  * @pos: The start position of the sequence
836  *
837  * Return: iterator pointer, or NULL if iteration is complete
838  */
839 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
840 {
841 	enum qdf_debug_domain domain = *pos;
842 
843 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
844 		return NULL;
845 
846 	return pos;
847 }
848 
849 /**
850  * qdf_nbuf_seq_next() - next sequential callback
851  * @seq: seq_file handle
852  * @v: the current iterator
853  * @pos: the current position
854  *
855  * Get the next node and release previous node.
856  *
857  * Return: iterator pointer, or NULL if iteration is complete
858  */
859 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
860 {
861 	++*pos;
862 
863 	return qdf_nbuf_seq_start(seq, pos);
864 }
865 
866 /**
867  * qdf_nbuf_seq_stop() - stop sequential callback
868  * @seq: seq_file handle
869  * @v: current iterator
870  *
871  * Return: None
872  */
873 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
874 
875 /* sequential file operation table created to track major skb allocs */
876 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
877 	.start = qdf_nbuf_seq_start,
878 	.next = qdf_nbuf_seq_next,
879 	.stop = qdf_nbuf_seq_stop,
880 	.show = qdf_major_nbuf_alloc_show,
881 };
882 
883 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
884 {
885 	void *private = inode->i_private;
886 	struct seq_file *seq;
887 	int rc;
888 
889 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
890 	if (rc == 0) {
891 		seq = file->private_data;
892 		seq->private = private;
893 	}
894 	return rc;
895 }
896 
897 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
898 						  const char __user *user_buf,
899 						  size_t count,
900 						  loff_t *pos)
901 {
902 	char buf[32];
903 	ssize_t buf_size;
904 	uint32_t threshold;
905 	struct seq_file *seq = file->private_data;
906 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
907 
908 	buf_size = min(count, (sizeof(buf) - 1));
909 	if (buf_size <= 0)
910 		return 0;
911 	if (copy_from_user(buf, user_buf, buf_size))
912 		return -EFAULT;
913 	buf[buf_size] = '\0';
914 	if (!kstrtou32(buf, 10, &threshold))
915 		priv->threshold = threshold;
916 	return buf_size;
917 }
918 
919 /* file operation table for listing major allocs */
920 static const struct file_operations fops_qdf_major_allocs = {
921 	.owner = THIS_MODULE,
922 	.open = qdf_major_allocs_open,
923 	.read = seq_read,
924 	.llseek = seq_lseek,
925 	.release = seq_release,
926 	.write = qdf_major_alloc_set_threshold,
927 };
928 
929 /* debugfs file operation table */
930 static const struct file_operations fops_qdf_mem_debugfs = {
931 	.owner = THIS_MODULE,
932 	.open = qdf_mem_debugfs_open,
933 	.read = seq_read,
934 	.llseek = seq_lseek,
935 	.release = seq_release,
936 };
937 
938 /* file operation table for listing major allocs */
939 static const struct file_operations fops_qdf_nbuf_major_allocs = {
940 	.owner = THIS_MODULE,
941 	.open = qdf_major_nbuf_allocs_open,
942 	.read = seq_read,
943 	.llseek = seq_lseek,
944 	.release = seq_release,
945 	.write = qdf_major_nbuf_alloc_set_threshold,
946 };
947 
948 static struct major_alloc_priv mem_priv = {
949 	/* List type set to mem */
950 	LIST_TYPE_MEM,
951 	/* initial threshold to list APIs which allocates mem >= 50 times */
952 	50
953 };
954 
955 static struct major_alloc_priv dma_priv = {
956 	/* List type set to DMA */
957 	LIST_TYPE_DMA,
958 	/* initial threshold to list APIs which allocates dma >= 50 times */
959 	50
960 };
961 
962 static struct major_alloc_priv nbuf_priv = {
963 	/* List type set to NBUF */
964 	LIST_TYPE_NBUF,
965 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
966 	50
967 };
968 
969 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
970 {
971 	if (is_initial_mem_debug_disabled)
972 		return QDF_STATUS_SUCCESS;
973 
974 	if (!qdf_mem_debugfs_root)
975 		return QDF_STATUS_E_FAILURE;
976 
977 	debugfs_create_file("list",
978 			    S_IRUSR,
979 			    qdf_mem_debugfs_root,
980 			    NULL,
981 			    &fops_qdf_mem_debugfs);
982 
983 	debugfs_create_file("major_mem_allocs",
984 			    0600,
985 			    qdf_mem_debugfs_root,
986 			    &mem_priv,
987 			    &fops_qdf_major_allocs);
988 
989 	debugfs_create_file("major_dma_allocs",
990 			    0600,
991 			    qdf_mem_debugfs_root,
992 			    &dma_priv,
993 			    &fops_qdf_major_allocs);
994 
995 	debugfs_create_file("major_nbuf_allocs",
996 			    0600,
997 			    qdf_mem_debugfs_root,
998 			    &nbuf_priv,
999 			    &fops_qdf_nbuf_major_allocs);
1000 
1001 	return QDF_STATUS_SUCCESS;
1002 }
1003 
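/*
 * Example interaction with the entries created above (a sketch; it assumes
 * debugfs is mounted at /sys/kernel/debug, and the directory created by
 * qdf_debugfs_get_root() is shown as the placeholder <qdf-root>):
 *
 *   # list only call sites with >= 100 outstanding kmalloc allocations
 *   echo 100 > /sys/kernel/debug/<qdf-root>/mem/major_mem_allocs
 *   cat /sys/kernel/debug/<qdf-root>/mem/major_mem_allocs
 *
 * Writes are parsed by qdf_major_alloc_set_threshold() (base-10 via
 * kstrtou32); reads walk the per-domain allocation lists through the
 * seq_file operations registered in fops_qdf_major_allocs.
 */
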
1004 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1005 {
1006 	return QDF_STATUS_SUCCESS;
1007 }
1008 
1009 #else /* MEMORY_DEBUG */
1010 
1011 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1012 {
1013 	return QDF_STATUS_E_NOSUPPORT;
1014 }
1015 
1016 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1017 {
1018 	return QDF_STATUS_E_NOSUPPORT;
1019 }
1020 
1021 #endif /* MEMORY_DEBUG */
1022 
1023 
1024 static void qdf_mem_debugfs_exit(void)
1025 {
1026 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1027 	qdf_mem_debugfs_root = NULL;
1028 }
1029 
1030 static QDF_STATUS qdf_mem_debugfs_init(void)
1031 {
1032 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1033 
1034 	if (!qdf_debugfs_root)
1035 		return QDF_STATUS_E_FAILURE;
1036 
1037 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1038 
1039 	if (!qdf_mem_debugfs_root)
1040 		return QDF_STATUS_E_FAILURE;
1041 
1042 
1043 	debugfs_create_atomic_t("kmalloc",
1044 				S_IRUSR,
1045 				qdf_mem_debugfs_root,
1046 				&qdf_mem_stat.kmalloc);
1047 
1048 	debugfs_create_atomic_t("dma",
1049 				S_IRUSR,
1050 				qdf_mem_debugfs_root,
1051 				&qdf_mem_stat.dma);
1052 
1053 	debugfs_create_atomic_t("skb",
1054 				S_IRUSR,
1055 				qdf_mem_debugfs_root,
1056 				&qdf_mem_stat.skb);
1057 
1058 	return QDF_STATUS_SUCCESS;
1059 }
1060 
1061 #else /* WLAN_DEBUGFS */
1062 
1063 static QDF_STATUS qdf_mem_debugfs_init(void)
1064 {
1065 	return QDF_STATUS_E_NOSUPPORT;
1066 }
1067 static void qdf_mem_debugfs_exit(void) {}
1068 
1069 
1070 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1071 {
1072 	return QDF_STATUS_E_NOSUPPORT;
1073 }
1074 
1075 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1076 {
1077 	return QDF_STATUS_E_NOSUPPORT;
1078 }
1079 
1080 #endif /* WLAN_DEBUGFS */
1081 
1082 void qdf_mem_kmalloc_inc(qdf_size_t size)
1083 {
1084 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1085 }
1086 
1087 static void qdf_mem_dma_inc(qdf_size_t size)
1088 {
1089 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1090 }
1091 
1092 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1093 void qdf_mem_skb_inc(qdf_size_t size)
1094 {
1095 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1096 }
1097 
1098 void qdf_mem_skb_dec(qdf_size_t size)
1099 {
1100 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1101 }
1102 
1103 void qdf_mem_skb_total_inc(qdf_size_t size)
1104 {
1105 	int32_t skb_mem_max = 0;
1106 
1107 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1108 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1109 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1110 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1111 }
1112 
1113 void qdf_mem_skb_total_dec(qdf_size_t size)
1114 {
1115 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1116 }
1117 
1118 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1119 {
1120 	int32_t curr_dp_tx_skb_mem_max = 0;
1121 
1122 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1123 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1124 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1125 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1126 }
1127 
1128 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1129 {
1130 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1131 }
1132 
1133 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1134 {
1135 	int32_t curr_dp_rx_skb_mem_max = 0;
1136 
1137 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1138 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1139 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1140 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1141 }
1142 
1143 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1144 {
1145 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1146 }
1147 
1148 void qdf_mem_dp_tx_skb_cnt_inc(void)
1149 {
1150 	int32_t curr_dp_tx_skb_count_max = 0;
1151 
1152 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1153 	curr_dp_tx_skb_count_max =
1154 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1155 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1156 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1157 }
1158 
1159 void qdf_mem_dp_tx_skb_cnt_dec(void)
1160 {
1161 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1162 }
1163 
1164 void qdf_mem_dp_rx_skb_cnt_inc(void)
1165 {
1166 	int32_t curr_dp_rx_skb_count_max = 0;
1167 
1168 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1169 	curr_dp_rx_skb_count_max =
1170 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1171 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1172 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1173 }
1174 
1175 void qdf_mem_dp_rx_skb_cnt_dec(void)
1176 {
1177 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1178 }
1179 #endif
1180 
1181 void qdf_mem_kmalloc_dec(qdf_size_t size)
1182 {
1183 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1184 }
1185 
1186 static inline void qdf_mem_dma_dec(qdf_size_t size)
1187 {
1188 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1189 }
1190 
1191 /**
1192  * __qdf_mempool_init() - Create and initialize memory pool
1193  *
1194  * @osdev: platform device object
1195  * @pool_addr: address of the pool created
1196  * @elem_cnt: no. of elements in pool
1197  * @elem_size: size of each pool element in bytes
1198  * @flags: flags
1199  *
1200  * Return: 0 on success, error code on failure
1201  */
1202 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1203 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1204 {
1205 	__qdf_mempool_ctxt_t *new_pool = NULL;
1206 	u_int32_t align = L1_CACHE_BYTES;
1207 	unsigned long aligned_pool_mem;
1208 	int pool_id;
1209 	int i;
1210 
1211 	if (prealloc_disabled) {
1212 		/* TBD: We can maintain a list of pools in qdf_device_t
1213 		 * to help debugging
1214 		 * when pre-allocation is not enabled
1215 		 */
1216 		new_pool = (__qdf_mempool_ctxt_t *)
1217 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1218 		if (!new_pool)
1219 			return -ENOMEM;
1220 
1221 		memset(new_pool, 0, sizeof(*new_pool));
1222 		/* TBD: define flags for zeroing buffers etc */
1223 		new_pool->flags = flags;
1224 		new_pool->elem_size = elem_size;
1225 		new_pool->max_elem = elem_cnt;
1226 		*pool_addr = new_pool;
1227 		return 0;
1228 	}
1229 
1230 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1231 		if (!osdev->mem_pool[pool_id])
1232 			break;
1233 	}
1234 
1235 	if (pool_id == MAX_MEM_POOLS)
1236 		return -ENOMEM;
1237 
1238 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1239 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1240 	if (!new_pool)
1241 		return -ENOMEM;
1242 
1243 	memset(new_pool, 0, sizeof(*new_pool));
1244 	/* TBD: define flags for zeroing buffers etc */
1245 	new_pool->flags = flags;
1246 	new_pool->pool_id = pool_id;
1247 
1248 	/* Round up the element size to cacheline */
1249 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1250 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1251 				((align)?(align - 1):0);
1252 
1253 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1254 	if (!new_pool->pool_mem) {
1255 			/* TBD: Check if we need get_free_pages above */
1256 		kfree(new_pool);
1257 		osdev->mem_pool[pool_id] = NULL;
1258 		return -ENOMEM;
1259 	}
1260 
1261 	spin_lock_init(&new_pool->lock);
1262 
1263 	/* Initialize free list */
1264 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1265 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1266 	STAILQ_INIT(&new_pool->free_list);
1267 
1268 	for (i = 0; i < elem_cnt; i++)
1269 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1270 			(mempool_elem_t *)(aligned_pool_mem +
1271 			(new_pool->elem_size * i)), mempool_entry);
1272 
1273 
1274 	new_pool->free_cnt = elem_cnt;
1275 	*pool_addr = new_pool;
1276 	return 0;
1277 }
1278 qdf_export_symbol(__qdf_mempool_init);
1279 
1280 /**
1281  * __qdf_mempool_destroy() - Destroy memory pool
1282  * @osdev: platform device object
1283  * @pool: handle to the memory pool
1284  *
1285  * Return: None
1286  */
1287 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1288 {
1289 	int pool_id = 0;
1290 
1291 	if (!pool)
1292 		return;
1293 
1294 	if (prealloc_disabled) {
1295 		kfree(pool);
1296 		return;
1297 	}
1298 
1299 	pool_id = pool->pool_id;
1300 
1301 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1302 	kfree(pool->pool_mem);
1303 	kfree(pool);
1304 	osdev->mem_pool[pool_id] = NULL;
1305 }
1306 qdf_export_symbol(__qdf_mempool_destroy);
1307 
1308 /**
1309  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1310  *
1311  * @osdev: platform device object
1312  * @pool: handle to the memory pool
1313  *
1314  * Return: Pointer to the allocated element or NULL if the pool is empty
1315  */
1316 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1317 {
1318 	void *buf = NULL;
1319 
1320 	if (!pool)
1321 		return NULL;
1322 
1323 	if (prealloc_disabled)
1324 		return  qdf_mem_malloc(pool->elem_size);
1325 
1326 	spin_lock_bh(&pool->lock);
1327 
1328 	buf = STAILQ_FIRST(&pool->free_list);
1329 	if (buf) {
1330 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1331 		pool->free_cnt--;
1332 	}
1333 
1334 	/* TBD: Update free count if debug is enabled */
1335 	spin_unlock_bh(&pool->lock);
1336 
1337 	return buf;
1338 }
1339 qdf_export_symbol(__qdf_mempool_alloc);
1340 
1341 /**
1342  * __qdf_mempool_free() - Free a memory pool element
1343  * @osdev: Platform device object
1344  * @pool: Handle to memory pool
1345  * @buf: Element to be freed
1346  *
1347  * Return: None
1348  */
1349 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1350 {
1351 	if (!pool)
1352 		return;
1353 
1354 
1355 	if (prealloc_disabled)
1356 		return qdf_mem_free(buf);
1357 
1358 	spin_lock_bh(&pool->lock);
1359 	pool->free_cnt++;
1360 
1361 	STAILQ_INSERT_TAIL
1362 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1363 	spin_unlock_bh(&pool->lock);
1364 }
1365 qdf_export_symbol(__qdf_mempool_free);
1366 
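/*
 * Illustrative lifecycle of the pool API above (a sketch only; "osdev" is
 * assumed to be a valid qdf_device_t and "struct foo" is a hypothetical
 * element type; most callers reach these __qdf_mempool_* entry points
 * through the qdf_mempool_* wrappers):
 *
 *   __qdf_mempool_t pool;
 *   struct foo *elem;
 *
 *   if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *           return -ENOMEM;
 *
 *   elem = __qdf_mempool_alloc(osdev, pool);
 *   if (elem) {
 *           ... use the element ...
 *           __qdf_mempool_free(osdev, pool, elem);
 *   }
 *
 *   __qdf_mempool_destroy(osdev, pool);
 */
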
1367 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
1368 static bool qdf_might_be_prealloc(void *ptr)
1369 {
1370 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1371 		return true;
1372 	else
1373 		return false;
1374 }
1375 
1376 /**
1377  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1378  * @size: the number of bytes to allocate
1379  *
1380  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1381  * a chunk of pre-allocated memory. If size is less than or equal to
1382  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1383  *
1384  * Return: NULL on failure, non-NULL on success
1385  */
1386 static void *qdf_mem_prealloc_get(size_t size)
1387 {
1388 	void *ptr;
1389 
1390 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1391 		return NULL;
1392 
1393 	ptr = wcnss_prealloc_get(size);
1394 	if (!ptr)
1395 		return NULL;
1396 
1397 	memset(ptr, 0, size);
1398 
1399 	return ptr;
1400 }
1401 
1402 static inline bool qdf_mem_prealloc_put(void *ptr)
1403 {
1404 	return wcnss_prealloc_put(ptr);
1405 }
1406 #else
1407 static bool qdf_might_be_prealloc(void *ptr)
1408 {
1409 	return false;
1410 }
1411 
1412 static inline void *qdf_mem_prealloc_get(size_t size)
1413 {
1414 	return NULL;
1415 }
1416 
1417 static inline bool qdf_mem_prealloc_put(void *ptr)
1418 {
1419 	return false;
1420 }
1421 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
1422 
1423 static int qdf_mem_malloc_flags(void)
1424 {
1425 	if (in_interrupt() || irqs_disabled() || in_atomic())
1426 		return GFP_ATOMIC;
1427 
1428 	return GFP_KERNEL;
1429 }
1430 
1431 /* External Function implementation */
1432 #ifdef MEMORY_DEBUG
1433 /**
1434  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1435  *
1436  * Return: value of mem_debug_disabled qdf module argument
1437  */
1438 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1439 bool qdf_mem_debug_config_get(void)
1440 {
1441 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1442 	return false;
1443 }
1444 #else
1445 bool qdf_mem_debug_config_get(void)
1446 {
1447 	return mem_debug_disabled;
1448 }
1449 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1450 
1451 /**
1452  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
1453  * @str_value: value of the module param
1454  *
1455  * This function will set the qdf module param mem_debug_disabled
1456  *
1457  * Return: QDF_STATUS_SUCCESS on Success
1458  */
1459 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1460 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1461 {
1462 	QDF_STATUS status;
1463 
1464 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1465 	return status;
1466 }
1467 #endif
1468 
1469 /**
1470  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1471  *
1472  * Return: none
1473  */
1474 static void qdf_mem_debug_init(void)
1475 {
1476 	int i;
1477 
1478 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1479 
1480 	if (is_initial_mem_debug_disabled)
1481 		return;
1482 
1483 	/* Initializing the list with maximum size of 60000 */
1484 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1485 		qdf_list_create(&qdf_mem_domains[i], 60000);
1486 	qdf_spinlock_create(&qdf_mem_list_lock);
1487 
1488 	/* dma */
1489 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1490 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1491 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1492 }
1493 
1494 static uint32_t
1495 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1496 			       qdf_list_t *mem_list)
1497 {
1498 	if (is_initial_mem_debug_disabled)
1499 		return 0;
1500 
1501 	if (qdf_list_empty(mem_list))
1502 		return 0;
1503 
1504 	qdf_err("Memory leaks detected in %s domain!",
1505 		qdf_debug_domain_name(domain));
1506 	qdf_mem_domain_print(mem_list,
1507 			     qdf_err_printer,
1508 			     NULL,
1509 			     0,
1510 			     qdf_mem_meta_table_print);
1511 
1512 	return mem_list->count;
1513 }
1514 
1515 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1516 {
1517 	uint32_t leak_count = 0;
1518 	int i;
1519 
1520 	if (is_initial_mem_debug_disabled)
1521 		return;
1522 
1523 	/* detect and print leaks */
1524 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1525 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1526 
1527 	if (leak_count)
1528 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1529 				   leak_count);
1530 }
1531 
1532 /**
1533  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1534  *
1535  * Return: none
1536  */
1537 static void qdf_mem_debug_exit(void)
1538 {
1539 	int i;
1540 
1541 	if (is_initial_mem_debug_disabled)
1542 		return;
1543 
1544 	/* mem */
1545 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1546 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1547 		qdf_list_destroy(qdf_mem_list_get(i));
1548 
1549 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1550 
1551 	/* dma */
1552 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1553 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1554 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1555 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1556 }
1557 
1558 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1559 			   void *caller, uint32_t flag)
1560 {
1561 	QDF_STATUS status;
1562 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1563 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1564 	struct qdf_mem_header *header;
1565 	void *ptr;
1566 	unsigned long start, duration;
1567 
1568 	if (is_initial_mem_debug_disabled)
1569 		return __qdf_mem_malloc(size, func, line);
1570 
1571 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1572 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1573 		return NULL;
1574 	}
1575 
1576 	ptr = qdf_mem_prealloc_get(size);
1577 	if (ptr)
1578 		return ptr;
1579 
1580 	if (!flag)
1581 		flag = qdf_mem_malloc_flags();
1582 
1583 	start = qdf_mc_timer_get_system_time();
1584 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1585 	duration = qdf_mc_timer_get_system_time() - start;
1586 
1587 	if (duration > QDF_MEM_WARN_THRESHOLD)
1588 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1589 			 duration, size, func, line);
1590 
1591 	if (!header) {
1592 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1593 		return NULL;
1594 	}
1595 
1596 	qdf_mem_header_init(header, size, func, line, caller);
1597 	qdf_mem_trailer_init(header);
1598 	ptr = qdf_mem_get_ptr(header);
1599 
1600 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1601 	status = qdf_list_insert_front(mem_list, &header->node);
1602 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1603 	if (QDF_IS_STATUS_ERROR(status))
1604 		qdf_err("Failed to insert memory header; status %d", status);
1605 
1606 	qdf_mem_kmalloc_inc(ksize(header));
1607 
1608 	return ptr;
1609 }
1610 qdf_export_symbol(qdf_mem_malloc_debug);
1611 
1612 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1613 {
1614 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1615 	struct qdf_mem_header *header;
1616 	enum qdf_mem_validation_bitmap error_bitmap;
1617 
1618 	if (is_initial_mem_debug_disabled) {
1619 		__qdf_mem_free(ptr);
1620 		return;
1621 	}
1622 
1623 	/* freeing a null pointer is valid */
1624 	if (qdf_unlikely(!ptr))
1625 		return;
1626 
1627 	if (qdf_mem_prealloc_put(ptr))
1628 		return;
1629 
1630 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1631 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1632 				   ptr);
1633 
1634 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1635 
1636 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1637 	header = qdf_mem_get_header(ptr);
1638 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1639 	error_bitmap |= qdf_mem_trailer_validate(header);
1640 
1641 	if (!error_bitmap) {
1642 		header->freed = true;
1643 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1644 				     &header->node);
1645 	}
1646 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1647 
1648 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1649 				    func, line);
1650 
1651 	qdf_mem_kmalloc_dec(ksize(header));
1652 	kfree(header);
1653 }
1654 qdf_export_symbol(qdf_mem_free_debug);
1655 
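/*
 * Typical call flow under MEMORY_DEBUG (a sketch; it assumes the
 * qdf_mem_malloc()/qdf_mem_free() wrappers in qdf_mem.h route into the
 * *_debug variants above with __func__, __LINE__ and the caller's return
 * address filled in):
 *
 *   uint8_t *buf = qdf_mem_malloc(128);
 *
 *   if (!buf)
 *           return QDF_STATUS_E_NOMEM;
 *
 *   ... buf is now tracked in the active debug domain's list ...
 *
 *   qdf_mem_free(buf);    header, trailer and domain are validated here,
 *                         then the whole block is kfree()'d
 */
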
1656 void qdf_mem_check_for_leaks(void)
1657 {
1658 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1659 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1660 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1661 	uint32_t leaks_count = 0;
1662 
1663 	if (is_initial_mem_debug_disabled)
1664 		return;
1665 
1666 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1667 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1668 
1669 	if (leaks_count)
1670 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1671 				   leaks_count);
1672 }
1673 
1674 /**
1675  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1676  * qdf_mem_multi_pages_alloc
1677  * @osdev: OS device handle pointer
1678  * @pages: Multi page information storage
1679  * @element_size: Each element size
1680  * @element_num: Total number of elements to be allocated
1681  * @memctxt: Memory context
1682  * @cacheable: Coherent memory or cacheable memory
1683  * @func: Caller of this allocator
1684  * @line: Line number of the caller
1685  * @caller: Return address of the caller
1686  *
1687  * This function allocates a large amount of memory across multiple pages.
1688  * Large contiguous allocations fail frequently, so instead of allocating the
1689  * whole buffer in one shot, allocate multiple non-contiguous pages and
1690  * combine them at the point of use.
1691  *
1692  * Return: None
1693  */
1694 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1695 				     struct qdf_mem_multi_page_t *pages,
1696 				     size_t element_size, uint16_t element_num,
1697 				     qdf_dma_context_t memctxt, bool cacheable,
1698 				     const char *func, uint32_t line,
1699 				     void *caller)
1700 {
1701 	uint16_t page_idx;
1702 	struct qdf_mem_dma_page_t *dma_pages;
1703 	void **cacheable_pages = NULL;
1704 	uint16_t i;
1705 
1706 	if (!pages->page_size)
1707 		pages->page_size = qdf_page_size;
1708 
1709 	pages->num_element_per_page = pages->page_size / element_size;
1710 	if (!pages->num_element_per_page) {
1711 		qdf_print("Invalid page %d or element size %d",
1712 			  (int)pages->page_size, (int)element_size);
1713 		goto out_fail;
1714 	}
1715 
1716 	pages->num_pages = element_num / pages->num_element_per_page;
1717 	if (element_num % pages->num_element_per_page)
1718 		pages->num_pages++;
1719 
1720 	if (cacheable) {
1721 		/* Pages information storage */
1722 		pages->cacheable_pages = qdf_mem_malloc_debug(
1723 			pages->num_pages * sizeof(pages->cacheable_pages),
1724 			func, line, caller, 0);
1725 		if (!pages->cacheable_pages)
1726 			goto out_fail;
1727 
1728 		cacheable_pages = pages->cacheable_pages;
1729 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1730 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1731 				pages->page_size, func, line, caller, 0);
1732 			if (!cacheable_pages[page_idx])
1733 				goto page_alloc_fail;
1734 		}
1735 		pages->dma_pages = NULL;
1736 	} else {
1737 		pages->dma_pages = qdf_mem_malloc_debug(
1738 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1739 			func, line, caller, 0);
1740 		if (!pages->dma_pages)
1741 			goto out_fail;
1742 
1743 		dma_pages = pages->dma_pages;
1744 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1745 			dma_pages->page_v_addr_start =
1746 				qdf_mem_alloc_consistent_debug(
1747 					osdev, osdev->dev, pages->page_size,
1748 					&dma_pages->page_p_addr,
1749 					func, line, caller);
1750 			if (!dma_pages->page_v_addr_start) {
1751 				qdf_print("dmaable page alloc fail pi %d",
1752 					  page_idx);
1753 				goto page_alloc_fail;
1754 			}
1755 			dma_pages->page_v_addr_end =
1756 				dma_pages->page_v_addr_start + pages->page_size;
1757 			dma_pages++;
1758 		}
1759 		pages->cacheable_pages = NULL;
1760 	}
1761 	return;
1762 
1763 page_alloc_fail:
1764 	if (cacheable) {
1765 		for (i = 0; i < page_idx; i++)
1766 			qdf_mem_free_debug(pages->cacheable_pages[i],
1767 					   func, line);
1768 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1769 	} else {
1770 		dma_pages = pages->dma_pages;
1771 		for (i = 0; i < page_idx; i++) {
1772 			qdf_mem_free_consistent_debug(
1773 				osdev, osdev->dev,
1774 				pages->page_size, dma_pages->page_v_addr_start,
1775 				dma_pages->page_p_addr, memctxt, func, line);
1776 			dma_pages++;
1777 		}
1778 		qdf_mem_free_debug(pages->dma_pages, func, line);
1779 	}
1780 
1781 out_fail:
1782 	pages->cacheable_pages = NULL;
1783 	pages->dma_pages = NULL;
1784 	pages->num_pages = 0;
1785 }
1786 
1787 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1788 
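/*
 * Worked example of the page math above (illustrative numbers only):
 * with page_size = 4096 and element_size = 368,
 *
 *   num_element_per_page = 4096 / 368 = 11
 *
 * and for element_num = 100,
 *
 *   num_pages = 100 / 11 = 9, plus one extra page for the 100 % 11 = 1
 *   remainder, so 10 pages are allocated in total.
 */
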
1789 /**
1790  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1791  * @osdev: OS device handle pointer
1792  * @pages: Multi page information storage
1793  * @memctxt: Memory context
1794  * @cacheable: Coherent memory or cacheable memory
1795  * @func: Caller of this allocator
1796  * @line: Line number of the caller
1797  *
1798  * This function frees memory that was allocated across multiple pages.
1799  *
1800  * Return: None
1801  */
1802 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1803 				    struct qdf_mem_multi_page_t *pages,
1804 				    qdf_dma_context_t memctxt, bool cacheable,
1805 				    const char *func, uint32_t line)
1806 {
1807 	unsigned int page_idx;
1808 	struct qdf_mem_dma_page_t *dma_pages;
1809 
1810 	if (!pages->page_size)
1811 		pages->page_size = qdf_page_size;
1812 
1813 	if (cacheable) {
1814 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1815 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1816 					   func, line);
1817 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1818 	} else {
1819 		dma_pages = pages->dma_pages;
1820 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1821 			qdf_mem_free_consistent_debug(
1822 				osdev, osdev->dev, pages->page_size,
1823 				dma_pages->page_v_addr_start,
1824 				dma_pages->page_p_addr, memctxt, func, line);
1825 			dma_pages++;
1826 		}
1827 		qdf_mem_free_debug(pages->dma_pages, func, line);
1828 	}
1829 
1830 	pages->cacheable_pages = NULL;
1831 	pages->dma_pages = NULL;
1832 	pages->num_pages = 0;
1833 }
1834 
1835 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1836 
1837 #else
1838 static void qdf_mem_debug_init(void) {}
1839 
1840 static void qdf_mem_debug_exit(void) {}
1841 
1842 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1843 {
1844 	void *ptr;
1845 
1846 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1847 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1848 			     line);
1849 		return NULL;
1850 	}
1851 
1852 	ptr = qdf_mem_prealloc_get(size);
1853 	if (ptr)
1854 		return ptr;
1855 
1856 	ptr = kzalloc(size, GFP_ATOMIC);
1857 	if (!ptr) {
1858 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1859 			      size, func, line);
1860 		return NULL;
1861 	}
1862 
1863 	qdf_mem_kmalloc_inc(ksize(ptr));
1864 
1865 	return ptr;
1866 }
1867 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1868 
1869 /**
1870  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1871  * @osdev: OS device handle pointer
1872  * @pages: Multi page information storage
1873  * @element_size: Each element size
1874  * @element_num: Total number of elements to be allocated
1875  * @memctxt: Memory context
1876  * @cacheable: Coherent memory or cacheable memory
1877  *
1878  * This function allocates a large amount of memory across multiple pages.
1879  * Large contiguous allocations fail frequently, so instead of allocating the
1880  * whole buffer in one shot, allocate multiple non-contiguous pages and
1881  * combine them at the point of use.
1882  *
1883  * Return: None
1884  */
1885 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1886 			       struct qdf_mem_multi_page_t *pages,
1887 			       size_t element_size, uint16_t element_num,
1888 			       qdf_dma_context_t memctxt, bool cacheable)
1889 {
1890 	uint16_t page_idx;
1891 	struct qdf_mem_dma_page_t *dma_pages;
1892 	void **cacheable_pages = NULL;
1893 	uint16_t i;
1894 
1895 	if (!pages->page_size)
1896 		pages->page_size = qdf_page_size;
1897 
1898 	pages->num_element_per_page = pages->page_size / element_size;
1899 	if (!pages->num_element_per_page) {
1900 		qdf_print("Invalid page size %d or element size %d",
1901 			  (int)pages->page_size, (int)element_size);
1902 		goto out_fail;
1903 	}
1904 
1905 	pages->num_pages = element_num / pages->num_element_per_page;
1906 	if (element_num % pages->num_element_per_page)
1907 		pages->num_pages++;
1908 
1909 	if (cacheable) {
1910 		/* Pages information storage */
1911 		pages->cacheable_pages = qdf_mem_malloc(
1912 			pages->num_pages * sizeof(pages->cacheable_pages));
1913 		if (!pages->cacheable_pages)
1914 			goto out_fail;
1915 
1916 		cacheable_pages = pages->cacheable_pages;
1917 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1918 			cacheable_pages[page_idx] =
1919 				qdf_mem_malloc(pages->page_size);
1920 			if (!cacheable_pages[page_idx])
1921 				goto page_alloc_fail;
1922 		}
1923 		pages->dma_pages = NULL;
1924 	} else {
1925 		pages->dma_pages = qdf_mem_malloc(
1926 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1927 		if (!pages->dma_pages)
1928 			goto out_fail;
1929 
1930 		dma_pages = pages->dma_pages;
1931 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1932 			dma_pages->page_v_addr_start =
1933 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1934 					 pages->page_size,
1935 					&dma_pages->page_p_addr);
1936 			if (!dma_pages->page_v_addr_start) {
1937 				qdf_print("dmaable page alloc fail pi %d",
1938 					page_idx);
1939 				goto page_alloc_fail;
1940 			}
1941 			dma_pages->page_v_addr_end =
1942 				dma_pages->page_v_addr_start + pages->page_size;
1943 			dma_pages++;
1944 		}
1945 		pages->cacheable_pages = NULL;
1946 	}
1947 	return;
1948 
1949 page_alloc_fail:
1950 	if (cacheable) {
1951 		for (i = 0; i < page_idx; i++)
1952 			qdf_mem_free(pages->cacheable_pages[i]);
1953 		qdf_mem_free(pages->cacheable_pages);
1954 	} else {
1955 		dma_pages = pages->dma_pages;
1956 		for (i = 0; i < page_idx; i++) {
1957 			qdf_mem_free_consistent(
1958 				osdev, osdev->dev, pages->page_size,
1959 				dma_pages->page_v_addr_start,
1960 				dma_pages->page_p_addr, memctxt);
1961 			dma_pages++;
1962 		}
1963 		qdf_mem_free(pages->dma_pages);
1964 	}
1965 
1966 out_fail:
1967 	pages->cacheable_pages = NULL;
1968 	pages->dma_pages = NULL;
1969 	pages->num_pages = 0;
1970 	return;
1971 }
1972 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1973 
1974 /**
1975  * qdf_mem_multi_pages_free() - free large size of kernel memory
1976  * @osdev: OS device handle pointer
1977  * @pages: Multi page information storage
1978  * @memctxt: Memory context
1979  * @cacheable: Coherent memory or cacheable memory
1980  *
1981  * This function frees memory that was allocated across multiple pages.
1982  *
1983  * Return: None
1984  */
1985 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1986 			      struct qdf_mem_multi_page_t *pages,
1987 			      qdf_dma_context_t memctxt, bool cacheable)
1988 {
1989 	unsigned int page_idx;
1990 	struct qdf_mem_dma_page_t *dma_pages;
1991 
1992 	if (!pages->page_size)
1993 		pages->page_size = qdf_page_size;
1994 
1995 	if (cacheable) {
1996 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1997 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1998 		qdf_mem_free(pages->cacheable_pages);
1999 	} else {
2000 		dma_pages = pages->dma_pages;
2001 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2002 			qdf_mem_free_consistent(
2003 				osdev, osdev->dev, pages->page_size,
2004 				dma_pages->page_v_addr_start,
2005 				dma_pages->page_p_addr, memctxt);
2006 			dma_pages++;
2007 		}
2008 		qdf_mem_free(pages->dma_pages);
2009 	}
2010 
2011 	pages->cacheable_pages = NULL;
2012 	pages->dma_pages = NULL;
2013 	pages->num_pages = 0;
2014 	return;
2015 }
2016 qdf_export_symbol(qdf_mem_multi_pages_free);
2017 #endif
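
/*
 * Example (illustrative sketch): back a pool of 1024 descriptors of
 * 128 bytes each with cacheable multi-page memory and release it on
 * tear-down. Since the allocator returns void and zeroes the structure
 * on failure, success is checked via pages.num_pages; osdev and memctxt
 * are caller-side values.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 128, 1024, memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&pages, true);
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */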
2018 
2019 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2020 			      bool cacheable)
2021 {
2022 	unsigned int page_idx;
2023 	struct qdf_mem_dma_page_t *dma_pages;
2024 
2025 	if (!pages->page_size)
2026 		pages->page_size = qdf_page_size;
2027 
2028 	if (cacheable) {
2029 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2030 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2031 				     pages->page_size);
2032 	} else {
2033 		dma_pages = pages->dma_pages;
2034 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2035 			qdf_mem_zero(dma_pages->page_v_addr_start,
2036 				     pages->page_size);
2037 			dma_pages++;
2038 		}
2039 	}
2040 }
2041 
2042 qdf_export_symbol(qdf_mem_multi_pages_zero);
2043 
2044 void __qdf_mem_free(void *ptr)
2045 {
2046 	if (!ptr)
2047 		return;
2048 
2049 	if (qdf_might_be_prealloc(ptr)) {
2050 		if (qdf_mem_prealloc_put(ptr))
2051 			return;
2052 	}
2053 
2054 	qdf_mem_kmalloc_dec(ksize(ptr));
2055 
2056 	kfree(ptr);
2057 }
2058 
2059 qdf_export_symbol(__qdf_mem_free);
2060 
2061 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2062 {
2063 	void *ptr;
2064 
2065 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2066 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2067 			     line);
2068 		return NULL;
2069 	}
2070 
2071 	ptr = qdf_mem_prealloc_get(size);
2072 	if (ptr)
2073 		return ptr;
2074 
2075 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2076 	if (!ptr)
2077 		return NULL;
2078 
2079 	qdf_mem_kmalloc_inc(ksize(ptr));
2080 
2081 	return ptr;
2082 }
2083 
2084 qdf_export_symbol(__qdf_mem_malloc);
2085 
2086 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2087 void __qdf_untracked_mem_free(void *ptr)
2088 {
2089 	if (!ptr)
2090 		return;
2091 
2092 	kfree(ptr);
2093 }
2094 
2095 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2096 {
2097 	void *ptr;
2098 
2099 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2100 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2101 			     line);
2102 		return NULL;
2103 	}
2104 
2105 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2106 	if (!ptr)
2107 		return NULL;
2108 
2109 	return ptr;
2110 }
2111 #endif
2112 
2113 void *qdf_aligned_malloc_fl(uint32_t *size,
2114 			    void **vaddr_unaligned,
2115 			    qdf_dma_addr_t *paddr_unaligned,
2116 			    qdf_dma_addr_t *paddr_aligned,
2117 			    uint32_t align,
2118 			    const char *func, uint32_t line)
2119 {
2120 	void *vaddr_aligned;
2121 	uint32_t align_alloc_size;
2122 
2123 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2124 			line);
2125 	if (!*vaddr_unaligned) {
2126 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2127 		return NULL;
2128 	}
2129 
2130 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2131 
2132 	/* Re-allocate with additional bytes to align the base address only
2133 	 * if the allocation above returns an unaligned address. The reason
2134 	 * for first trying an exact-size allocation is that the OS allocates
2135 	 * blocks of power-of-2 pages and then frees the extra pages.
2136 	 * E.g., for a ring size of 1MB, the re-allocation below would
2137 	 * request 1MB plus 7 bytes for alignment, which causes a 2MB block
2138 	 * allocation, and that sometimes fails due to memory
2139 	 * fragmentation.
2140 	 */
2141 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2142 		align_alloc_size = *size + align - 1;
2143 
2144 		qdf_mem_free(*vaddr_unaligned);
2145 		*vaddr_unaligned = qdf_mem_malloc_fl(
2146 				(qdf_size_t)align_alloc_size, func, line);
2147 		if (!*vaddr_unaligned) {
2148 			qdf_warn("Failed to alloc %uB @ %s:%d",
2149 				 align_alloc_size, func, line);
2150 			return NULL;
2151 		}
2152 
2153 		*paddr_unaligned = qdf_mem_virt_to_phys(
2154 				*vaddr_unaligned);
2155 		*size = align_alloc_size;
2156 	}
2157 
2158 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2159 		((unsigned long)(*paddr_unaligned), align);
2160 
2161 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2162 			((unsigned long)(*paddr_aligned) -
2163 			 (unsigned long)(*paddr_unaligned)));
2164 
2165 	return vaddr_aligned;
2166 }
2167 
2168 qdf_export_symbol(qdf_aligned_malloc_fl);
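
/*
 * Example (illustrative sketch): allocate a 4096-byte region whose
 * physical address is 8-byte aligned. The caller must keep the unaligned
 * virtual address for the eventual qdf_mem_free(); *size may grow if the
 * function had to re-allocate with alignment padding. The ring_size name
 * and error handling are caller-side assumptions.
 *
 *	uint32_t ring_size = 4096;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *ring_base;
 *
 *	ring_base = qdf_aligned_malloc_fl(&ring_size, &vaddr_unaligned,
 *					  &paddr_unaligned, &paddr_aligned,
 *					  8, __func__, __LINE__);
 *	if (!ring_base)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(vaddr_unaligned);
 */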
2169 
2170 /**
2171  * qdf_mem_multi_page_link() - Make links for multi page elements
2172  * @osdev: OS device handle pointer
2173  * @pages: Multi page information storage
2174  * @elem_size: Single element size
2175  * @elem_count: Number of elements to be linked
2176  * @cacheable: Coherent memory or cacheable memory
2177  *
2178  * This function links the elements of a multi-page allocation into a chain
2179  *
2180  * Return: 0 on success, -ENOMEM if a page pointer is missing
2181  */
2182 int qdf_mem_multi_page_link(qdf_device_t osdev,
2183 		struct qdf_mem_multi_page_t *pages,
2184 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2185 {
2186 	uint16_t i, i_int;
2187 	void *page_info;
2188 	void **c_elem = NULL;
2189 	uint32_t num_link = 0;
2190 
2191 	for (i = 0; i < pages->num_pages; i++) {
2192 		if (cacheable)
2193 			page_info = pages->cacheable_pages[i];
2194 		else
2195 			page_info = pages->dma_pages[i].page_v_addr_start;
2196 
2197 		if (!page_info)
2198 			return -ENOMEM;
2199 
2200 		c_elem = (void **)page_info;
2201 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2202 			if (i_int == (pages->num_element_per_page - 1)) {
2203 				if (cacheable)
2204 					*c_elem = pages->
2205 						cacheable_pages[i + 1];
2206 				else
2207 					*c_elem = pages->
2208 						dma_pages[i + 1].
2209 							page_v_addr_start;
2210 				num_link++;
2211 				break;
2212 			} else {
2213 				*c_elem =
2214 					(void *)(((char *)c_elem) + elem_size);
2215 			}
2216 			num_link++;
2217 			c_elem = (void **)*c_elem;
2218 
2219 			/* Last link established exit */
2220 			if (num_link == (elem_count - 1))
2221 				break;
2222 		}
2223 	}
2224 
2225 	if (c_elem)
2226 		*c_elem = NULL;
2227 
2228 	return 0;
2229 }
2230 qdf_export_symbol(qdf_mem_multi_page_link);
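
/*
 * Example (illustrative sketch): after qdf_mem_multi_pages_alloc() has
 * populated @pages, chain the elements so that the first pointer-sized
 * word of each element points to the next element, including across page
 * boundaries; the head of the chain can then serve as a simple free list.
 * Element size/count mirror the earlier allocation and fail_link is a
 * hypothetical caller-side error label.
 *
 *	void **free_list;
 *
 *	if (qdf_mem_multi_page_link(osdev, &pages, 128, 1024, true))
 *		goto fail_link;
 *
 *	free_list = (void **)pages.cacheable_pages[0];
 */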
2231 
2232 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2233 {
2234 	/* special case where dst_addr or src_addr can be NULL */
2235 	if (!num_bytes)
2236 		return;
2237 
2238 	QDF_BUG(dst_addr);
2239 	QDF_BUG(src_addr);
2240 	if (!dst_addr || !src_addr)
2241 		return;
2242 
2243 	memcpy(dst_addr, src_addr, num_bytes);
2244 }
2245 qdf_export_symbol(qdf_mem_copy);
2246 
2247 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2248 {
2249 	qdf_shared_mem_t *shared_mem;
2250 	qdf_dma_addr_t dma_addr, paddr;
2251 	int ret;
2252 
2253 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2254 	if (!shared_mem)
2255 		return NULL;
2256 
2257 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2258 				size, qdf_mem_get_dma_addr_ptr(osdev,
2259 						&shared_mem->mem_info));
2260 	if (!shared_mem->vaddr) {
2261 		qdf_err("Unable to allocate DMA memory for shared resource");
2262 		qdf_mem_free(shared_mem);
2263 		return NULL;
2264 	}
2265 
2266 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2267 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2268 
2269 	qdf_mem_zero(shared_mem->vaddr, size);
2270 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2271 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2272 
2273 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2274 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2275 				      shared_mem->vaddr, dma_addr, size);
2276 	if (ret) {
2277 		qdf_err("Unable to get DMA sgtable");
2278 		qdf_mem_free_consistent(osdev, osdev->dev,
2279 					shared_mem->mem_info.size,
2280 					shared_mem->vaddr,
2281 					dma_addr,
2282 					qdf_get_dma_mem_context(shared_mem,
2283 								memctx));
2284 		qdf_mem_free(shared_mem);
2285 		return NULL;
2286 	}
2287 
2288 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2289 
2290 	return shared_mem;
2291 }
2292 
2293 qdf_export_symbol(qdf_mem_shared_mem_alloc);
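
/*
 * Example (illustrative sketch): allocate a DMA-coherent region intended
 * to be shared with the target and retrieve its DMA address from the
 * stored mem_info. qdf_mem_shared_mem_free() is assumed to be the
 * matching release helper declared in qdf_mem.h.
 *
 *	qdf_shared_mem_t *shared;
 *	qdf_dma_addr_t dma_addr;
 *
 *	shared = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shared)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	dma_addr = qdf_mem_get_dma_addr(osdev, &shared->mem_info);
 *	...
 *	qdf_mem_shared_mem_free(osdev, shared);
 */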
2294 
2295 /**
2296  * qdf_mem_copy_toio() - copy memory
2297  * @dst_addr: Pointer to destination memory location (to copy to)
2298  * @src_addr: Pointer to source memory location (to copy from)
2299  * @num_bytes: Number of bytes to copy.
2300  *
2301  * Return: none
2302  */
2303 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2304 {
2305 	if (0 == num_bytes) {
2306 		/* special case where dst_addr or src_addr can be NULL */
2307 		return;
2308 	}
2309 
2310 	if ((!dst_addr) || (!src_addr)) {
2311 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2312 			  "%s called with NULL parameter, source:%pK destination:%pK",
2313 			  __func__, src_addr, dst_addr);
2314 		QDF_ASSERT(0);
2315 		return;
2316 	}
2317 	memcpy_toio(dst_addr, src_addr, num_bytes);
2318 }
2319 
2320 qdf_export_symbol(qdf_mem_copy_toio);
2321 
2322 /**
2323  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
2324  * @ptr: Pointer to memory that will be set
2325  * @num_bytes: Number of bytes to be set
2326  * @value: Byte value to set in memory
2327  *
2328  * Return: None
2329  */
2330 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2331 {
2332 	if (!ptr) {
2333 		qdf_print("%s called with NULL parameter ptr", __func__);
2334 		return;
2335 	}
2336 	memset_io(ptr, value, num_bytes);
2337 }
2338 
2339 qdf_export_symbol(qdf_mem_set_io);
2340 
2341 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2342 {
2343 	QDF_BUG(ptr);
2344 	if (!ptr)
2345 		return;
2346 
2347 	memset(ptr, value, num_bytes);
2348 }
2349 qdf_export_symbol(qdf_mem_set);
2350 
2351 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2352 {
2353 	/* special case where dst_addr or src_addr can be NULL */
2354 	if (!num_bytes)
2355 		return;
2356 
2357 	QDF_BUG(dst_addr);
2358 	QDF_BUG(src_addr);
2359 	if (!dst_addr || !src_addr)
2360 		return;
2361 
2362 	memmove(dst_addr, src_addr, num_bytes);
2363 }
2364 qdf_export_symbol(qdf_mem_move);
2365 
2366 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2367 {
2368 	QDF_BUG(left);
2369 	QDF_BUG(right);
2370 
2371 	return memcmp(left, right, size);
2372 }
2373 qdf_export_symbol(qdf_mem_cmp);
2374 
2375 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2376 /**
2377  * qdf_mem_dma_alloc() - allocates memory for dma
2378  * @osdev: OS device handle
2379  * @dev: Pointer to device handle
2380  * @size: Size to be allocated
2381  * @phy_addr: Physical address
2382  *
2383  * Return: pointer to allocated memory, or NULL if the allocation fails
2384  */
2385 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2386 				      qdf_size_t size,
2387 				      qdf_dma_addr_t *phy_addr)
2388 {
2389 	void *vaddr;
2390 
2391 	vaddr = qdf_mem_malloc(size);
2392 	*phy_addr = ((uintptr_t) vaddr);
2393 	/* using this type conversion to suppress "cast from pointer to integer
2394 	 * of different size" warning on some platforms
2395 	 */
2396 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2397 	return vaddr;
2398 }
2399 
2400 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2401 	!defined(QCA_WIFI_QCN9000)
2402 
2403 #define QCA8074_RAM_BASE 0x50000000
2404 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2405 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2406 			qdf_dma_addr_t *phy_addr)
2407 {
2408 	void *vaddr = NULL;
2409 	int i;
2410 
2411 	*phy_addr = 0;
2412 
2413 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2414 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2415 					   qdf_mem_malloc_flags());
2416 
2417 		if (!vaddr) {
2418 			qdf_err("%s failed, size: %zu!", __func__, size);
2419 			return NULL;
2420 		}
2421 
2422 		if (*phy_addr >= QCA8074_RAM_BASE)
2423 			return vaddr;
2424 
2425 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2426 	}
2427 
2428 	return NULL;
2429 }
2430 
2431 #else
2432 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2433 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2434 {
2435 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2436 }
2437 #endif
2438 
2439 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2440 static inline void
2441 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2442 {
2443 	qdf_mem_free(vaddr);
2444 }
2445 #else
2446 
2447 static inline void
2448 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2449 {
2450 	dma_free_coherent(dev, size, vaddr, paddr);
2451 }
2452 #endif
2453 
2454 #ifdef MEMORY_DEBUG
2455 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2456 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2457 				     const char *func, uint32_t line,
2458 				     void *caller)
2459 {
2460 	QDF_STATUS status;
2461 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2462 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2463 	struct qdf_mem_header *header;
2464 	void *vaddr;
2465 
2466 	if (is_initial_mem_debug_disabled)
2467 		return __qdf_mem_alloc_consistent(osdev, dev,
2468 						  size, paddr,
2469 						  func, line);
2470 
2471 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2472 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2473 		return NULL;
2474 	}
2475 
2476 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2477 				   paddr);
2478 
2479 	if (!vaddr) {
2480 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2481 		return NULL;
2482 	}
2483 
2484 	header = qdf_mem_dma_get_header(vaddr, size);
2485 	/* For DMA buffers we only add trailers; this initializes the header
2486 	 * structure at the tail of the buffer.
2487 	 * Prefixing the header into a DMA buffer causes SMMU faults, so the
2488 	 * header is never placed at the front of DMA buffers.
2489 	 */
2490 	qdf_mem_header_init(header, size, func, line, caller);
2491 
2492 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2493 	status = qdf_list_insert_front(mem_list, &header->node);
2494 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2495 	if (QDF_IS_STATUS_ERROR(status))
2496 		qdf_err("Failed to insert memory header; status %d", status);
2497 
2498 	qdf_mem_dma_inc(size);
2499 
2500 	return vaddr;
2501 }
2502 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
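
/*
 * Resulting buffer layout (sketch): the debug tracking data is appended
 * as a trailer rather than prefixed, because shifting the start of the
 * DMA buffer would change the address handed to hardware and trigger
 * SMMU faults.
 *
 *	vaddr                                  vaddr + size
 *	|<--------------- size --------------->|<- QDF_DMA_MEM_DEBUG_SIZE ->|
 *	|  DMA buffer returned to the caller   |  debug trailer containing  |
 *	|                                      |  struct qdf_mem_header     |
 */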
2503 
2504 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2505 				   qdf_size_t size, void *vaddr,
2506 				   qdf_dma_addr_t paddr,
2507 				   qdf_dma_context_t memctx,
2508 				   const char *func, uint32_t line)
2509 {
2510 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2511 	struct qdf_mem_header *header;
2512 	enum qdf_mem_validation_bitmap error_bitmap;
2513 
2514 	if (is_initial_mem_debug_disabled) {
2515 		__qdf_mem_free_consistent(
2516 					  osdev, dev,
2517 					  size, vaddr,
2518 					  paddr, memctx);
2519 		return;
2520 	}
2521 
2522 	/* freeing a null pointer is valid */
2523 	if (qdf_unlikely(!vaddr))
2524 		return;
2525 
2526 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2527 
2528 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2529 	/* For DMA buffers we only add trailers; this retrieves the header
2530 	 * structure from the tail of the buffer.
2531 	 * Prefixing the header into a DMA buffer causes SMMU faults, so the
2532 	 * header is never placed at the front of DMA buffers.
2533 	 */
2534 	header = qdf_mem_dma_get_header(vaddr, size);
2535 	error_bitmap = qdf_mem_header_validate(header, domain);
2536 	if (!error_bitmap) {
2537 		header->freed = true;
2538 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2539 				     &header->node);
2540 	}
2541 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2542 
2543 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2544 
2545 	qdf_mem_dma_dec(header->size);
2546 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2547 }
2548 qdf_export_symbol(qdf_mem_free_consistent_debug);
2549 #endif /* MEMORY_DEBUG */
2550 
2551 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2552 			       qdf_size_t size, void *vaddr,
2553 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2554 {
2555 	qdf_mem_dma_dec(size);
2556 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2557 }
2558 
2559 qdf_export_symbol(__qdf_mem_free_consistent);
2560 
2561 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2562 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2563 				 const char *func, uint32_t line)
2564 {
2565 	void *vaddr;
2566 
2567 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2568 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2569 			     size, func, line);
2570 		return NULL;
2571 	}
2572 
2573 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2574 
2575 	if (vaddr)
2576 		qdf_mem_dma_inc(size);
2577 
2578 	return vaddr;
2579 }
2580 
2581 qdf_export_symbol(__qdf_mem_alloc_consistent);
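
/*
 * Example (illustrative sketch): typical DMA-coherent allocation through
 * the public qdf_mem_alloc_consistent()/qdf_mem_free_consistent() wrappers,
 * which are assumed to resolve to the helpers above (or to their _debug
 * counterparts when MEMORY_DEBUG is enabled). ring_size and memctxt are
 * caller-side values.
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_size, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size, vaddr, paddr,
 *				memctxt);
 */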
2582 
2583 void *qdf_aligned_mem_alloc_consistent_fl(
2584 	qdf_device_t osdev, uint32_t *size,
2585 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2586 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2587 	const char *func, uint32_t line)
2588 {
2589 	void *vaddr_aligned;
2590 	uint32_t align_alloc_size;
2591 
2592 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2593 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2594 	if (!*vaddr_unaligned) {
2595 		qdf_warn("Failed to alloc %uB @ %s:%d",
2596 			 *size, func, line);
2597 		return NULL;
2598 	}
2599 
2600 	/* Re-allocate with additional bytes to align the base address only
2601 	 * if the allocation above returns an unaligned address. The reason
2602 	 * for first trying an exact-size allocation is that the OS allocates
2603 	 * blocks of power-of-2 pages and then frees the extra pages.
2604 	 * E.g., for a ring size of 1MB, the re-allocation below would
2605 	 * request 1MB plus 7 bytes for alignment, which causes a 2MB block
2606 	 * allocation, and that sometimes fails due to memory
2607 	 * fragmentation.
2608 	 */
2609 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2610 		align_alloc_size = *size + align - 1;
2611 
2612 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2613 					*vaddr_unaligned,
2614 					*paddr_unaligned, 0);
2615 
2616 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2617 				osdev, osdev->dev, align_alloc_size,
2618 				paddr_unaligned);
2619 		if (!*vaddr_unaligned) {
2620 			qdf_warn("Failed to alloc %uB @ %s:%d",
2621 				 align_alloc_size, func, line);
2622 			return NULL;
2623 		}
2624 
2625 		*size = align_alloc_size;
2626 	}
2627 
2628 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2629 			(unsigned long)(*paddr_unaligned), align);
2630 
2631 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2632 				 ((unsigned long)(*paddr_aligned) -
2633 				  (unsigned long)(*paddr_unaligned)));
2634 
2635 	return vaddr_aligned;
2636 }
2637 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2638 
2639 /**
2640  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2641  * @osdev: OS device handle
2642  * @bus_addr: dma address to give to the device
2643  * @size: Size of the memory block
2644  * @direction: direction in which data will be DMAed
2645  *
2646  * Assign memory to the remote device.
2647  * The cache lines are flushed to ram or invalidated as needed.
2648  *
2649  * Return: none
2650  */
2651 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2652 					qdf_dma_addr_t bus_addr,
2653 					qdf_size_t size,
2654 					enum dma_data_direction direction)
2655 {
2656 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2657 }
2658 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2659 
2660 /**
2661  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2662  * @osdev: OS device handle
2663  * @bus_addr: dma address to give to the cpu
2664  * @size: Size of the memory block
2665  * @direction: direction in which data will be DMAed
2666  *
2667  * Assign memory to the CPU.
2668  *
2669  * Return: none
2670  */
2671 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2672 				     qdf_dma_addr_t bus_addr,
2673 				     qdf_size_t size,
2674 				     enum dma_data_direction direction)
2675 {
2676 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2677 }
2678 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
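
/*
 * Example (illustrative sketch): ping-pong ownership of a streaming DMA
 * buffer. Hand the buffer to the device before it DMAs data in, then sync
 * it back to the CPU before reading the result; process_rx_buffer() is a
 * hypothetical consumer.
 *
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, buf_len,
 *					   DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len,
 *					DMA_FROM_DEVICE);
 *	process_rx_buffer(buf_vaddr, buf_len);
 */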
2679 
2680 void qdf_mem_init(void)
2681 {
2682 	qdf_mem_debug_init();
2683 	qdf_net_buf_debug_init();
2684 	qdf_frag_debug_init();
2685 	qdf_mem_debugfs_init();
2686 	qdf_mem_debug_debugfs_init();
2687 }
2688 qdf_export_symbol(qdf_mem_init);
2689 
2690 void qdf_mem_exit(void)
2691 {
2692 	qdf_mem_debug_debugfs_exit();
2693 	qdf_mem_debugfs_exit();
2694 	qdf_frag_debug_exit();
2695 	qdf_net_buf_debug_exit();
2696 	qdf_mem_debug_exit();
2697 }
2698 qdf_export_symbol(qdf_mem_exit);
2699 
2700 /**
2701  * qdf_ether_addr_copy() - copy an Ethernet address
2702  *
2703  * @dst_addr: A six-byte array Ethernet address destination
2704  * @src_addr: A six-byte array Ethernet address source
2705  *
2706  * Please note: dst & src must both be aligned to u16.
2707  *
2708  * Return: none
2709  */
2710 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2711 {
2712 	if ((!dst_addr) || (!src_addr)) {
2713 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2714 			  "%s called with NULL parameter, source:%pK destination:%pK",
2715 			  __func__, src_addr, dst_addr);
2716 		QDF_ASSERT(0);
2717 		return;
2718 	}
2719 	ether_addr_copy(dst_addr, src_addr);
2720 }
2721 qdf_export_symbol(qdf_ether_addr_copy);
2722 
2723 int32_t qdf_dma_mem_stats_read(void)
2724 {
2725 	return qdf_atomic_read(&qdf_mem_stat.dma);
2726 }
2727 
2728 qdf_export_symbol(qdf_dma_mem_stats_read);
2729 
2730 int32_t qdf_heap_mem_stats_read(void)
2731 {
2732 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2733 }
2734 
2735 qdf_export_symbol(qdf_heap_mem_stats_read);
2736 
2737 int32_t qdf_skb_mem_stats_read(void)
2738 {
2739 	return qdf_atomic_read(&qdf_mem_stat.skb);
2740 }
2741 
2742 qdf_export_symbol(qdf_skb_mem_stats_read);
2743 
2744 int32_t qdf_skb_total_mem_stats_read(void)
2745 {
2746 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2747 }
2748 
2749 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2750 
2751 int32_t qdf_skb_max_mem_stats_read(void)
2752 {
2753 	return qdf_mem_stat.skb_mem_max;
2754 }
2755 
2756 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2757 
2758 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2759 {
2760 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2761 }
2762 
2763 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2764 
2765 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2766 {
2767 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2768 }
2769 
2770 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2771 
2772 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2773 {
2774 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2775 }
2776 
2777 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2778 
2779 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2780 {
2781 	return qdf_mem_stat.dp_tx_skb_count_max;
2782 }
2783 
2784 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2785 
2786 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2787 {
2788 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2789 }
2790 
2791 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2792 
2793 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2794 {
2795 	return qdf_mem_stat.dp_rx_skb_count_max;
2796 }
2797 
2798 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2799 
2800 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2801 {
2802 	return qdf_mem_stat.dp_tx_skb_mem_max;
2803 }
2804 
2805 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2806 
2807 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2808 {
2809 	return qdf_mem_stat.dp_rx_skb_mem_max;
2810 }
2811 
2812 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2813 
2814 int32_t qdf_mem_tx_desc_cnt_read(void)
2815 {
2816 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2817 }
2818 
2819 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2820 
2821 int32_t qdf_mem_tx_desc_max_read(void)
2822 {
2823 	return qdf_mem_stat.tx_descs_max;
2824 }
2825 
2826 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2827 
2828 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2829 				int32_t tx_descs_max)
2830 {
2831 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2832 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2833 }
2834 
2835 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2836 
2837 void qdf_mem_stats_init(void)
2838 {
2839 	qdf_mem_stat.skb_mem_max = 0;
2840 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
2841 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
2842 	qdf_mem_stat.dp_tx_skb_count_max = 0;
2843 	qdf_mem_stat.dp_rx_skb_count_max = 0;
2844 	qdf_mem_stat.tx_descs_max = 0;
2845 }
2846 
2847 qdf_export_symbol(qdf_mem_stats_init);
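
/*
 * Example (illustrative sketch): the read accessors above can be used to
 * snapshot current memory usage from a debugfs or periodic logging
 * handler; qdf_nofl_info() is assumed here purely as a convenient print
 * helper.
 *
 *	qdf_nofl_info("heap: %d B dma: %d B skb: %d B (peak %d B)",
 *		      qdf_heap_mem_stats_read(), qdf_dma_mem_stats_read(),
 *		      qdf_skb_mem_stats_read(), qdf_skb_max_mem_stats_read());
 */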
2848 
2849