xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision dd5f5c1afa4ab969b68717be955752f19527fb17)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * This file provides OS dependent memory management APIs
23  */
24 
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46 
47 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
48 static bool mem_debug_disabled;
49 qdf_declare_param(mem_debug_disabled, bool);
50 #endif
51 
52 #ifdef MEMORY_DEBUG
53 static bool is_initial_mem_debug_disabled;
54 #endif
55 
56 /* Preprocessor Definitions and Constants */
57 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MB */
58 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
59 #define QDF_DEBUG_STRING_SIZE 512
60 
61 /**
62  * struct __qdf_mem_stat - qdf memory statistics
63  * @kmalloc: total kmalloc allocations
64  * @dma: total dma allocations
65  * @skb: total skb allocations
66  * @skb_total: total skb allocations in host driver
67  * @dp_tx_skb: total Tx skb allocations in datapath
68  * @dp_rx_skb: total Rx skb allocations in datapath
69  * @skb_mem_max: high watermark for skb allocations
70  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
71  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
72  * @dp_tx_skb_count: DP Tx buffer count
73  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
74  * @dp_rx_skb_count: DP Rx buffer count
75  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
76  * @tx_descs_outstanding: Current pending Tx descs count
77  * @tx_descs_max: High watermark for pending Tx descs count
78  */
79 static struct __qdf_mem_stat {
80 	qdf_atomic_t kmalloc;
81 	qdf_atomic_t dma;
82 	qdf_atomic_t skb;
83 	qdf_atomic_t skb_total;
84 	qdf_atomic_t dp_tx_skb;
85 	qdf_atomic_t dp_rx_skb;
86 	int32_t skb_mem_max;
87 	int32_t dp_tx_skb_mem_max;
88 	int32_t dp_rx_skb_mem_max;
89 	qdf_atomic_t dp_tx_skb_count;
90 	int32_t dp_tx_skb_count_max;
91 	qdf_atomic_t dp_rx_skb_count;
92 	int32_t dp_rx_skb_count_max;
93 	qdf_atomic_t tx_descs_outstanding;
94 	int32_t tx_descs_max;
95 } qdf_mem_stat;
96 
97 #ifdef MEMORY_DEBUG
98 #include "qdf_debug_domain.h"
99 
100 enum list_type {
101 	LIST_TYPE_MEM = 0,
102 	LIST_TYPE_DMA = 1,
103 	LIST_TYPE_NBUF = 2,
104 	LIST_TYPE_MAX,
105 };
106 
107 /**
108  * struct major_alloc_priv - private data registered to debugfs entry
109  *                           created to list the major allocations
110  * @type:            type of the list to be parsed
111  * @threshold:       configured by user by overwriting the respective debugfs
112  *                   sys entry. It is used to list the functions which requested
113  *                   memory/dma allocations at least the threshold number of times.
114  */
115 struct major_alloc_priv {
116 	enum list_type type;
117 	uint32_t threshold;
118 };
119 
120 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
121 static qdf_spinlock_t qdf_mem_list_lock;
122 
123 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
124 static qdf_spinlock_t qdf_mem_dma_list_lock;
125 
126 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
127 {
128 	return &qdf_mem_domains[domain];
129 }
130 
131 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
132 {
133 	return &qdf_mem_dma_domains[domain];
134 }
135 
136 /**
137  * struct qdf_mem_header - memory object to debug
138  * @node: node to the list
139  * @domain: the active memory domain at time of allocation
140  * @freed: flag set during free, used to detect double frees
141  *	Use uint8_t so we can detect corruption
142  * @func: name of the function the allocation was made from
143  * @line: line number of the file the allocation was made from
144  * @size: size of the allocation in bytes
145  * @caller: Caller of the function for which memory is allocated
146  * @header: a known value, used to detect out-of-bounds access
147  * @time: timestamp at which allocation was made
148  */
149 struct qdf_mem_header {
150 	qdf_list_node_t node;
151 	enum qdf_debug_domain domain;
152 	uint8_t freed;
153 	char func[QDF_MEM_FUNC_NAME_SIZE];
154 	uint32_t line;
155 	uint32_t size;
156 	void *caller;
157 	uint64_t header;
158 	uint64_t time;
159 };
160 
161 /* align the qdf_mem_header to 8 bytes */
162 #define QDF_DMA_MEM_HEADER_ALIGN 8
163 
164 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
165 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
166 
167 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
168 {
169 	return (struct qdf_mem_header *)ptr - 1;
170 }
171 
172 /* make sure the header pointer is 8-byte aligned */
173 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
174 							    qdf_size_t size)
175 {
176 	return (struct qdf_mem_header *)
177 				qdf_roundup((size_t)((uint8_t *)ptr + size),
178 					    QDF_DMA_MEM_HEADER_ALIGN);
179 }
180 
181 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
182 {
183 	return (uint64_t *)((void *)(header + 1) + header->size);
184 }
185 
186 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
187 {
188 	return (void *)(header + 1);
189 }
190 
191 /* number of bytes needed for the qdf memory debug information */
192 #define QDF_MEM_DEBUG_SIZE \
193 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
194 
195 /* number of bytes needed for the qdf dma memory debug information */
196 #define QDF_DMA_MEM_DEBUG_SIZE \
197 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
198 
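/*
 * Editor's note - illustrative layout sketch, not part of the original
 * source: a debug allocation returned by qdf_mem_malloc_debug() is laid
 * out as
 *
 *	| struct qdf_mem_header | user payload (size bytes) | WLAN_MEM_TRAILER |
 *
 * qdf_mem_get_ptr() returns the address just past the header,
 * qdf_mem_get_header() steps back one header from the user pointer and
 * qdf_mem_get_trailer() steps past the payload to the 64-bit trailer.
 * DMA allocations instead place the header after the buffer, rounded up
 * to QDF_DMA_MEM_HEADER_ALIGN, which is why QDF_DMA_MEM_DEBUG_SIZE adds
 * the alignment rather than the trailer size.
 */
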
199 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
200 {
201 	QDF_BUG(header);
202 	if (!header)
203 		return;
204 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
205 }
206 
207 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
208 				const char *func, uint32_t line, void *caller)
209 {
210 	QDF_BUG(header);
211 	if (!header)
212 		return;
213 
214 	header->domain = qdf_debug_domain_get();
215 	header->freed = false;
216 
217 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
218 
219 	header->line = line;
220 	header->size = size;
221 	header->caller = caller;
222 	header->header = WLAN_MEM_HEADER;
223 	header->time = qdf_get_log_timestamp();
224 }
225 
226 enum qdf_mem_validation_bitmap {
227 	QDF_MEM_BAD_HEADER = 1 << 0,
228 	QDF_MEM_BAD_TRAILER = 1 << 1,
229 	QDF_MEM_BAD_SIZE = 1 << 2,
230 	QDF_MEM_DOUBLE_FREE = 1 << 3,
231 	QDF_MEM_BAD_FREED = 1 << 4,
232 	QDF_MEM_BAD_NODE = 1 << 5,
233 	QDF_MEM_BAD_DOMAIN = 1 << 6,
234 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
235 };
236 
237 static enum qdf_mem_validation_bitmap
238 qdf_mem_trailer_validate(struct qdf_mem_header *header)
239 {
240 	enum qdf_mem_validation_bitmap error_bitmap = 0;
241 
242 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
243 		error_bitmap |= QDF_MEM_BAD_TRAILER;
244 	return error_bitmap;
245 }
246 
247 static enum qdf_mem_validation_bitmap
248 qdf_mem_header_validate(struct qdf_mem_header *header,
249 			enum qdf_debug_domain domain)
250 {
251 	enum qdf_mem_validation_bitmap error_bitmap = 0;
252 
253 	if (header->header != WLAN_MEM_HEADER)
254 		error_bitmap |= QDF_MEM_BAD_HEADER;
255 
256 	if (header->size > QDF_MEM_MAX_MALLOC)
257 		error_bitmap |= QDF_MEM_BAD_SIZE;
258 
259 	if (header->freed == true)
260 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
261 	else if (header->freed)
262 		error_bitmap |= QDF_MEM_BAD_FREED;
263 
264 	if (!qdf_list_node_in_any_list(&header->node))
265 		error_bitmap |= QDF_MEM_BAD_NODE;
266 
267 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
268 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
269 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
270 	else if (header->domain != domain)
271 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
272 
273 	return error_bitmap;
274 }
275 
276 static void
277 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
278 			    enum qdf_debug_domain current_domain,
279 			    enum qdf_mem_validation_bitmap error_bitmap,
280 			    const char *func,
281 			    uint32_t line)
282 {
283 	if (!error_bitmap)
284 		return;
285 
286 	if (error_bitmap & QDF_MEM_BAD_HEADER)
287 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
288 			header->header, WLAN_MEM_HEADER);
289 
290 	if (error_bitmap & QDF_MEM_BAD_SIZE)
291 		qdf_err("Corrupted memory size %u (expected < %d)",
292 			header->size, QDF_MEM_MAX_MALLOC);
293 
294 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
295 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
296 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
297 
298 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
299 		qdf_err("Memory has previously been freed");
300 
301 	if (error_bitmap & QDF_MEM_BAD_FREED)
302 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
303 
304 	if (error_bitmap & QDF_MEM_BAD_NODE)
305 		qdf_err("Corrupted memory header node or double free");
306 
307 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
308 		qdf_err("Corrupted memory domain 0x%x", header->domain);
309 
310 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
311 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
312 			qdf_debug_domain_name(header->domain), header->domain,
313 			qdf_debug_domain_name(current_domain), current_domain);
314 
315 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
316 }
317 
318 /**
319  * struct __qdf_mem_info - memory statistics
320  * @func: the function which allocated memory
321  * @line: the line at which allocation happened
322  * @size: the size of allocation
323  * @caller: Address of the caller function
324  * @count: how many allocations of same type
325  * @time: timestamp at which allocation happened
326  */
327 struct __qdf_mem_info {
328 	char func[QDF_MEM_FUNC_NAME_SIZE];
329 	uint32_t line;
330 	uint32_t size;
331 	void *caller;
332 	uint32_t count;
333 	uint64_t time;
334 };
335 
336 /*
337  * The table depth defines the de-duplication proximity scope.
338  * A deeper table takes more time, so choose an optimum value.
339  */
340 #define QDF_MEM_STAT_TABLE_SIZE 8
341 
342 /**
343  * qdf_mem_debug_print_header() - memory debug header print logic
344  * @print: the print adapter function
345  * @print_priv: the private data to be consumed by @print
346  * @threshold: the threshold value set by user to list top allocations
347  *
348  * Return: None
349  */
350 static void qdf_mem_debug_print_header(qdf_abstract_print print,
351 				       void *print_priv,
352 				       uint32_t threshold)
353 {
354 	if (threshold)
355 		print(print_priv, "APIs requested allocations >= %u no of time",
356 		      threshold);
357 	print(print_priv,
358 	      "--------------------------------------------------------------");
359 	print(print_priv,
360 	      " count    size     total    filename     caller    timestamp");
361 	print(print_priv,
362 	      "--------------------------------------------------------------");
363 }
364 
365 /**
366  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
367  * @table: the memory metadata table to insert into
368  * @meta: the memory metadata to insert
369  *
370  * Return: true if the table is full after inserting, false otherwise
371  */
372 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
373 				      struct qdf_mem_header *meta)
374 {
375 	int i;
376 
377 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
378 		if (!table[i].count) {
379 			qdf_str_lcopy(table[i].func, meta->func,
380 				      QDF_MEM_FUNC_NAME_SIZE);
381 			table[i].line = meta->line;
382 			table[i].size = meta->size;
383 			table[i].count = 1;
384 			table[i].caller = meta->caller;
385 			table[i].time = meta->time;
386 			break;
387 		}
388 
389 		if (qdf_str_eq(table[i].func, meta->func) &&
390 		    table[i].line == meta->line &&
391 		    table[i].size == meta->size &&
392 		    table[i].caller == meta->caller) {
393 			table[i].count++;
394 			break;
395 		}
396 	}
397 
398 	/* return true if the table is now full */
399 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
400 }
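
/*
 * Editor's note - behaviour sketch: rows that share the same
 * (func, line, size, caller) tuple are de-duplicated into one entry
 * whose count is incremented, e.g. three identical allocations from a
 * hypothetical wma_foo():120 collapse into a single row with count 3.
 * Callers flush and re-zero the table whenever this helper reports it
 * full, so the de-duplication scope is bounded by
 * QDF_MEM_STAT_TABLE_SIZE.
 */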
401 
402 /**
403  * qdf_mem_domain_print() - output agnostic memory domain print logic
404  * @domain: the memory domain to print
405  * @print: the print adapter function
406  * @print_priv: the private data to be consumed by @print
407  * @threshold: the threshold value set by user to list top allocations
408  * @mem_print: pointer to function which prints the memory allocation data
409  *
410  * Return: None
411  */
412 static void qdf_mem_domain_print(qdf_list_t *domain,
413 				 qdf_abstract_print print,
414 				 void *print_priv,
415 				 uint32_t threshold,
416 				 void (*mem_print)(struct __qdf_mem_info *,
417 						   qdf_abstract_print,
418 						   void *, uint32_t))
419 {
420 	QDF_STATUS status;
421 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
422 	qdf_list_node_t *node;
423 
424 	qdf_mem_zero(table, sizeof(table));
425 	qdf_mem_debug_print_header(print, print_priv, threshold);
426 
427 	/* hold lock while inserting to avoid use-after-free of the metadata */
428 	qdf_spin_lock(&qdf_mem_list_lock);
429 	status = qdf_list_peek_front(domain, &node);
430 	while (QDF_IS_STATUS_SUCCESS(status)) {
431 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
432 		bool is_full = qdf_mem_meta_table_insert(table, meta);
433 
434 		qdf_spin_unlock(&qdf_mem_list_lock);
435 
436 		if (is_full) {
437 			(*mem_print)(table, print, print_priv, threshold);
438 			qdf_mem_zero(table, sizeof(table));
439 		}
440 
441 		qdf_spin_lock(&qdf_mem_list_lock);
442 		status = qdf_list_peek_next(domain, node, &node);
443 	}
444 	qdf_spin_unlock(&qdf_mem_list_lock);
445 
446 	(*mem_print)(table, print, print_priv, threshold);
447 }
448 
449 /**
450  * qdf_mem_meta_table_print() - memory metadata table print logic
451  * @table: the memory metadata table to print
452  * @print: the print adapter function
453  * @print_priv: the private data to be consumed by @print
454  * @threshold: the threshold value set by user to list top allocations
455  *
456  * Return: None
457  */
458 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
459 				     qdf_abstract_print print,
460 				     void *print_priv,
461 				     uint32_t threshold)
462 {
463 	int i;
464 	char debug_str[QDF_DEBUG_STRING_SIZE];
465 	size_t len = 0;
466 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
467 
468 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
469 			     "%s", debug_prefix);
470 
471 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 		if (!table[i].count)
473 			break;
474 
475 		print(print_priv,
476 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
477 		      table[i].count,
478 		      table[i].size,
479 		      table[i].count * table[i].size,
480 		      table[i].func,
481 		      table[i].line, table[i].caller,
482 		      table[i].time);
483 		len += qdf_scnprintf(debug_str + len,
484 				     sizeof(debug_str) - len,
485 				     " @ %s:%u %pS",
486 				     table[i].func,
487 				     table[i].line,
488 				     table[i].caller);
489 	}
490 	print(print_priv, "%s", debug_str);
491 }
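
/*
 * Editor's note - example output sketch (all values below are made up
 * for illustration): each populated row prints as
 *
 *	     3 x  2048 =    6144B @ dp_rx_desc_pool_alloc:210   dp_soc_attach+0x1a4/0x2f0 12345678
 *
 * i.e. count x size = total bytes, allocating function and line, the
 * caller address resolved via %pS, and the allocation timestamp.
 */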
492 
493 static int qdf_err_printer(void *priv, const char *fmt, ...)
494 {
495 	va_list args;
496 
497 	va_start(args, fmt);
498 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
499 	va_end(args);
500 
501 	return 0;
502 }
503 
504 #endif /* MEMORY_DEBUG */
505 
506 bool prealloc_disabled = 1;
507 qdf_declare_param(prealloc_disabled, bool);
508 qdf_export_symbol(prealloc_disabled);
509 
510 int qdf_mem_malloc_flags(void)
511 {
512 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
513 		return GFP_ATOMIC;
514 
515 	return GFP_KERNEL;
516 }
517 
518 qdf_export_symbol(qdf_mem_malloc_flags);
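
/*
 * Editor's note - usage sketch: callers that may run in either process
 * or atomic context can pick GFP flags at the call site, e.g.
 *
 *	buf = kzalloc(len, qdf_mem_malloc_flags());
 *
 * which resolves to GFP_ATOMIC from interrupt context, non-preemptible
 * sections or RCU read-side critical sections, and GFP_KERNEL otherwise.
 */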
519 
520 bool qdf_prealloc_disabled_config_get(void)
521 {
522 	return prealloc_disabled;
523 }
524 
525 qdf_export_symbol(qdf_prealloc_disabled_config_get);
526 
527 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
528 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
529 {
530 	QDF_STATUS status;
531 
532 	status = qdf_bool_parse(str_value, &prealloc_disabled);
533 	return status;
534 }
535 #endif
536 
537 #if defined WLAN_DEBUGFS
538 
539 /* Debugfs root directory for qdf_mem */
540 static struct dentry *qdf_mem_debugfs_root;
541 
542 #ifdef MEMORY_DEBUG
543 static int seq_printf_printer(void *priv, const char *fmt, ...)
544 {
545 	struct seq_file *file = priv;
546 	va_list args;
547 
548 	va_start(args, fmt);
549 	seq_vprintf(file, fmt, args);
550 	seq_puts(file, "\n");
551 	va_end(args);
552 
553 	return 0;
554 }
555 
556 /**
557  * qdf_print_major_alloc() - print metadata table entries at or above the threshold
558  * @table: the memory metadata table to print
559  * @print: the print adapter function
560  * @print_priv: the private data to be consumed by @print
561  * @threshold: the threshold value set by user to list top allocations
562  *
563  * Return: None
564  */
565 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
566 				  qdf_abstract_print print,
567 				  void *print_priv,
568 				  uint32_t threshold)
569 {
570 	int i;
571 
572 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
573 		if (!table[i].count)
574 			break;
575 		if (table[i].count >= threshold)
576 			print(print_priv,
577 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
578 			      table[i].count,
579 			      table[i].size,
580 			      table[i].count * table[i].size,
581 			      table[i].func,
582 			      table[i].line, table[i].caller,
583 			      table[i].time);
584 	}
585 }
586 
587 /**
588  * qdf_mem_seq_start() - sequential callback to start
589  * @seq: seq_file handle
590  * @pos: The start position of the sequence
591  *
592  * Return: iterator pointer, or NULL if iteration is complete
593  */
594 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
595 {
596 	enum qdf_debug_domain domain = *pos;
597 
598 	if (!qdf_debug_domain_valid(domain))
599 		return NULL;
600 
601 	/* just use the current position as our iterator */
602 	return pos;
603 }
604 
605 /**
606  * qdf_mem_seq_next() - next sequential callback
607  * @seq: seq_file handle
608  * @v: the current iterator
609  * @pos: the current position
610  *
611  * Get the next node and release previous node.
612  *
613  * Return: iterator pointer, or NULL if iteration is complete
614  */
615 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
616 {
617 	++*pos;
618 
619 	return qdf_mem_seq_start(seq, pos);
620 }
621 
622 /**
623  * qdf_mem_seq_stop() - stop sequential callback
624  * @seq: seq_file handle
625  * @v: current iterator
626  *
627  * Return: None
628  */
629 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
630 
631 /**
632  * qdf_mem_seq_show() - print sequential callback
633  * @seq: seq_file handle
634  * @v: current iterator
635  *
636  * Return: 0 - success
637  */
638 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
639 {
640 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
641 
642 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
643 		   qdf_debug_domain_name(domain_id), domain_id);
644 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
645 			     seq_printf_printer,
646 			     seq,
647 			     0,
648 			     qdf_mem_meta_table_print);
649 
650 	return 0;
651 }
652 
653 /* sequential file operation table */
654 static const struct seq_operations qdf_mem_seq_ops = {
655 	.start = qdf_mem_seq_start,
656 	.next  = qdf_mem_seq_next,
657 	.stop  = qdf_mem_seq_stop,
658 	.show  = qdf_mem_seq_show,
659 };
660 
661 
662 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
663 {
664 	return seq_open(file, &qdf_mem_seq_ops);
665 }
666 
667 /**
668  * qdf_major_alloc_show() - print sequential callback
669  * @seq: seq_file handle
670  * @v: current iterator
671  *
672  * Return: 0 - success
673  */
674 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
675 {
676 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
677 	struct major_alloc_priv *priv;
678 	qdf_list_t *list;
679 
680 	priv = (struct major_alloc_priv *)seq->private;
681 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
682 		   qdf_debug_domain_name(domain_id), domain_id);
683 
684 	switch (priv->type) {
685 	case LIST_TYPE_MEM:
686 		list = qdf_mem_list_get(domain_id);
687 		break;
688 	case LIST_TYPE_DMA:
689 		list = qdf_mem_dma_list(domain_id);
690 		break;
691 	default:
692 		list = NULL;
693 		break;
694 	}
695 
696 	if (list)
697 		qdf_mem_domain_print(list,
698 				     seq_printf_printer,
699 				     seq,
700 				     priv->threshold,
701 				     qdf_print_major_alloc);
702 
703 	return 0;
704 }
705 
706 /* sequential file operation table created to track major allocs */
707 static const struct seq_operations qdf_major_allocs_seq_ops = {
708 	.start = qdf_mem_seq_start,
709 	.next = qdf_mem_seq_next,
710 	.stop = qdf_mem_seq_stop,
711 	.show = qdf_major_alloc_show,
712 };
713 
714 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
715 {
716 	void *private = inode->i_private;
717 	struct seq_file *seq;
718 	int rc;
719 
720 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
721 	if (rc == 0) {
722 		seq = file->private_data;
723 		seq->private = private;
724 	}
725 	return rc;
726 }
727 
728 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
729 					     const char __user *user_buf,
730 					     size_t count,
731 					     loff_t *pos)
732 {
733 	char buf[32];
734 	ssize_t buf_size;
735 	uint32_t threshold;
736 	struct seq_file *seq = file->private_data;
737 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
738 
739 	buf_size = min(count, (sizeof(buf) - 1));
740 	if (buf_size <= 0)
741 		return 0;
742 	if (copy_from_user(buf, user_buf, buf_size))
743 		return -EFAULT;
744 	buf[buf_size] = '\0';
745 	if (!kstrtou32(buf, 10, &threshold))
746 		priv->threshold = threshold;
747 	return buf_size;
748 }
749 
750 /**
751  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
752  * @threshold: the threshold value set by user to list top allocations
753  * @print: the print adapter function
754  * @print_priv: the private data to be consumed by @print
755  * @mem_print: pointer to function which prints the memory allocation data
756  *
757  * Return: None
758  */
759 static void
760 qdf_print_major_nbuf_allocs(uint32_t threshold,
761 			    qdf_abstract_print print,
762 			    void *print_priv,
763 			    void (*mem_print)(struct __qdf_mem_info *,
764 					      qdf_abstract_print,
765 					      void *, uint32_t))
766 {
767 	uint32_t nbuf_iter;
768 	unsigned long irq_flag = 0;
769 	QDF_NBUF_TRACK *p_node;
770 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
771 	struct qdf_mem_header meta;
772 	bool is_full;
773 
774 	qdf_mem_zero(table, sizeof(table));
775 	qdf_mem_debug_print_header(print, print_priv, threshold);
776 
777 	if (is_initial_mem_debug_disabled)
778 		return;
779 
780 	qdf_rl_info("major nbuf print with threshold %u", threshold);
781 
782 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
783 	     nbuf_iter++) {
784 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
785 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
786 		while (p_node) {
787 			meta.line = p_node->line_num;
788 			meta.size = p_node->size;
789 			meta.caller = NULL;
790 			meta.time = p_node->time;
791 			qdf_str_lcopy(meta.func, p_node->func_name,
792 				      QDF_MEM_FUNC_NAME_SIZE);
793 
794 			is_full = qdf_mem_meta_table_insert(table, &meta);
795 
796 			if (is_full) {
797 				(*mem_print)(table, print,
798 					     print_priv, threshold);
799 				qdf_mem_zero(table, sizeof(table));
800 			}
801 
802 			p_node = p_node->p_next;
803 		}
804 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
805 	}
806 
807 	(*mem_print)(table, print, print_priv, threshold);
808 
809 	qdf_rl_info("major nbuf print end");
810 }
811 
812 /**
813  * qdf_major_nbuf_alloc_show() - print sequential callback
814  * @seq: seq_file handle
815  * @v: current iterator
816  *
817  * Return: 0 - success
818  */
819 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
820 {
821 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
822 
823 	if (!priv) {
824 		qdf_err("priv is null");
825 		return -EINVAL;
826 	}
827 
828 	qdf_print_major_nbuf_allocs(priv->threshold,
829 				    seq_printf_printer,
830 				    seq,
831 				    qdf_print_major_alloc);
832 
833 	return 0;
834 }
835 
836 /**
837  * qdf_nbuf_seq_start() - sequential callback to start
838  * @seq: seq_file handle
839  * @pos: The start position of the sequence
840  *
841  * Return: iterator pointer, or NULL if iteration is complete
842  */
843 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
844 {
845 	enum qdf_debug_domain domain = *pos;
846 
847 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
848 		return NULL;
849 
850 	return pos;
851 }
852 
853 /**
854  * qdf_nbuf_seq_next() - next sequential callback
855  * @seq: seq_file handle
856  * @v: the current iterator
857  * @pos: the current position
858  *
859  * Get the next node and release previous node.
860  *
861  * Return: iterator pointer, or NULL if iteration is complete
862  */
863 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
864 {
865 	++*pos;
866 
867 	return qdf_nbuf_seq_start(seq, pos);
868 }
869 
870 /**
871  * qdf_nbuf_seq_stop() - stop sequential callback
872  * @seq: seq_file handle
873  * @v: current iterator
874  *
875  * Return: None
876  */
877 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
878 
879 /* sequential file operation table created to track major skb allocs */
880 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
881 	.start = qdf_nbuf_seq_start,
882 	.next = qdf_nbuf_seq_next,
883 	.stop = qdf_nbuf_seq_stop,
884 	.show = qdf_major_nbuf_alloc_show,
885 };
886 
887 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
888 {
889 	void *private = inode->i_private;
890 	struct seq_file *seq;
891 	int rc;
892 
893 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
894 	if (rc == 0) {
895 		seq = file->private_data;
896 		seq->private = private;
897 	}
898 	return rc;
899 }
900 
901 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
902 						  const char __user *user_buf,
903 						  size_t count,
904 						  loff_t *pos)
905 {
906 	char buf[32];
907 	ssize_t buf_size;
908 	uint32_t threshold;
909 	struct seq_file *seq = file->private_data;
910 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
911 
912 	buf_size = min(count, (sizeof(buf) - 1));
913 	if (buf_size <= 0)
914 		return 0;
915 	if (copy_from_user(buf, user_buf, buf_size))
916 		return -EFAULT;
917 	buf[buf_size] = '\0';
918 	if (!kstrtou32(buf, 10, &threshold))
919 		priv->threshold = threshold;
920 	return buf_size;
921 }
922 
923 /* file operation table for listing major allocs */
924 static const struct file_operations fops_qdf_major_allocs = {
925 	.owner = THIS_MODULE,
926 	.open = qdf_major_allocs_open,
927 	.read = seq_read,
928 	.llseek = seq_lseek,
929 	.release = seq_release,
930 	.write = qdf_major_alloc_set_threshold,
931 };
932 
933 /* debugfs file operation table */
934 static const struct file_operations fops_qdf_mem_debugfs = {
935 	.owner = THIS_MODULE,
936 	.open = qdf_mem_debugfs_open,
937 	.read = seq_read,
938 	.llseek = seq_lseek,
939 	.release = seq_release,
940 };
941 
942 /* file operation table for listing major allocs */
943 static const struct file_operations fops_qdf_nbuf_major_allocs = {
944 	.owner = THIS_MODULE,
945 	.open = qdf_major_nbuf_allocs_open,
946 	.read = seq_read,
947 	.llseek = seq_lseek,
948 	.release = seq_release,
949 	.write = qdf_major_nbuf_alloc_set_threshold,
950 };
951 
952 static struct major_alloc_priv mem_priv = {
953 	/* List type set to mem */
954 	LIST_TYPE_MEM,
955 	/* initial threshold to list APIs which allocate mem >= 50 times */
956 	50
957 };
958 
959 static struct major_alloc_priv dma_priv = {
960 	/* List type set to DMA */
961 	LIST_TYPE_DMA,
962 	/* initial threshold to list APIs which allocate dma >= 50 times */
963 	50
964 };
965 
966 static struct major_alloc_priv nbuf_priv = {
967 	/* List type set to NBUF */
968 	LIST_TYPE_NBUF,
969 	/* initial threshold to list APIs which allocate nbuf >= 50 times */
970 	50
971 };
972 
973 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
974 {
975 	if (is_initial_mem_debug_disabled)
976 		return QDF_STATUS_SUCCESS;
977 
978 	if (!qdf_mem_debugfs_root)
979 		return QDF_STATUS_E_FAILURE;
980 
981 	debugfs_create_file("list",
982 			    S_IRUSR,
983 			    qdf_mem_debugfs_root,
984 			    NULL,
985 			    &fops_qdf_mem_debugfs);
986 
987 	debugfs_create_file("major_mem_allocs",
988 			    0600,
989 			    qdf_mem_debugfs_root,
990 			    &mem_priv,
991 			    &fops_qdf_major_allocs);
992 
993 	debugfs_create_file("major_dma_allocs",
994 			    0600,
995 			    qdf_mem_debugfs_root,
996 			    &dma_priv,
997 			    &fops_qdf_major_allocs);
998 
999 	debugfs_create_file("major_nbuf_allocs",
1000 			    0600,
1001 			    qdf_mem_debugfs_root,
1002 			    &nbuf_priv,
1003 			    &fops_qdf_nbuf_major_allocs);
1004 
1005 	return QDF_STATUS_SUCCESS;
1006 }
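
/*
 * Editor's note - usage sketch; the debugfs root name depends on the
 * platform integration. Assuming debugfs is mounted at /sys/kernel/debug
 * and the qdf root directory is "wlan", the major-allocation views can
 * be tuned and read as
 *
 *	echo 100 > /sys/kernel/debug/wlan/mem/major_mem_allocs
 *	cat /sys/kernel/debug/wlan/mem/major_mem_allocs
 *
 * The written value updates major_alloc_priv::threshold via
 * qdf_major_alloc_set_threshold(), so only call sites with at least
 * that many live allocations are listed.
 */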
1007 
1008 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1009 {
1010 	return QDF_STATUS_SUCCESS;
1011 }
1012 
1013 #else /* MEMORY_DEBUG */
1014 
1015 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1016 {
1017 	return QDF_STATUS_E_NOSUPPORT;
1018 }
1019 
1020 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1021 {
1022 	return QDF_STATUS_E_NOSUPPORT;
1023 }
1024 
1025 #endif /* MEMORY_DEBUG */
1026 
1027 
1028 static void qdf_mem_debugfs_exit(void)
1029 {
1030 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1031 	qdf_mem_debugfs_root = NULL;
1032 }
1033 
1034 static QDF_STATUS qdf_mem_debugfs_init(void)
1035 {
1036 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1037 
1038 	if (!qdf_debugfs_root)
1039 		return QDF_STATUS_E_FAILURE;
1040 
1041 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1042 
1043 	if (!qdf_mem_debugfs_root)
1044 		return QDF_STATUS_E_FAILURE;
1045 
1046 
1047 	debugfs_create_atomic_t("kmalloc",
1048 				S_IRUSR,
1049 				qdf_mem_debugfs_root,
1050 				&qdf_mem_stat.kmalloc);
1051 
1052 	debugfs_create_atomic_t("dma",
1053 				S_IRUSR,
1054 				qdf_mem_debugfs_root,
1055 				&qdf_mem_stat.dma);
1056 
1057 	debugfs_create_atomic_t("skb",
1058 				S_IRUSR,
1059 				qdf_mem_debugfs_root,
1060 				&qdf_mem_stat.skb);
1061 
1062 	return QDF_STATUS_SUCCESS;
1063 }
1064 
1065 #else /* WLAN_DEBUGFS */
1066 
1067 static QDF_STATUS qdf_mem_debugfs_init(void)
1068 {
1069 	return QDF_STATUS_E_NOSUPPORT;
1070 }
1071 static void qdf_mem_debugfs_exit(void) {}
1072 
1073 
1074 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1075 {
1076 	return QDF_STATUS_E_NOSUPPORT;
1077 }
1078 
1079 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1080 {
1081 	return QDF_STATUS_E_NOSUPPORT;
1082 }
1083 
1084 #endif /* WLAN_DEBUGFS */
1085 
1086 void qdf_mem_kmalloc_inc(qdf_size_t size)
1087 {
1088 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1089 }
1090 
1091 static void qdf_mem_dma_inc(qdf_size_t size)
1092 {
1093 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1094 }
1095 
1096 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1097 void qdf_mem_skb_inc(qdf_size_t size)
1098 {
1099 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1100 }
1101 
1102 void qdf_mem_skb_dec(qdf_size_t size)
1103 {
1104 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1105 }
1106 
1107 void qdf_mem_skb_total_inc(qdf_size_t size)
1108 {
1109 	int32_t skb_mem_max = 0;
1110 
1111 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1112 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1113 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1114 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1115 }
1116 
1117 void qdf_mem_skb_total_dec(qdf_size_t size)
1118 {
1119 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1120 }
1121 
1122 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1123 {
1124 	int32_t curr_dp_tx_skb_mem_max = 0;
1125 
1126 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1127 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1128 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1129 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1130 }
1131 
1132 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1133 {
1134 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1135 }
1136 
1137 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1138 {
1139 	int32_t curr_dp_rx_skb_mem_max = 0;
1140 
1141 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1142 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1143 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1144 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1145 }
1146 
1147 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1148 {
1149 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1150 }
1151 
1152 void qdf_mem_dp_tx_skb_cnt_inc(void)
1153 {
1154 	int32_t curr_dp_tx_skb_count_max = 0;
1155 
1156 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1157 	curr_dp_tx_skb_count_max =
1158 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1159 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1160 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1161 }
1162 
1163 void qdf_mem_dp_tx_skb_cnt_dec(void)
1164 {
1165 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1166 }
1167 
1168 void qdf_mem_dp_rx_skb_cnt_inc(void)
1169 {
1170 	int32_t curr_dp_rx_skb_count_max = 0;
1171 
1172 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1173 	curr_dp_rx_skb_count_max =
1174 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1175 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1176 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1177 }
1178 
1179 void qdf_mem_dp_rx_skb_cnt_dec(void)
1180 {
1181 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1182 }
1183 #endif
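
/*
 * Editor's note - pattern sketch: the *_max fields above are plain
 * int32_t watermarks updated with a read-compare-store on top of the
 * atomic counters, e.g.
 *
 *	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
 *	cur = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
 *	if (qdf_mem_stat.dp_tx_skb_mem_max < cur)
 *		qdf_mem_stat.dp_tx_skb_mem_max = cur;
 *
 * so concurrent updates may slightly under-report a peak, while the
 * counters themselves stay exact.
 */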
1184 
1185 void qdf_mem_kmalloc_dec(qdf_size_t size)
1186 {
1187 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1188 }
1189 
1190 static inline void qdf_mem_dma_dec(qdf_size_t size)
1191 {
1192 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1193 }
1194 
1195 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1196 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1197 {
1198 	__qdf_mempool_ctxt_t *new_pool = NULL;
1199 	u_int32_t align = L1_CACHE_BYTES;
1200 	unsigned long aligned_pool_mem;
1201 	int pool_id;
1202 	int i;
1203 
1204 	if (prealloc_disabled) {
1205 		/* TBD: We can maintain a list of pools in qdf_device_t
1206 		 * to help debugging
1207 		 * when pre-allocation is not enabled
1208 		 */
1209 		new_pool = (__qdf_mempool_ctxt_t *)
1210 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1211 		if (!new_pool)
1212 			return QDF_STATUS_E_NOMEM;
1213 
1214 		memset(new_pool, 0, sizeof(*new_pool));
1215 		/* TBD: define flags for zeroing buffers etc */
1216 		new_pool->flags = flags;
1217 		new_pool->elem_size = elem_size;
1218 		new_pool->max_elem = elem_cnt;
1219 		*pool_addr = new_pool;
1220 		return 0;
1221 	}
1222 
1223 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1224 		if (!osdev->mem_pool[pool_id])
1225 			break;
1226 	}
1227 
1228 	if (pool_id == MAX_MEM_POOLS)
1229 		return -ENOMEM;
1230 
1231 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1232 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1233 	if (!new_pool)
1234 		return -ENOMEM;
1235 
1236 	memset(new_pool, 0, sizeof(*new_pool));
1237 	/* TBD: define flags for zeroing buffers etc */
1238 	new_pool->flags = flags;
1239 	new_pool->pool_id = pool_id;
1240 
1241 	/* Round up the element size to cacheline */
1242 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1243 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1244 				((align)?(align - 1):0);
1245 
1246 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1247 	if (!new_pool->pool_mem) {
1248 			/* TBD: Check if we need get_free_pages above */
1249 		kfree(new_pool);
1250 		osdev->mem_pool[pool_id] = NULL;
1251 		return -ENOMEM;
1252 	}
1253 
1254 	spin_lock_init(&new_pool->lock);
1255 
1256 	/* Initialize free list */
1257 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1258 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1259 	STAILQ_INIT(&new_pool->free_list);
1260 
1261 	for (i = 0; i < elem_cnt; i++)
1262 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1263 			(mempool_elem_t *)(aligned_pool_mem +
1264 			(new_pool->elem_size * i)), mempool_entry);
1265 
1266 
1267 	new_pool->free_cnt = elem_cnt;
1268 	*pool_addr = new_pool;
1269 	return 0;
1270 }
1271 qdf_export_symbol(__qdf_mempool_init);
1272 
1273 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1274 {
1275 	int pool_id = 0;
1276 
1277 	if (!pool)
1278 		return;
1279 
1280 	if (prealloc_disabled) {
1281 		kfree(pool);
1282 		return;
1283 	}
1284 
1285 	pool_id = pool->pool_id;
1286 
1287 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1288 	kfree(pool->pool_mem);
1289 	kfree(pool);
1290 	osdev->mem_pool[pool_id] = NULL;
1291 }
1292 qdf_export_symbol(__qdf_mempool_destroy);
1293 
1294 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1295 {
1296 	void *buf = NULL;
1297 
1298 	if (!pool)
1299 		return NULL;
1300 
1301 	if (prealloc_disabled)
1302 		return  qdf_mem_malloc(pool->elem_size);
1303 
1304 	spin_lock_bh(&pool->lock);
1305 
1306 	buf = STAILQ_FIRST(&pool->free_list);
1307 	if (buf) {
1308 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1309 		pool->free_cnt--;
1310 	}
1311 
1312 	/* TBD: Update free count if debug is enabled */
1313 	spin_unlock_bh(&pool->lock);
1314 
1315 	return buf;
1316 }
1317 qdf_export_symbol(__qdf_mempool_alloc);
1318 
1319 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1320 {
1321 	if (!pool)
1322 		return;
1323 
1324 
1325 	if (prealloc_disabled)
1326 		return qdf_mem_free(buf);
1327 
1328 	spin_lock_bh(&pool->lock);
1329 	pool->free_cnt++;
1330 
1331 	STAILQ_INSERT_TAIL
1332 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1333 	spin_unlock_bh(&pool->lock);
1334 }
1335 qdf_export_symbol(__qdf_mempool_free);
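
/*
 * Editor's note - lifecycle sketch ("struct foo" is a placeholder type,
 * not part of this driver):
 *
 *	__qdf_mempool_t pool;
 *	struct foo *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *		return -ENOMEM;
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	...
 *	__qdf_mempool_free(osdev, pool, elem);
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * With prealloc_disabled set, the pool context is only bookkeeping and
 * each alloc/free falls back to qdf_mem_malloc()/qdf_mem_free();
 * otherwise elements come from a single kzalloc'd, cacheline-aligned
 * region carved into the free list at init time.
 */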
1336 
1337 #ifdef CNSS_MEM_PRE_ALLOC
1338 static bool qdf_might_be_prealloc(void *ptr)
1339 {
1340 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1341 		return true;
1342 	else
1343 		return false;
1344 }
1345 
1346 /**
1347  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1348  * @size: the number of bytes to allocate
1349  *
1350  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1351  * a chunk of pre-allocated memory. If size is less than or equal to
1352  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1353  *
1354  * Return: NULL on failure, non-NULL on success
1355  */
1356 static void *qdf_mem_prealloc_get(size_t size)
1357 {
1358 	void *ptr;
1359 
1360 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1361 		return NULL;
1362 
1363 	ptr = wcnss_prealloc_get(size);
1364 	if (!ptr)
1365 		return NULL;
1366 
1367 	memset(ptr, 0, size);
1368 
1369 	return ptr;
1370 }
1371 
1372 static inline bool qdf_mem_prealloc_put(void *ptr)
1373 {
1374 	return wcnss_prealloc_put(ptr);
1375 }
1376 #else
1377 static bool qdf_might_be_prealloc(void *ptr)
1378 {
1379 	return false;
1380 }
1381 
1382 static inline void *qdf_mem_prealloc_get(size_t size)
1383 {
1384 	return NULL;
1385 }
1386 
1387 static inline bool qdf_mem_prealloc_put(void *ptr)
1388 {
1389 	return false;
1390 }
1391 #endif /* CNSS_MEM_PRE_ALLOC */
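
/*
 * Editor's note - behaviour sketch: with CNSS_MEM_PRE_ALLOC enabled,
 * requests larger than WCNSS_PRE_ALLOC_GET_THRESHOLD are first served
 * from the cnss pre-allocated pool and only fall back to kzalloc() if
 * that fails; on free, qdf_might_be_prealloc() uses ksize() as a cheap
 * size heuristic before handing the pointer to wcnss_prealloc_put().
 * Without the feature, these helpers are no-ops returning NULL/false,
 * so the regular allocator paths are always taken.
 */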
1392 
1393 /* External Function implementation */
1394 #ifdef MEMORY_DEBUG
1395 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1396 bool qdf_mem_debug_config_get(void)
1397 {
1398 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1399 	return false;
1400 }
1401 #else
1402 bool qdf_mem_debug_config_get(void)
1403 {
1404 	return mem_debug_disabled;
1405 }
1406 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1407 
1408 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1409 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1410 {
1411 	QDF_STATUS status;
1412 
1413 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1414 	return status;
1415 }
1416 #endif
1417 
1418 /**
1419  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1420  *
1421  * Return: none
1422  */
1423 static void qdf_mem_debug_init(void)
1424 {
1425 	int i;
1426 
1427 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1428 
1429 	if (is_initial_mem_debug_disabled)
1430 		return;
1431 
1432 	/* Initializing the list with maximum size of 60000 */
1433 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1434 		qdf_list_create(&qdf_mem_domains[i], 60000);
1435 	qdf_spinlock_create(&qdf_mem_list_lock);
1436 
1437 	/* dma */
1438 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1439 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1440 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1441 }
1442 
1443 static uint32_t
1444 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1445 			       qdf_list_t *mem_list)
1446 {
1447 	if (is_initial_mem_debug_disabled)
1448 		return 0;
1449 
1450 	if (qdf_list_empty(mem_list))
1451 		return 0;
1452 
1453 	qdf_err("Memory leaks detected in %s domain!",
1454 		qdf_debug_domain_name(domain));
1455 	qdf_mem_domain_print(mem_list,
1456 			     qdf_err_printer,
1457 			     NULL,
1458 			     0,
1459 			     qdf_mem_meta_table_print);
1460 
1461 	return mem_list->count;
1462 }
1463 
1464 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1465 {
1466 	uint32_t leak_count = 0;
1467 	int i;
1468 
1469 	if (is_initial_mem_debug_disabled)
1470 		return;
1471 
1472 	/* detect and print leaks */
1473 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1474 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1475 
1476 	if (leak_count)
1477 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1478 				   leak_count);
1479 }
1480 
1481 /**
1482  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1483  *
1484  * Return: none
1485  */
1486 static void qdf_mem_debug_exit(void)
1487 {
1488 	int i;
1489 
1490 	if (is_initial_mem_debug_disabled)
1491 		return;
1492 
1493 	/* mem */
1494 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1495 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1496 		qdf_list_destroy(qdf_mem_list_get(i));
1497 
1498 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1499 
1500 	/* dma */
1501 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1502 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1503 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1504 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1505 }
1506 
1507 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1508 			   void *caller, uint32_t flag)
1509 {
1510 	QDF_STATUS status;
1511 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1512 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1513 	struct qdf_mem_header *header;
1514 	void *ptr;
1515 	unsigned long start, duration;
1516 
1517 	if (is_initial_mem_debug_disabled)
1518 		return __qdf_mem_malloc(size, func, line);
1519 
1520 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1521 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1522 		return NULL;
1523 	}
1524 
1525 	ptr = qdf_mem_prealloc_get(size);
1526 	if (ptr)
1527 		return ptr;
1528 
1529 	if (!flag)
1530 		flag = qdf_mem_malloc_flags();
1531 
1532 	start = qdf_mc_timer_get_system_time();
1533 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1534 	duration = qdf_mc_timer_get_system_time() - start;
1535 
1536 	if (duration > QDF_MEM_WARN_THRESHOLD)
1537 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1538 			 duration, size, func, line);
1539 
1540 	if (!header) {
1541 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1542 		return NULL;
1543 	}
1544 
1545 	qdf_mem_header_init(header, size, func, line, caller);
1546 	qdf_mem_trailer_init(header);
1547 	ptr = qdf_mem_get_ptr(header);
1548 
1549 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1550 	status = qdf_list_insert_front(mem_list, &header->node);
1551 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1552 	if (QDF_IS_STATUS_ERROR(status))
1553 		qdf_err("Failed to insert memory header; status %d", status);
1554 
1555 	qdf_mem_kmalloc_inc(ksize(header));
1556 
1557 	return ptr;
1558 }
1559 qdf_export_symbol(qdf_mem_malloc_debug);
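
/*
 * Editor's note - flow sketch: each successful debug allocation is
 * stamped with func/line/caller, bracketed by the WLAN_MEM_HEADER and
 * WLAN_MEM_TRAILER canaries, and linked into the per-domain list under
 * qdf_mem_list_lock. The matching qdf_mem_free_debug() validates both
 * canaries, marks the header freed and unlinks it; anything still on a
 * list at teardown is reported as a leak by qdf_mem_check_for_leaks()
 * or qdf_mem_debug_exit().
 */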
1560 
1561 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1562 				  uint32_t line, void *caller)
1563 {
1564 	QDF_STATUS status;
1565 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1566 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1567 	struct qdf_mem_header *header;
1568 	void *ptr;
1569 	unsigned long start, duration;
1570 
1571 	if (is_initial_mem_debug_disabled)
1572 		return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1573 
1574 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1575 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1576 		return NULL;
1577 	}
1578 
1579 	ptr = qdf_mem_prealloc_get(size);
1580 	if (ptr)
1581 		return ptr;
1582 
1583 	start = qdf_mc_timer_get_system_time();
1584 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1585 	duration = qdf_mc_timer_get_system_time() - start;
1586 
1587 	if (duration > QDF_MEM_WARN_THRESHOLD)
1588 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1589 			 duration, size, func, line);
1590 
1591 	if (!header) {
1592 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1593 		return NULL;
1594 	}
1595 
1596 	qdf_mem_header_init(header, size, func, line, caller);
1597 	qdf_mem_trailer_init(header);
1598 	ptr = qdf_mem_get_ptr(header);
1599 
1600 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1601 	status = qdf_list_insert_front(mem_list, &header->node);
1602 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1603 	if (QDF_IS_STATUS_ERROR(status))
1604 		qdf_err("Failed to insert memory header; status %d", status);
1605 
1606 	qdf_mem_kmalloc_inc(ksize(header));
1607 
1608 	return ptr;
1609 }
1610 
1611 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1612 
1613 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1614 				     uint32_t line)
1615 {
1616 	void *ptr;
1617 
1618 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1619 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1620 			     line);
1621 		return NULL;
1622 	}
1623 
1624 	ptr = qdf_mem_prealloc_get(size);
1625 	if (ptr)
1626 		return ptr;
1627 
1628 	ptr = kzalloc(size, GFP_ATOMIC);
1629 	if (!ptr) {
1630 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1631 			      size, func, line);
1632 		return NULL;
1633 	}
1634 
1635 	qdf_mem_kmalloc_inc(ksize(ptr));
1636 
1637 	return ptr;
1638 }
1639 
1640 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1641 
1642 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1643 {
1644 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1645 	struct qdf_mem_header *header;
1646 	enum qdf_mem_validation_bitmap error_bitmap;
1647 
1648 	if (is_initial_mem_debug_disabled) {
1649 		__qdf_mem_free(ptr);
1650 		return;
1651 	}
1652 
1653 	/* freeing a null pointer is valid */
1654 	if (qdf_unlikely(!ptr))
1655 		return;
1656 
1657 	if (qdf_mem_prealloc_put(ptr))
1658 		return;
1659 
1660 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1661 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1662 				   ptr);
1663 
1664 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1665 
1666 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1667 	header = qdf_mem_get_header(ptr);
1668 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1669 	error_bitmap |= qdf_mem_trailer_validate(header);
1670 
1671 	if (!error_bitmap) {
1672 		header->freed = true;
1673 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1674 				     &header->node);
1675 	}
1676 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1677 
1678 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1679 				    func, line);
1680 
1681 	qdf_mem_kmalloc_dec(ksize(header));
1682 	kfree(header);
1683 }
1684 qdf_export_symbol(qdf_mem_free_debug);
1685 
1686 void qdf_mem_check_for_leaks(void)
1687 {
1688 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1689 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1690 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1691 	uint32_t leaks_count = 0;
1692 
1693 	if (is_initial_mem_debug_disabled)
1694 		return;
1695 
1696 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1697 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1698 
1699 	if (leaks_count)
1700 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1701 				   leaks_count);
1702 }
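
/*
 * Editor's note - usage note: qdf_mem_check_for_leaks() only inspects
 * the currently active debug domain (both its kmalloc and DMA lists),
 * so it is meant to be called around a domain transition to catch
 * allocations that outlived the phase that made them; a full sweep of
 * all domains happens later in qdf_mem_debug_exit().
 */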
1703 
1704 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1705 				     struct qdf_mem_multi_page_t *pages,
1706 				     size_t element_size, uint32_t element_num,
1707 				     qdf_dma_context_t memctxt, bool cacheable,
1708 				     const char *func, uint32_t line,
1709 				     void *caller)
1710 {
1711 	uint16_t page_idx;
1712 	struct qdf_mem_dma_page_t *dma_pages;
1713 	void **cacheable_pages = NULL;
1714 	uint16_t i;
1715 
1716 	if (!pages->page_size)
1717 		pages->page_size = qdf_page_size;
1718 
1719 	pages->num_element_per_page = pages->page_size / element_size;
1720 	if (!pages->num_element_per_page) {
1721 		qdf_print("Invalid page %d or element size %d",
1722 			  (int)pages->page_size, (int)element_size);
1723 		goto out_fail;
1724 	}
1725 
1726 	pages->num_pages = element_num / pages->num_element_per_page;
1727 	if (element_num % pages->num_element_per_page)
1728 		pages->num_pages++;
1729 
1730 	if (cacheable) {
1731 		/* Pages information storage */
1732 		pages->cacheable_pages = qdf_mem_malloc_debug(
1733 			pages->num_pages * sizeof(pages->cacheable_pages),
1734 			func, line, caller, 0);
1735 		if (!pages->cacheable_pages)
1736 			goto out_fail;
1737 
1738 		cacheable_pages = pages->cacheable_pages;
1739 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1740 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1741 				pages->page_size, func, line, caller, 0);
1742 			if (!cacheable_pages[page_idx])
1743 				goto page_alloc_fail;
1744 		}
1745 		pages->dma_pages = NULL;
1746 	} else {
1747 		pages->dma_pages = qdf_mem_malloc_debug(
1748 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1749 			func, line, caller, 0);
1750 		if (!pages->dma_pages)
1751 			goto out_fail;
1752 
1753 		dma_pages = pages->dma_pages;
1754 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1755 			dma_pages->page_v_addr_start =
1756 				qdf_mem_alloc_consistent_debug(
1757 					osdev, osdev->dev, pages->page_size,
1758 					&dma_pages->page_p_addr,
1759 					func, line, caller);
1760 			if (!dma_pages->page_v_addr_start) {
1761 				qdf_print("dmaable page alloc fail pi %d",
1762 					  page_idx);
1763 				goto page_alloc_fail;
1764 			}
1765 			dma_pages->page_v_addr_end =
1766 				dma_pages->page_v_addr_start + pages->page_size;
1767 			dma_pages++;
1768 		}
1769 		pages->cacheable_pages = NULL;
1770 	}
1771 	return;
1772 
1773 page_alloc_fail:
1774 	if (cacheable) {
1775 		for (i = 0; i < page_idx; i++)
1776 			qdf_mem_free_debug(pages->cacheable_pages[i],
1777 					   func, line);
1778 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1779 	} else {
1780 		dma_pages = pages->dma_pages;
1781 		for (i = 0; i < page_idx; i++) {
1782 			qdf_mem_free_consistent_debug(
1783 				osdev, osdev->dev,
1784 				pages->page_size, dma_pages->page_v_addr_start,
1785 				dma_pages->page_p_addr, memctxt, func, line);
1786 			dma_pages++;
1787 		}
1788 		qdf_mem_free_debug(pages->dma_pages, func, line);
1789 	}
1790 
1791 out_fail:
1792 	pages->cacheable_pages = NULL;
1793 	pages->dma_pages = NULL;
1794 	pages->num_pages = 0;
1795 }
1796 
1797 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1798 
1799 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1800 				    struct qdf_mem_multi_page_t *pages,
1801 				    qdf_dma_context_t memctxt, bool cacheable,
1802 				    const char *func, uint32_t line)
1803 {
1804 	unsigned int page_idx;
1805 	struct qdf_mem_dma_page_t *dma_pages;
1806 
1807 	if (!pages->page_size)
1808 		pages->page_size = qdf_page_size;
1809 
1810 	if (cacheable) {
1811 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1812 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1813 					   func, line);
1814 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1815 	} else {
1816 		dma_pages = pages->dma_pages;
1817 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1818 			qdf_mem_free_consistent_debug(
1819 				osdev, osdev->dev, pages->page_size,
1820 				dma_pages->page_v_addr_start,
1821 				dma_pages->page_p_addr, memctxt, func, line);
1822 			dma_pages++;
1823 		}
1824 		qdf_mem_free_debug(pages->dma_pages, func, line);
1825 	}
1826 
1827 	pages->cacheable_pages = NULL;
1828 	pages->dma_pages = NULL;
1829 	pages->num_pages = 0;
1830 }
1831 
1832 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1833 
1834 #else
1835 static void qdf_mem_debug_init(void) {}
1836 
1837 static void qdf_mem_debug_exit(void) {}
1838 
1839 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1840 {
1841 	void *ptr;
1842 
1843 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1844 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1845 			     line);
1846 		return NULL;
1847 	}
1848 
1849 	ptr = qdf_mem_prealloc_get(size);
1850 	if (ptr)
1851 		return ptr;
1852 
1853 	ptr = kzalloc(size, GFP_ATOMIC);
1854 	if (!ptr) {
1855 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1856 			      size, func, line);
1857 		return NULL;
1858 	}
1859 
1860 	qdf_mem_kmalloc_inc(ksize(ptr));
1861 
1862 	return ptr;
1863 }
1864 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1865 
1866 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1867 			       struct qdf_mem_multi_page_t *pages,
1868 			       size_t element_size, uint32_t element_num,
1869 			       qdf_dma_context_t memctxt, bool cacheable)
1870 {
1871 	uint16_t page_idx;
1872 	struct qdf_mem_dma_page_t *dma_pages;
1873 	void **cacheable_pages = NULL;
1874 	uint16_t i;
1875 
1876 	if (!pages->page_size)
1877 		pages->page_size = qdf_page_size;
1878 
1879 	pages->num_element_per_page = pages->page_size / element_size;
1880 	if (!pages->num_element_per_page) {
1881 		qdf_print("Invalid page %d or element size %d",
1882 			  (int)pages->page_size, (int)element_size);
1883 		goto out_fail;
1884 	}
1885 
1886 	pages->num_pages = element_num / pages->num_element_per_page;
1887 	if (element_num % pages->num_element_per_page)
1888 		pages->num_pages++;
1889 
1890 	if (cacheable) {
1891 		/* Pages information storage */
1892 		pages->cacheable_pages = qdf_mem_malloc(
1893 			pages->num_pages * sizeof(pages->cacheable_pages));
1894 		if (!pages->cacheable_pages)
1895 			goto out_fail;
1896 
1897 		cacheable_pages = pages->cacheable_pages;
1898 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1899 			cacheable_pages[page_idx] =
1900 				qdf_mem_malloc(pages->page_size);
1901 			if (!cacheable_pages[page_idx])
1902 				goto page_alloc_fail;
1903 		}
1904 		pages->dma_pages = NULL;
1905 	} else {
1906 		pages->dma_pages = qdf_mem_malloc(
1907 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1908 		if (!pages->dma_pages)
1909 			goto out_fail;
1910 
1911 		dma_pages = pages->dma_pages;
1912 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1913 			dma_pages->page_v_addr_start =
1914 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1915 					 pages->page_size,
1916 					&dma_pages->page_p_addr);
1917 			if (!dma_pages->page_v_addr_start) {
1918 				qdf_print("dmaable page alloc fail pi %d",
1919 					page_idx);
1920 				goto page_alloc_fail;
1921 			}
1922 			dma_pages->page_v_addr_end =
1923 				dma_pages->page_v_addr_start + pages->page_size;
1924 			dma_pages++;
1925 		}
1926 		pages->cacheable_pages = NULL;
1927 	}
1928 	return;
1929 
1930 page_alloc_fail:
1931 	if (cacheable) {
1932 		for (i = 0; i < page_idx; i++)
1933 			qdf_mem_free(pages->cacheable_pages[i]);
1934 		qdf_mem_free(pages->cacheable_pages);
1935 	} else {
1936 		dma_pages = pages->dma_pages;
1937 		for (i = 0; i < page_idx; i++) {
1938 			qdf_mem_free_consistent(
1939 				osdev, osdev->dev, pages->page_size,
1940 				dma_pages->page_v_addr_start,
1941 				dma_pages->page_p_addr, memctxt);
1942 			dma_pages++;
1943 		}
1944 		qdf_mem_free(pages->dma_pages);
1945 	}
1946 
1947 out_fail:
1948 	pages->cacheable_pages = NULL;
1949 	pages->dma_pages = NULL;
1950 	pages->num_pages = 0;
1951 	return;
1952 }
1953 qdf_export_symbol(qdf_mem_multi_pages_alloc);
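
/*
 * Example: typical pool construction (illustrative sketch only; assumes
 * a valid osdev, a caller-defined "struct my_desc" element type and
 * num_descs count, a zero memctxt, and cacheable pages):
 *
 *	struct qdf_mem_multi_page_t pool = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pool, sizeof(struct my_desc),
 *				  num_descs, 0, true);
 *	if (!pool.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&pool, true);
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pool, 0, true);
 *
 * On failure the allocator leaves num_pages at 0 and both page arrays
 * NULL, so callers can detect failure without a return value.
 */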
1954 
1955 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1956 			      struct qdf_mem_multi_page_t *pages,
1957 			      qdf_dma_context_t memctxt, bool cacheable)
1958 {
1959 	unsigned int page_idx;
1960 	struct qdf_mem_dma_page_t *dma_pages;
1961 
1962 	if (!pages->page_size)
1963 		pages->page_size = qdf_page_size;
1964 
1965 	if (cacheable) {
1966 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1967 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1968 		qdf_mem_free(pages->cacheable_pages);
1969 	} else {
1970 		dma_pages = pages->dma_pages;
1971 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1972 			qdf_mem_free_consistent(
1973 				osdev, osdev->dev, pages->page_size,
1974 				dma_pages->page_v_addr_start,
1975 				dma_pages->page_p_addr, memctxt);
1976 			dma_pages++;
1977 		}
1978 		qdf_mem_free(pages->dma_pages);
1979 	}
1980 
1981 	pages->cacheable_pages = NULL;
1982 	pages->dma_pages = NULL;
1983 	pages->num_pages = 0;
1984 	return;
1985 }
1986 qdf_export_symbol(qdf_mem_multi_pages_free);
1987 #endif
1988 
1989 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
1990 			      bool cacheable)
1991 {
1992 	unsigned int page_idx;
1993 	struct qdf_mem_dma_page_t *dma_pages;
1994 
1995 	if (!pages->page_size)
1996 		pages->page_size = qdf_page_size;
1997 
1998 	if (cacheable) {
1999 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2000 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2001 				     pages->page_size);
2002 	} else {
2003 		dma_pages = pages->dma_pages;
2004 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2005 			qdf_mem_zero(dma_pages->page_v_addr_start,
2006 				     pages->page_size);
2007 			dma_pages++;
2008 		}
2009 	}
2010 }
2011 
2012 qdf_export_symbol(qdf_mem_multi_pages_zero);
2013 
2014 void __qdf_mem_free(void *ptr)
2015 {
2016 	if (!ptr)
2017 		return;
2018 
2019 	if (qdf_might_be_prealloc(ptr)) {
2020 		if (qdf_mem_prealloc_put(ptr))
2021 			return;
2022 	}
2023 
2024 	qdf_mem_kmalloc_dec(ksize(ptr));
2025 
2026 	kfree(ptr);
2027 }
2028 
2029 qdf_export_symbol(__qdf_mem_free);
2030 
2031 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2032 {
2033 	void *ptr;
2034 
2035 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2036 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2037 			     line);
2038 		return NULL;
2039 	}
2040 
2041 	ptr = qdf_mem_prealloc_get(size);
2042 	if (ptr)
2043 		return ptr;
2044 
2045 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2046 	if (!ptr)
2047 		return NULL;
2048 
2049 	qdf_mem_kmalloc_inc(ksize(ptr));
2050 
2051 	return ptr;
2052 }
2053 
2054 qdf_export_symbol(__qdf_mem_malloc);
2055 
2056 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2057 void __qdf_untracked_mem_free(void *ptr)
2058 {
2059 	if (!ptr)
2060 		return;
2061 
2062 	kfree(ptr);
2063 }
2064 
2065 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2066 {
2067 	void *ptr;
2068 
2069 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2070 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2071 			     line);
2072 		return NULL;
2073 	}
2074 
2075 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2076 	if (!ptr)
2077 		return NULL;
2078 
2079 	return ptr;
2080 }
2081 #endif
2082 
2083 void *qdf_aligned_malloc_fl(uint32_t *size,
2084 			    void **vaddr_unaligned,
2085 			    qdf_dma_addr_t *paddr_unaligned,
2086 			    qdf_dma_addr_t *paddr_aligned,
2087 			    uint32_t align,
2088 			    const char *func, uint32_t line)
2089 {
2090 	void *vaddr_aligned;
2091 	uint32_t align_alloc_size;
2092 
2093 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2094 			line);
2095 	if (!*vaddr_unaligned) {
2096 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2097 		return NULL;
2098 	}
2099 
2100 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2101 
2102 	/* Re-allocate with additional bytes to align the base address
2103 	 * only if the allocation above returned an unaligned address.
2104 	 * The exact-size allocation is tried first because the OS
2105 	 * allocates power-of-2 page blocks and then frees the extra
2106 	 * pages; e.g., for a ring size of 1MB, requesting 1MB plus 7
2107 	 * bytes for alignment up front would cause a 2MB block
2108 	 * allocation, which sometimes fails due to memory
2109 	 * fragmentation.
2110 	 */
2111 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2112 		align_alloc_size = *size + align - 1;
2113 
2114 		qdf_mem_free(*vaddr_unaligned);
2115 		*vaddr_unaligned = qdf_mem_malloc_fl(
2116 				(qdf_size_t)align_alloc_size, func, line);
2117 		if (!*vaddr_unaligned) {
2118 			qdf_warn("Failed to alloc %uB @ %s:%d",
2119 				 align_alloc_size, func, line);
2120 			return NULL;
2121 		}
2122 
2123 		*paddr_unaligned = qdf_mem_virt_to_phys(
2124 				*vaddr_unaligned);
2125 		*size = align_alloc_size;
2126 	}
2127 
2128 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2129 		((unsigned long)(*paddr_unaligned), align);
2130 
2131 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2132 			((unsigned long)(*paddr_aligned) -
2133 			 (unsigned long)(*paddr_unaligned)));
2134 
2135 	return vaddr_aligned;
2136 }
2137 
2138 qdf_export_symbol(qdf_aligned_malloc_fl);
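
/*
 * Worked example of the alignment math above (illustrative values only):
 * if the retried allocation returns *paddr_unaligned = 0x2002 with
 * align = 8, then
 *
 *	*paddr_aligned = qdf_align(0x2002, 8) = 0x2008
 *	vaddr_aligned  = *vaddr_unaligned + (0x2008 - 0x2002)
 *
 * i.e. the returned virtual address is offset by the same 6 bytes as the
 * physical address, which fits inside the extra align - 1 bytes that were
 * allocated. Callers must keep *vaddr_unaligned for the eventual
 * qdf_mem_free(), since only the unaligned base was allocated.
 */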
2139 
2140 #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
2141 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2142 			       uint32_t elem_size, uint32_t elem_count,
2143 			       uint8_t cacheable, qdf_mem_release_cb cb,
2144 			       void *elem_list)
2145 {
2146 	uint16_t i, i_int;
2147 	void *page_info;
2148 	void *elem;
2149 	uint32_t num_elem = 0;
2150 
2151 	for (i = 0; i < pages->num_pages; i++) {
2152 		if (cacheable)
2153 			page_info = pages->cacheable_pages[i];
2154 		else
2155 			page_info = pages->dma_pages[i].page_v_addr_start;
2156 
2157 		if (!page_info)
2158 			return -ENOMEM;
2159 
2160 		elem = page_info;
2161 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2162 			cb(ctxt, elem, elem_list);
2163 			elem = ((char *)elem + elem_size);
2164 			num_elem++;
2165 
2166 			/* Required number of desc pool elements reached */
2167 			if (num_elem == (elem_count - 1))
2168 				break;
2169 		}
2170 	}
2171 
2172 	return 0;
2173 }
2174 
2175 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
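
/*
 * Example release callback for qdf_tx_desc_pool_free_bufs() (illustrative
 * sketch; my_soc, my_desc and my_desc_detach() are placeholder caller
 * names, and the parameter list simply mirrors the cb(ctxt, elem,
 * elem_list) invocation above):
 *
 *	static void my_desc_release(void *ctxt, void *elem, void *elem_list)
 *	{
 *		struct my_soc *soc = ctxt;
 *		struct my_desc *desc = elem;
 *
 *		my_desc_detach(soc, desc, elem_list);
 *	}
 *
 * The callback is invoked once per pool element, walking each page in
 * elem_size strides.
 */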
2176 #endif
2177 
2178 int qdf_mem_multi_page_link(qdf_device_t osdev,
2179 			    struct qdf_mem_multi_page_t *pages,
2180 			    uint32_t elem_size, uint32_t elem_count,
2181 			    uint8_t cacheable)
2182 {
2183 	uint16_t i, i_int;
2184 	void *page_info;
2185 	void **c_elem = NULL;
2186 	uint32_t num_link = 0;
2187 
2188 	for (i = 0; i < pages->num_pages; i++) {
2189 		if (cacheable)
2190 			page_info = pages->cacheable_pages[i];
2191 		else
2192 			page_info = pages->dma_pages[i].page_v_addr_start;
2193 
2194 		if (!page_info)
2195 			return -ENOMEM;
2196 
2197 		c_elem = (void **)page_info;
2198 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2199 			if (i_int == (pages->num_element_per_page - 1)) {
2200 				if ((i + 1) == pages->num_pages)
2201 					break;
2202 				if (cacheable)
2203 					*c_elem = pages->
2204 						cacheable_pages[i + 1];
2205 				else
2206 					*c_elem = pages->
2207 						dma_pages[i + 1].
2208 							page_v_addr_start;
2209 				num_link++;
2210 				break;
2211 			} else {
2212 				*c_elem =
2213 					(void *)(((char *)c_elem) + elem_size);
2214 			}
2215 			num_link++;
2216 			c_elem = (void **)*c_elem;
2217 
2218 			/* Last link established; exit */
2219 			if (num_link == (elem_count - 1))
2220 				break;
2221 		}
2222 	}
2223 
2224 	if (c_elem)
2225 		*c_elem = NULL;
2226 
2227 	return 0;
2228 }
2229 qdf_export_symbol(qdf_mem_multi_page_link);
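
/*
 * Example: consuming the free list built above for cacheable pages
 * (illustrative sketch; my_pool_add() is a placeholder for handing the
 * element to the caller's descriptor pool). Each element's first
 * pointer-sized word holds the address of the next element and the last
 * linked element holds NULL:
 *
 *	void **elem = pages->cacheable_pages[0];
 *
 *	while (elem) {
 *		my_pool_add(elem);
 *		elem = (void **)*elem;
 *	}
 */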
2230 
2231 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2232 {
2233 	/* special case where dst_addr or src_addr can be NULL */
2234 	if (!num_bytes)
2235 		return;
2236 
2237 	QDF_BUG(dst_addr);
2238 	QDF_BUG(src_addr);
2239 	if (!dst_addr || !src_addr)
2240 		return;
2241 
2242 	memcpy(dst_addr, src_addr, num_bytes);
2243 }
2244 qdf_export_symbol(qdf_mem_copy);
2245 
2246 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2247 {
2248 	qdf_shared_mem_t *shared_mem;
2249 	qdf_dma_addr_t dma_addr, paddr;
2250 	int ret;
2251 
2252 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2253 	if (!shared_mem)
2254 		return NULL;
2255 
2256 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2257 				size, qdf_mem_get_dma_addr_ptr(osdev,
2258 						&shared_mem->mem_info));
2259 	if (!shared_mem->vaddr) {
2260 		qdf_err("Unable to allocate DMA memory for shared resource");
2261 		qdf_mem_free(shared_mem);
2262 		return NULL;
2263 	}
2264 
2265 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2266 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2267 
2268 	qdf_mem_zero(shared_mem->vaddr, size);
2269 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2270 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2271 
2272 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2273 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2274 				      shared_mem->vaddr, dma_addr, size);
2275 	if (ret) {
2276 		qdf_err("Unable to get DMA sgtable");
2277 		qdf_mem_free_consistent(osdev, osdev->dev,
2278 					shared_mem->mem_info.size,
2279 					shared_mem->vaddr,
2280 					dma_addr,
2281 					qdf_get_dma_mem_context(shared_mem,
2282 								memctx));
2283 		qdf_mem_free(shared_mem);
2284 		return NULL;
2285 	}
2286 
2287 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2288 
2289 	return shared_mem;
2290 }
2291 
2292 qdf_export_symbol(qdf_mem_shared_mem_alloc);
2293 
2294 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2295 {
2296 	if (0 == num_bytes) {
2297 		/* special case where dst_addr or src_addr can be NULL */
2298 		return;
2299 	}
2300 
2301 	if ((!dst_addr) || (!src_addr)) {
2302 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2303 			  "%s called with NULL parameter, source:%pK destination:%pK",
2304 			  __func__, src_addr, dst_addr);
2305 		QDF_ASSERT(0);
2306 		return;
2307 	}
2308 	memcpy_toio(dst_addr, src_addr, num_bytes);
2309 }
2310 
2311 qdf_export_symbol(qdf_mem_copy_toio);
2312 
2313 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2314 {
2315 	if (!ptr) {
2316 		qdf_print("%s called with NULL parameter ptr", __func__);
2317 		return;
2318 	}
2319 	memset_io(ptr, value, num_bytes);
2320 }
2321 
2322 qdf_export_symbol(qdf_mem_set_io);
2323 
2324 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2325 {
2326 	QDF_BUG(ptr);
2327 	if (!ptr)
2328 		return;
2329 
2330 	memset(ptr, value, num_bytes);
2331 }
2332 qdf_export_symbol(qdf_mem_set);
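
/*
 * Note the argument order: unlike memset(ptr, value, num_bytes),
 * qdf_mem_set() takes the length before the fill value, e.g. with buf a
 * local array,
 *
 *	qdf_mem_set(buf, sizeof(buf), 0xff);
 *
 * fills buf with 0xff bytes.
 */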
2333 
2334 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2335 {
2336 	/* special case where dst_addr or src_addr can be NULL */
2337 	if (!num_bytes)
2338 		return;
2339 
2340 	QDF_BUG(dst_addr);
2341 	QDF_BUG(src_addr);
2342 	if (!dst_addr || !src_addr)
2343 		return;
2344 
2345 	memmove(dst_addr, src_addr, num_bytes);
2346 }
2347 qdf_export_symbol(qdf_mem_move);
2348 
2349 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2350 {
2351 	QDF_BUG(left);
2352 	QDF_BUG(right);
2353 
2354 	return memcmp(left, right, size);
2355 }
2356 qdf_export_symbol(qdf_mem_cmp);
2357 
2358 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2359 /**
2360  * qdf_mem_dma_alloc() - allocates memory for dma
2361  * @osdev: OS device handle
2362  * @dev: Pointer to device handle
2363  * @size: Size to be allocated
2364  * @phy_addr: Physical address
2365  *
2366  * Return: pointer to the allocated memory, or NULL if the allocation fails
2367  */
2368 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2369 				      qdf_size_t size,
2370 				      qdf_dma_addr_t *phy_addr)
2371 {
2372 	void *vaddr;
2373 
2374 	vaddr = qdf_mem_malloc(size);
2375 	*phy_addr = ((uintptr_t) vaddr);
2376 	/* this uintptr_t conversion suppresses the "cast from pointer to
2377 	 * integer of different size" warning on some platforms
2378 	 */
2379 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2380 	return vaddr;
2381 }
2382 
2383 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2384 	!defined(QCA_WIFI_QCN9000)
2385 
2386 #define QCA8074_RAM_BASE 0x50000000
2387 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2388 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2389 			qdf_dma_addr_t *phy_addr)
2390 {
2391 	void *vaddr = NULL;
2392 	int i;
2393 
2394 	*phy_addr = 0;
2395 
2396 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2397 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2398 					   qdf_mem_malloc_flags());
2399 
2400 		if (!vaddr) {
2401 			qdf_err("%s failed, size: %zu!", __func__, size);
2402 			return NULL;
2403 		}
2404 
2405 		if (*phy_addr >= QCA8074_RAM_BASE)
2406 			return vaddr;
2407 
2408 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2409 	}
2410 
2411 	return NULL;
2412 }
2413 #elif defined(QCA_DMA_PADDR_CHECK)
2414 #ifdef CONFIG_LEAK_DETECTION
2415 #define MAX_DEBUG_DOMAIN_COUNT QDF_DEBUG_DOMAIN_COUNT
2416 #define debug_domain_get() qdf_debug_domain_get()
2417 #else
2418 #define MAX_DEBUG_DOMAIN_COUNT 1
2419 #define debug_domain_get() DEFAULT_DEBUG_DOMAIN_INIT
2420 #endif
2421 /**
2422  * struct qdf_dma_buf_entry - DMA invalid buffer list entry
2423  * @node: QDF list node member
2424  * @size: DMA buffer size
2425  * @phy_addr: DMA buffer physical address
2426  * @vaddr: DMA buffer virtual address. If the DMA buffer is at least as
2427  *         large as the entry, the entry info is stored in the DMA buffer
2428  *         itself (the entry address equals the DMA buffer vaddr) and
2429  *         vaddr is NULL, which avoids a separate allocation. If the DMA
2430  *         buffer is smaller than the entry, a separate entry is allocated
2431  *         and vaddr is set to the invalid DMA buffer's virtual address.
2432  */
2433 struct qdf_dma_buf_entry {
2434 	qdf_list_node_t node;
2435 	qdf_size_t size;
2436 	qdf_dma_addr_t phy_addr;
2437 	void *vaddr;
2438 };
2439 
2440 #define DMA_PHY_ADDR_RESERVED 0x2000
2441 #define QDF_DMA_MEM_ALLOC_MAX_RETRIES 10
2442 #define QDF_DMA_INVALID_BUF_LIST_SIZE 128
2443 static qdf_list_t qdf_invalid_buf_list[MAX_DEBUG_DOMAIN_COUNT];
2444 static bool qdf_invalid_buf_list_init[MAX_DEBUG_DOMAIN_COUNT];
2445 static qdf_spinlock_t qdf_invalid_buf_list_lock;
2446 
2447 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2448 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2449 {
2450 	void *vaddr;
2451 	uint32_t retry;
2452 	QDF_STATUS status;
2453 	bool is_separate;
2454 	qdf_list_t *cur_buf_list;
2455 	struct qdf_dma_buf_entry *entry;
2456 	uint8_t current_domain;
2457 
2458 	for (retry = 0; retry < QDF_DMA_MEM_ALLOC_MAX_RETRIES; retry++) {
2459 		vaddr = dma_alloc_coherent(dev, size, paddr,
2460 					   qdf_mem_malloc_flags());
2461 		if (!vaddr)
2462 			return NULL;
2463 
2464 		if (qdf_likely(*paddr > DMA_PHY_ADDR_RESERVED))
2465 			return vaddr;
2466 
2467 		current_domain = debug_domain_get();
2468 
2469 		/* if qdf_invalid_buf_list is not initialized we cannot record
2470 		 * and hold the invalid buffer; free it and retry until we get
2471 		 * memory with a physical address greater than
2472 		 * DMA_PHY_ADDR_RESERVED
2473 		 */
2474 		if (current_domain >= MAX_DEBUG_DOMAIN_COUNT ||
2475 		    !qdf_invalid_buf_list_init[current_domain]) {
2476 			qdf_debug("physical address below 0x%x, re-alloc",
2477 				  DMA_PHY_ADDR_RESERVED);
2478 			dma_free_coherent(dev, size, vaddr, *paddr);
2479 			continue;
2480 		}
2481 
2482 		cur_buf_list = &qdf_invalid_buf_list[current_domain];
2483 		if (size >= sizeof(*entry)) {
2484 			entry = vaddr;
2485 			entry->vaddr = NULL;
2486 		} else {
2487 			entry = qdf_mem_malloc(sizeof(*entry));
2488 			if (!entry) {
2489 				dma_free_coherent(dev, size, vaddr, *paddr);
2490 				qdf_err("qdf_mem_malloc entry failed!");
2491 				continue;
2492 			}
2493 			entry->vaddr = vaddr;
2494 		}
2495 
2496 		entry->phy_addr = *paddr;
2497 		entry->size = size;
2498 		qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
2499 		status = qdf_list_insert_back(cur_buf_list,
2500 					      &entry->node);
2501 		qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);
2502 		if (QDF_IS_STATUS_ERROR(status)) {
2503 			qdf_err("insert buf entry fail, status %d", status);
2504 			is_separate = !entry->vaddr ? false : true;
2505 			dma_free_coherent(dev, size, vaddr, *paddr);
2506 			if (is_separate)
2507 				qdf_mem_free(entry);
2508 		}
2509 	}
2510 
2511 	return NULL;
2512 }
2513 #else
2514 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2515 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2516 {
2517 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2518 }
2519 #endif
2520 
2521 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2522 static inline void
2523 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2524 {
2525 	qdf_mem_free(vaddr);
2526 }
2527 #else
2528 
2529 static inline void
2530 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2531 {
2532 	dma_free_coherent(dev, size, vaddr, paddr);
2533 }
2534 #endif
2535 
2536 #ifdef MEMORY_DEBUG
2537 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2538 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2539 				     const char *func, uint32_t line,
2540 				     void *caller)
2541 {
2542 	QDF_STATUS status;
2543 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2544 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2545 	struct qdf_mem_header *header;
2546 	void *vaddr;
2547 
2548 	if (is_initial_mem_debug_disabled)
2549 		return __qdf_mem_alloc_consistent(osdev, dev,
2550 						  size, paddr,
2551 						  func, line);
2552 
2553 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2554 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2555 		return NULL;
2556 	}
2557 
2558 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2559 				   paddr);
2560 
2561 	if (!vaddr) {
2562 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2563 		return NULL;
2564 	}
2565 
2566 	header = qdf_mem_dma_get_header(vaddr, size);
2567 	/* For DMA buffers we only add trailers; this initializes the
2568 	 * header structure at the tail of the buffer. Prefixing the
2569 	 * header into a DMA buffer causes SMMU faults, so the header is
2570 	 * never placed at the front of DMA buffers.
2571 	 */
2572 	qdf_mem_header_init(header, size, func, line, caller);
2573 
2574 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2575 	status = qdf_list_insert_front(mem_list, &header->node);
2576 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2577 	if (QDF_IS_STATUS_ERROR(status))
2578 		qdf_err("Failed to insert memory header; status %d", status);
2579 
2580 	qdf_mem_dma_inc(size);
2581 
2582 	return vaddr;
2583 }
2584 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
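
/*
 * Resulting DMA debug buffer layout (illustrative):
 *
 *	vaddr                              vaddr + size
 *	|<------------- size ------------>|<- QDF_DMA_MEM_DEBUG_SIZE ->|
 *	|          caller's data          |    debug header (trailer)  |
 *
 * qdf_mem_dma_get_header() locates the trailer from vaddr + size, and
 * the vaddr handed to the caller is the same address that must later be
 * passed to qdf_mem_free_consistent_debug().
 */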
2585 
2586 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2587 				   qdf_size_t size, void *vaddr,
2588 				   qdf_dma_addr_t paddr,
2589 				   qdf_dma_context_t memctx,
2590 				   const char *func, uint32_t line)
2591 {
2592 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2593 	struct qdf_mem_header *header;
2594 	enum qdf_mem_validation_bitmap error_bitmap;
2595 
2596 	if (is_initial_mem_debug_disabled) {
2597 		__qdf_mem_free_consistent(
2598 					  osdev, dev,
2599 					  size, vaddr,
2600 					  paddr, memctx);
2601 		return;
2602 	}
2603 
2604 	/* freeing a null pointer is valid */
2605 	if (qdf_unlikely(!vaddr))
2606 		return;
2607 
2608 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2609 
2610 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2611 	/* For DMA buffers we only add trailers; this retrieves the
2612 	 * header structure from the tail of the buffer. Prefixing the
2613 	 * header into a DMA buffer causes SMMU faults, so the header is
2614 	 * never placed at the front of DMA buffers.
2615 	 */
2616 	header = qdf_mem_dma_get_header(vaddr, size);
2617 	error_bitmap = qdf_mem_header_validate(header, domain);
2618 	if (!error_bitmap) {
2619 		header->freed = true;
2620 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2621 				     &header->node);
2622 	}
2623 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2624 
2625 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2626 
2627 	qdf_mem_dma_dec(header->size);
2628 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2629 }
2630 qdf_export_symbol(qdf_mem_free_consistent_debug);
2631 #endif /* MEMORY_DEBUG */
2632 
2633 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2634 			       qdf_size_t size, void *vaddr,
2635 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2636 {
2637 	qdf_mem_dma_dec(size);
2638 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2639 }
2640 
2641 qdf_export_symbol(__qdf_mem_free_consistent);
2642 
2643 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2644 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2645 				 const char *func, uint32_t line)
2646 {
2647 	void *vaddr;
2648 
2649 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2650 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2651 			     size, func, line);
2652 		return NULL;
2653 	}
2654 
2655 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2656 
2657 	if (vaddr)
2658 		qdf_mem_dma_inc(size);
2659 
2660 	return vaddr;
2661 }
2662 
2663 qdf_export_symbol(__qdf_mem_alloc_consistent);
2664 
2665 void *qdf_aligned_mem_alloc_consistent_fl(
2666 	qdf_device_t osdev, uint32_t *size,
2667 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2668 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2669 	const char *func, uint32_t line)
2670 {
2671 	void *vaddr_aligned;
2672 	uint32_t align_alloc_size;
2673 
2674 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2675 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2676 	if (!*vaddr_unaligned) {
2677 		qdf_warn("Failed to alloc %uB @ %s:%d",
2678 			 *size, func, line);
2679 		return NULL;
2680 	}
2681 
2682 	/* Re-allocate with additional bytes to align the base address
2683 	 * only if the allocation above returned an unaligned address.
2684 	 * The exact-size allocation is tried first because the OS
2685 	 * allocates power-of-2 page blocks and then frees the extra
2686 	 * pages; e.g., for a ring size of 1MB, requesting 1MB plus 7
2687 	 * bytes for alignment up front would cause a 2MB block
2688 	 * allocation, which sometimes fails due to memory
2689 	 * fragmentation.
2690 	 */
2691 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2692 		align_alloc_size = *size + align - 1;
2693 
2694 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2695 					*vaddr_unaligned,
2696 					*paddr_unaligned, 0);
2697 
2698 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2699 				osdev, osdev->dev, align_alloc_size,
2700 				paddr_unaligned);
2701 		if (!*vaddr_unaligned) {
2702 			qdf_warn("Failed to alloc %uB @ %s:%d",
2703 				 align_alloc_size, func, line);
2704 			return NULL;
2705 		}
2706 
2707 		*size = align_alloc_size;
2708 	}
2709 
2710 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2711 			(unsigned long)(*paddr_unaligned), align);
2712 
2713 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2714 				 ((unsigned long)(*paddr_aligned) -
2715 				  (unsigned long)(*paddr_unaligned)));
2716 
2717 	return vaddr_aligned;
2718 }
2719 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2720 
2721 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2722 					qdf_dma_addr_t bus_addr,
2723 					qdf_size_t size,
2724 					enum dma_data_direction direction)
2725 {
2726 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2727 }
2728 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2729 
2730 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2731 				     qdf_dma_addr_t bus_addr,
2732 				     qdf_size_t size,
2733 				     enum dma_data_direction direction)
2734 {
2735 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2736 }
2737 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
2738 
2739 void qdf_mem_init(void)
2740 {
2741 	qdf_mem_debug_init();
2742 	qdf_net_buf_debug_init();
2743 	qdf_frag_debug_init();
2744 	qdf_mem_debugfs_init();
2745 	qdf_mem_debug_debugfs_init();
2746 }
2747 qdf_export_symbol(qdf_mem_init);
2748 
2749 void qdf_mem_exit(void)
2750 {
2751 	qdf_mem_debug_debugfs_exit();
2752 	qdf_mem_debugfs_exit();
2753 	qdf_frag_debug_exit();
2754 	qdf_net_buf_debug_exit();
2755 	qdf_mem_debug_exit();
2756 }
2757 qdf_export_symbol(qdf_mem_exit);
2758 
2759 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2760 {
2761 	if ((!dst_addr) || (!src_addr)) {
2762 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2763 			  "%s called with NULL parameter, source:%pK destination:%pK",
2764 			  __func__, src_addr, dst_addr);
2765 		QDF_ASSERT(0);
2766 		return;
2767 	}
2768 	ether_addr_copy(dst_addr, src_addr);
2769 }
2770 qdf_export_symbol(qdf_ether_addr_copy);
2771 
2772 int32_t qdf_dma_mem_stats_read(void)
2773 {
2774 	return qdf_atomic_read(&qdf_mem_stat.dma);
2775 }
2776 
2777 qdf_export_symbol(qdf_dma_mem_stats_read);
2778 
2779 int32_t qdf_heap_mem_stats_read(void)
2780 {
2781 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2782 }
2783 
2784 qdf_export_symbol(qdf_heap_mem_stats_read);
2785 
2786 int32_t qdf_skb_mem_stats_read(void)
2787 {
2788 	return qdf_atomic_read(&qdf_mem_stat.skb);
2789 }
2790 
2791 qdf_export_symbol(qdf_skb_mem_stats_read);
2792 
2793 int32_t qdf_skb_total_mem_stats_read(void)
2794 {
2795 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2796 }
2797 
2798 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2799 
2800 int32_t qdf_skb_max_mem_stats_read(void)
2801 {
2802 	return qdf_mem_stat.skb_mem_max;
2803 }
2804 
2805 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2806 
2807 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2808 {
2809 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2810 }
2811 
2812 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2813 
2814 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2815 {
2816 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2817 }
2818 
2819 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2820 
2821 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2822 {
2823 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2824 }
2825 
2826 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2827 
2828 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2829 {
2830 	return qdf_mem_stat.dp_tx_skb_count_max;
2831 }
2832 
2833 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2834 
2835 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2836 {
2837 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2838 }
2839 
2840 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2841 
2842 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2843 {
2844 	return qdf_mem_stat.dp_rx_skb_count_max;
2845 }
2846 
2847 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2848 
2849 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2850 {
2851 	return qdf_mem_stat.dp_tx_skb_mem_max;
2852 }
2853 
2854 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2855 
2856 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2857 {
2858 	return qdf_mem_stat.dp_rx_skb_mem_max;
2859 }
2860 
2861 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2862 
2863 int32_t qdf_mem_tx_desc_cnt_read(void)
2864 {
2865 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2866 }
2867 
2868 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2869 
2870 int32_t qdf_mem_tx_desc_max_read(void)
2871 {
2872 	return qdf_mem_stat.tx_descs_max;
2873 }
2874 
2875 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2876 
2877 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2878 				int32_t tx_descs_max)
2879 {
2880 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2881 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2882 }
2883 
2884 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2885 
2886 void qdf_mem_stats_init(void)
2887 {
2888 	qdf_mem_stat.skb_mem_max = 0;
2889 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
2890 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
2891 	qdf_mem_stat.dp_tx_skb_count_max = 0;
2892 	qdf_mem_stat.dp_rx_skb_count_max = 0;
2893 	qdf_mem_stat.tx_descs_max = 0;
2894 }
2895 
2896 qdf_export_symbol(qdf_mem_stats_init);
2897 
2898 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
2899 {
2900 	void *ptr;
2901 
2902 	if (!size) {
2903 		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
2904 		return NULL;
2905 	}
2906 
2907 	ptr = vzalloc(size);
2908 
2909 	return ptr;
2910 }
2911 
2912 qdf_export_symbol(__qdf_mem_valloc);
2913 
2914 void __qdf_mem_vfree(void *ptr)
2915 {
2916 	if (qdf_unlikely(!ptr))
2917 		return;
2918 
2919 	vfree(ptr);
2920 }
2921 
2922 qdf_export_symbol(__qdf_mem_vfree);
2923 
2924 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
2925 int
2926 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
2927 			  enum qdf_iommu_attr attr, void *data)
2928 {
2929 	return __qdf_iommu_domain_get_attr(domain, attr, data);
2930 }
2931 
2932 qdf_export_symbol(qdf_iommu_domain_get_attr);
2933 #endif
2934 
2935 #ifdef ENHANCED_OS_ABSTRACTION
2936 void qdf_update_mem_map_table(qdf_device_t osdev,
2937 			      qdf_mem_info_t *mem_info,
2938 			      qdf_dma_addr_t dma_addr,
2939 			      uint32_t mem_size)
2940 {
2941 	if (!mem_info) {
2942 		qdf_nofl_err("%s: NULL mem_info", __func__);
2943 		return;
2944 	}
2945 
2946 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
2947 }
2948 
2949 qdf_export_symbol(qdf_update_mem_map_table);
2950 
2951 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
2952 					  qdf_dma_addr_t dma_addr)
2953 {
2954 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2955 }
2956 
2957 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
2958 #endif
2959 
2960 #ifdef QCA_KMEM_CACHE_SUPPORT
2961 qdf_kmem_cache_t
2962 __qdf_kmem_cache_create(const char *cache_name,
2963 			qdf_size_t size)
2964 {
2965 	struct kmem_cache *cache;
2966 
2967 	cache = kmem_cache_create(cache_name, size,
2968 				  0, 0, NULL);
2969 
2970 	if (!cache)
2971 		return NULL;
2972 
2973 	return cache;
2974 }
2975 qdf_export_symbol(__qdf_kmem_cache_create);
2976 
2977 void
2978 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
2979 {
2980 	kmem_cache_destroy(cache);
2981 }
2982 
2983 qdf_export_symbol(__qdf_kmem_cache_destroy);
2984 
2985 void*
2986 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
2987 {
2988 	int flags = GFP_KERNEL;
2989 
2990 	if (in_interrupt() || irqs_disabled() || in_atomic())
2991 		flags = GFP_ATOMIC;
2992 
2993 	return kmem_cache_alloc(cache, flags);
2994 }
2995 
2996 qdf_export_symbol(__qdf_kmem_cache_alloc);
2997 
2998 void
2999 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3000 {
3002 	kmem_cache_free(cache, node);
3003 }
3004 
3005 qdf_export_symbol(__qdf_kmem_cache_free);
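
/*
 * Example: object cache lifecycle (illustrative sketch; "struct my_node"
 * is a placeholder for the caller's fixed-size object type):
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = __qdf_kmem_cache_create("my_node_cache",
 *					sizeof(struct my_node));
 *	if (!cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	node = __qdf_kmem_cache_alloc(cache);
 *	...
 *	__qdf_kmem_cache_free(cache, node);
 *	__qdf_kmem_cache_destroy(cache);
 *
 * __qdf_kmem_cache_alloc() switches to GFP_ATOMIC on its own when called
 * from interrupt or atomic context, so the same call is usable in both
 * process and softirq paths.
 */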
3006 #else
3007 qdf_kmem_cache_t
3008 __qdf_kmem_cache_create(const char *cache_name,
3009 			qdf_size_t size)
3010 {
3011 	return NULL;
3012 }
3013 
3014 void
3015 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3016 {
3017 }
3018 
3019 void *
3020 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3021 {
3022 	return NULL;
3023 }
3024 
3025 void
3026 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3027 {
3028 }
3029 #endif
3030 
3031 #ifdef QCA_DMA_PADDR_CHECK
3032 void qdf_dma_invalid_buf_list_init(void)
3033 {
3034 	int i;
3035 
3036 	for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++) {
3037 		qdf_list_create(&qdf_invalid_buf_list[i],
3038 				QDF_DMA_INVALID_BUF_LIST_SIZE);
3039 		qdf_invalid_buf_list_init[i] = true;
3040 	}
3041 	qdf_spinlock_create(&qdf_invalid_buf_list_lock);
3042 }
3043 
3044 void qdf_dma_invalid_buf_free(void *dev, uint8_t domain)
3045 {
3046 	bool is_separate;
3047 	qdf_list_t *cur_buf_list;
3048 	struct qdf_dma_buf_entry *entry;
3049 	QDF_STATUS status = QDF_STATUS_E_EMPTY;
3050 
3051 	if (!dev)
3052 		return;
3053 
3054 	if (domain >= MAX_DEBUG_DOMAIN_COUNT)
3055 		return;
3056 
3057 	if (!qdf_invalid_buf_list_init[domain])
3058 		return;
3059 
3060 	cur_buf_list = &qdf_invalid_buf_list[domain];
3061 	do {
3062 		qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
3063 		status = qdf_list_remove_front(cur_buf_list,
3064 					       (qdf_list_node_t **)&entry);
3065 		qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);
3066 
3067 		if (status != QDF_STATUS_SUCCESS)
3068 			break;
3069 
3070 		is_separate = !entry->vaddr ? false : true;
3071 		if (is_separate) {
3072 			dma_free_coherent(dev, entry->size, entry->vaddr,
3073 					  entry->phy_addr);
3074 			qdf_mem_free(entry);
3075 		} else
3076 			dma_free_coherent(dev, entry->size, entry,
3077 					  entry->phy_addr);
3078 	} while (!qdf_list_empty(cur_buf_list));
3079 	qdf_invalid_buf_list_init[domain] = false;
3080 }
3081 
3082 void qdf_dma_invalid_buf_list_deinit(void)
3083 {
3084 	int i;
3085 
3086 	for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++)
3087 		qdf_list_destroy(&qdf_invalid_buf_list[i]);
3088 
3089 	qdf_spinlock_destroy(&qdf_invalid_buf_list_lock);
3090 }
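
/*
 * Expected call ordering for the invalid-buffer bookkeeping above
 * (illustrative; "dev" is the struct device that owned the rejected DMA
 * buffers for the given debug domain):
 *
 *	qdf_dma_invalid_buf_list_init();	at driver init
 *	...
 *	qdf_dma_invalid_buf_free(dev, domain);	release held buffers
 *	...
 *	qdf_dma_invalid_buf_list_deinit();	at driver unload
 */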
3091 #endif /* QCA_DMA_PADDR_CHECK */
3092