xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision ed7ed761f307f964abd13da4df8dcb908086bd83)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * This file provides OS-dependent memory management APIs
23  */
24 
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46 
47 /* cnss prealloc maintains various prealloc pools of 8 KB, 16 KB, 32 KB and
48  * so on and allocates buffers from those pools for the wlan driver. When
49  * the wlan driver frees a buffer, cnss prealloc derives the slab_cache from
50  * the virtual address via the page struct to identify the prealloc pool id
51  * and return the buffer to that pool. Kernel 5.17 removed slab_cache from
52  * the page struct, so add headroom to store the cache pointer at the start
53  * of the allocated buffer and use it later to identify the prealloc pool id.
54  */
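/*
 * Illustrative layout when the headroom is in use (a sketch derived from
 * qdf_mem_prealloc_get() and qdf_mem_free_debug() below, which step the
 * returned pointer forward/back by sizeof(void *)):
 *
 *   raw prealloc buffer: | cache ptr slot (void *) | caller-visible data ... |
 *                        ^                         ^
 *                        used by cnss prealloc     pointer returned to caller
 */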
55 #if defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE)
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
57 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
58 {
59 	return true;
60 }
61 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
62 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
63 {
64 	return false;
65 }
66 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
67 #else /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
68 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
69 {
70 	return false;
71 }
72 #endif /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
73 
74 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
75 static bool mem_debug_disabled;
76 qdf_declare_param(mem_debug_disabled, bool);
77 #endif
78 
79 #ifdef MEMORY_DEBUG
80 static bool is_initial_mem_debug_disabled;
81 #endif
82 
83 /* Preprocessor Definitions and Constants */
84 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MB */
85 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
86 #define QDF_DEBUG_STRING_SIZE 512
87 
88 /**
89  * struct __qdf_mem_stat - qdf memory statistics
90  * @kmalloc: total kmalloc allocations
91  * @dma: total dma allocations
92  * @skb: total skb allocations
93  * @skb_total: total skb allocations in host driver
94  * @dp_tx_skb: total Tx skb allocations in datapath
95  * @dp_rx_skb: total Rx skb allocations in datapath
96  * @skb_mem_max: high watermark for skb allocations
97  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
98  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
99  * @dp_tx_skb_count: DP Tx buffer count
100  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
101  * @dp_rx_skb_count: DP Rx buffer count
102  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
103  * @tx_descs_outstanding: Current pending Tx descs count
104  * @tx_descs_max: High watermark for pending Tx descs count
105  */
106 static struct __qdf_mem_stat {
107 	qdf_atomic_t kmalloc;
108 	qdf_atomic_t dma;
109 	qdf_atomic_t skb;
110 	qdf_atomic_t skb_total;
111 	qdf_atomic_t dp_tx_skb;
112 	qdf_atomic_t dp_rx_skb;
113 	int32_t skb_mem_max;
114 	int32_t dp_tx_skb_mem_max;
115 	int32_t dp_rx_skb_mem_max;
116 	qdf_atomic_t dp_tx_skb_count;
117 	int32_t dp_tx_skb_count_max;
118 	qdf_atomic_t dp_rx_skb_count;
119 	int32_t dp_rx_skb_count_max;
120 	qdf_atomic_t tx_descs_outstanding;
121 	int32_t tx_descs_max;
122 } qdf_mem_stat;
123 
124 #ifdef MEMORY_DEBUG
125 #include "qdf_debug_domain.h"
126 
127 enum list_type {
128 	LIST_TYPE_MEM = 0,
129 	LIST_TYPE_DMA = 1,
130 	LIST_TYPE_NBUF = 2,
131 	LIST_TYPE_MAX,
132 };
133 
134 /**
135  * struct major_alloc_priv - private data registered to debugfs entry
136  *                           created to list the major allocations
137  * @type:            type of the list to be parsed
138  * @threshold:       configured by the user by writing to the respective
139  *                   debugfs entry. Functions which requested memory/dma
140  *                   allocations more than @threshold times are listed.
141  */
142 struct major_alloc_priv {
143 	enum list_type type;
144 	uint32_t threshold;
145 };
146 
147 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
148 static qdf_spinlock_t qdf_mem_list_lock;
149 
150 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
151 static qdf_spinlock_t qdf_mem_dma_list_lock;
152 
153 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
154 {
155 	return &qdf_mem_domains[domain];
156 }
157 
158 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
159 {
160 	return &qdf_mem_dma_domains[domain];
161 }
162 
163 /**
164  * struct qdf_mem_header - memory object to debug
165  * @node: node in the allocation tracking list
166  * @domain: the active memory domain at time of allocation
167  * @freed: flag set during free, used to detect double frees
168  *	Use uint8_t so we can detect corruption
169  * @func: name of the function the allocation was made from
170  * @line: line number of the file the allocation was made from
171  * @size: size of the allocation in bytes
172  * @caller: address of the caller requesting the allocation
173  * @header: a known value, used to detect out-of-bounds access
174  * @time: timestamp at which allocation was made
175  */
176 struct qdf_mem_header {
177 	qdf_list_node_t node;
178 	enum qdf_debug_domain domain;
179 	uint8_t freed;
180 	char func[QDF_MEM_FUNC_NAME_SIZE];
181 	uint32_t line;
182 	uint32_t size;
183 	void *caller;
184 	uint64_t header;
185 	uint64_t time;
186 };
187 
188 /* align the qdf_mem_header to 8 bytes */
189 #define QDF_DMA_MEM_HEADER_ALIGN 8
190 
191 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
192 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
193 
194 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
195 {
196 	return (struct qdf_mem_header *)ptr - 1;
197 }
198 
199 /* make sure the header pointer is 8-byte aligned */
200 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
201 							    qdf_size_t size)
202 {
203 	return (struct qdf_mem_header *)
204 				qdf_roundup((size_t)((uint8_t *)ptr + size),
205 					    QDF_DMA_MEM_HEADER_ALIGN);
206 }
207 
208 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
209 {
210 	return (uint64_t *)((void *)(header + 1) + header->size);
211 }
212 
213 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
214 {
215 	return (void *)(header + 1);
216 }
217 
218 /* number of bytes needed for the qdf memory debug information */
219 #define QDF_MEM_DEBUG_SIZE \
220 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
221 
222 /* number of bytes needed for the qdf dma memory debug information */
223 #define QDF_DMA_MEM_DEBUG_SIZE \
224 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
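/*
 * Illustrative layout of a debug-tracked host allocation, as implied by
 * qdf_mem_get_header()/qdf_mem_get_trailer() above (a sketch, not a
 * normative layout):
 *
 *   | struct qdf_mem_header | user data (size bytes) | WLAN_MEM_TRAILER |
 *                           ^
 *                           pointer handed back by qdf_mem_malloc_debug()
 *
 * For DMA allocations the header is instead placed after the user data at
 * the next QDF_DMA_MEM_HEADER_ALIGN boundary (see qdf_mem_dma_get_header()),
 * which is why QDF_DMA_MEM_DEBUG_SIZE reserves sizeof(header) plus alignment
 * slack rather than header plus trailer.
 */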
225 
226 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
227 {
228 	QDF_BUG(header);
229 	if (!header)
230 		return;
231 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
232 }
233 
234 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
235 				const char *func, uint32_t line, void *caller)
236 {
237 	QDF_BUG(header);
238 	if (!header)
239 		return;
240 
241 	header->domain = qdf_debug_domain_get();
242 	header->freed = false;
243 
244 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
245 
246 	header->line = line;
247 	header->size = size;
248 	header->caller = caller;
249 	header->header = WLAN_MEM_HEADER;
250 	header->time = qdf_get_log_timestamp();
251 }
252 
253 enum qdf_mem_validation_bitmap {
254 	QDF_MEM_BAD_HEADER = 1 << 0,
255 	QDF_MEM_BAD_TRAILER = 1 << 1,
256 	QDF_MEM_BAD_SIZE = 1 << 2,
257 	QDF_MEM_DOUBLE_FREE = 1 << 3,
258 	QDF_MEM_BAD_FREED = 1 << 4,
259 	QDF_MEM_BAD_NODE = 1 << 5,
260 	QDF_MEM_BAD_DOMAIN = 1 << 6,
261 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
262 };
263 
264 static enum qdf_mem_validation_bitmap
265 qdf_mem_trailer_validate(struct qdf_mem_header *header)
266 {
267 	enum qdf_mem_validation_bitmap error_bitmap = 0;
268 
269 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
270 		error_bitmap |= QDF_MEM_BAD_TRAILER;
271 	return error_bitmap;
272 }
273 
274 static enum qdf_mem_validation_bitmap
275 qdf_mem_header_validate(struct qdf_mem_header *header,
276 			enum qdf_debug_domain domain)
277 {
278 	enum qdf_mem_validation_bitmap error_bitmap = 0;
279 
280 	if (header->header != WLAN_MEM_HEADER)
281 		error_bitmap |= QDF_MEM_BAD_HEADER;
282 
283 	if (header->size > QDF_MEM_MAX_MALLOC)
284 		error_bitmap |= QDF_MEM_BAD_SIZE;
285 
286 	if (header->freed == true)
287 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
288 	else if (header->freed)
289 		error_bitmap |= QDF_MEM_BAD_FREED;
290 
291 	if (!qdf_list_node_in_any_list(&header->node))
292 		error_bitmap |= QDF_MEM_BAD_NODE;
293 
294 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
295 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
296 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
297 	else if (header->domain != domain)
298 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
299 
300 	return error_bitmap;
301 }
302 
303 static void
304 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
305 			    enum qdf_debug_domain current_domain,
306 			    enum qdf_mem_validation_bitmap error_bitmap,
307 			    const char *func,
308 			    uint32_t line)
309 {
310 	if (!error_bitmap)
311 		return;
312 
313 	if (error_bitmap & QDF_MEM_BAD_HEADER)
314 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
315 			header->header, WLAN_MEM_HEADER);
316 
317 	if (error_bitmap & QDF_MEM_BAD_SIZE)
318 		qdf_err("Corrupted memory size %u (expected < %d)",
319 			header->size, QDF_MEM_MAX_MALLOC);
320 
321 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
322 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
323 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
324 
325 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
326 		qdf_err("Memory has previously been freed");
327 
328 	if (error_bitmap & QDF_MEM_BAD_FREED)
329 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
330 
331 	if (error_bitmap & QDF_MEM_BAD_NODE)
332 		qdf_err("Corrupted memory header node or double free");
333 
334 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
335 		qdf_err("Corrupted memory domain 0x%x", header->domain);
336 
337 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
338 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
339 			qdf_debug_domain_name(header->domain), header->domain,
340 			qdf_debug_domain_name(current_domain), current_domain);
341 
342 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
343 }
344 
345 /**
346  * struct __qdf_mem_info - memory statistics
347  * @func: the function which allocated memory
348  * @line: the line at which allocation happened
349  * @size: the size of allocation
350  * @caller: Address of the caller function
351  * @count: how many allocations of same type
352  * @time: timestamp at which allocation happened
353  */
354 struct __qdf_mem_info {
355 	char func[QDF_MEM_FUNC_NAME_SIZE];
356 	uint32_t line;
357 	uint32_t size;
358 	void *caller;
359 	uint32_t count;
360 	uint64_t time;
361 };
362 
363 /*
364  * The table depth defines the de-duplication proximity scope.
365  * A deeper table takes more time, so choose an optimal value.
366  */
367 #define QDF_MEM_STAT_TABLE_SIZE 8
368 
369 /**
370  * qdf_mem_debug_print_header() - memory debug header print logic
371  * @print: the print adapter function
372  * @print_priv: the private data to be consumed by @print
373  * @threshold: the threshold value set by user to list top allocations
374  *
375  * Return: None
376  */
377 static void qdf_mem_debug_print_header(qdf_abstract_print print,
378 				       void *print_priv,
379 				       uint32_t threshold)
380 {
381 	if (threshold)
382 		print(print_priv, "APIs that requested allocations >= %u times",
383 		      threshold);
384 	print(print_priv,
385 	      "--------------------------------------------------------------");
386 	print(print_priv,
387 	      " count    size     total    filename     caller    timestamp");
388 	print(print_priv,
389 	      "--------------------------------------------------------------");
390 }
391 
392 /**
393  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
394  * @table: the memory metadata table to insert into
395  * @meta: the memory metadata to insert
396  *
397  * Return: true if the table is full after inserting, false otherwise
398  */
399 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
400 				      struct qdf_mem_header *meta)
401 {
402 	int i;
403 
404 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
405 		if (!table[i].count) {
406 			qdf_str_lcopy(table[i].func, meta->func,
407 				      QDF_MEM_FUNC_NAME_SIZE);
408 			table[i].line = meta->line;
409 			table[i].size = meta->size;
410 			table[i].count = 1;
411 			table[i].caller = meta->caller;
412 			table[i].time = meta->time;
413 			break;
414 		}
415 
416 		if (qdf_str_eq(table[i].func, meta->func) &&
417 		    table[i].line == meta->line &&
418 		    table[i].size == meta->size &&
419 		    table[i].caller == meta->caller) {
420 			table[i].count++;
421 			break;
422 		}
423 	}
424 
425 	/* return true if the table is now full */
426 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
427 }
428 
429 /**
430  * qdf_mem_domain_print() - output agnostic memory domain print logic
431  * @domain: the memory domain to print
432  * @print: the print adapter function
433  * @print_priv: the private data to be consumed by @print
434  * @threshold: the threshold value set by the user to list top allocations
435  * @mem_print: pointer to function which prints the memory allocation data
436  *
437  * Return: None
438  */
439 static void qdf_mem_domain_print(qdf_list_t *domain,
440 				 qdf_abstract_print print,
441 				 void *print_priv,
442 				 uint32_t threshold,
443 				 void (*mem_print)(struct __qdf_mem_info *,
444 						   qdf_abstract_print,
445 						   void *, uint32_t))
446 {
447 	QDF_STATUS status;
448 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
449 	qdf_list_node_t *node;
450 
451 	qdf_mem_zero(table, sizeof(table));
452 	qdf_mem_debug_print_header(print, print_priv, threshold);
453 
454 	/* hold lock while inserting to avoid use-after-free of the metadata */
455 	qdf_spin_lock(&qdf_mem_list_lock);
456 	status = qdf_list_peek_front(domain, &node);
457 	while (QDF_IS_STATUS_SUCCESS(status)) {
458 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
459 		bool is_full = qdf_mem_meta_table_insert(table, meta);
460 
461 		qdf_spin_unlock(&qdf_mem_list_lock);
462 
463 		if (is_full) {
464 			(*mem_print)(table, print, print_priv, threshold);
465 			qdf_mem_zero(table, sizeof(table));
466 		}
467 
468 		qdf_spin_lock(&qdf_mem_list_lock);
469 		status = qdf_list_peek_next(domain, node, &node);
470 	}
471 	qdf_spin_unlock(&qdf_mem_list_lock);
472 
473 	(*mem_print)(table, print, print_priv, threshold);
474 }
475 
476 /**
477  * qdf_mem_meta_table_print() - memory metadata table print logic
478  * @table: the memory metadata table to print
479  * @print: the print adapter function
480  * @print_priv: the private data to be consumed by @print
481  * @threshold: the threshold value set by user to list top allocations
482  *
483  * Return: None
484  */
485 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
486 				     qdf_abstract_print print,
487 				     void *print_priv,
488 				     uint32_t threshold)
489 {
490 	int i;
491 	char debug_str[QDF_DEBUG_STRING_SIZE];
492 	size_t len = 0;
493 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
494 
495 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
496 			     "%s", debug_prefix);
497 
498 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
499 		if (!table[i].count)
500 			break;
501 
502 		print(print_priv,
503 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
504 		      table[i].count,
505 		      table[i].size,
506 		      table[i].count * table[i].size,
507 		      table[i].func,
508 		      table[i].line, table[i].caller,
509 		      table[i].time);
510 		len += qdf_scnprintf(debug_str + len,
511 				     sizeof(debug_str) - len,
512 				     " @ %s:%u %pS",
513 				     table[i].func,
514 				     table[i].line,
515 				     table[i].caller);
516 	}
517 	print(print_priv, "%s", debug_str);
518 }
519 
520 static int qdf_err_printer(void *priv, const char *fmt, ...)
521 {
522 	va_list args;
523 
524 	va_start(args, fmt);
525 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
526 	va_end(args);
527 
528 	return 0;
529 }
530 
531 #endif /* MEMORY_DEBUG */
532 
533 bool prealloc_disabled = 1;
534 qdf_declare_param(prealloc_disabled, bool);
535 qdf_export_symbol(prealloc_disabled);
536 
537 int qdf_mem_malloc_flags(void)
538 {
539 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
540 		return GFP_ATOMIC;
541 
542 	return GFP_KERNEL;
543 }
544 
545 qdf_export_symbol(qdf_mem_malloc_flags);
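/*
 * Illustrative use (a sketch, not from a real caller): the returned flags
 * can be passed straight to the kernel allocator, mirroring what
 * qdf_mem_malloc_debug() does below when no explicit flag is supplied:
 *
 *     buf = kzalloc(len, qdf_mem_malloc_flags());
 */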
546 
547 bool qdf_prealloc_disabled_config_get(void)
548 {
549 	return prealloc_disabled;
550 }
551 
552 qdf_export_symbol(qdf_prealloc_disabled_config_get);
553 
554 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
555 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
556 {
557 	QDF_STATUS status;
558 
559 	status = qdf_bool_parse(str_value, &prealloc_disabled);
560 	return status;
561 }
562 #endif
563 
564 #if defined WLAN_DEBUGFS
565 
566 /* Debugfs root directory for qdf_mem */
567 static struct dentry *qdf_mem_debugfs_root;
568 
569 #ifdef MEMORY_DEBUG
570 static int seq_printf_printer(void *priv, const char *fmt, ...)
571 {
572 	struct seq_file *file = priv;
573 	va_list args;
574 
575 	va_start(args, fmt);
576 	seq_vprintf(file, fmt, args);
577 	seq_puts(file, "\n");
578 	va_end(args);
579 
580 	return 0;
581 }
582 
583 /**
584  * qdf_print_major_alloc() - memory metadata table print logic
585  * @table: the memory metadata table to print
586  * @print: the print adapter function
587  * @print_priv: the private data to be consumed by @print
588  * @threshold: the threshold value set by the user to list top allocations
589  *
590  * Return: None
591  */
592 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
593 				  qdf_abstract_print print,
594 				  void *print_priv,
595 				  uint32_t threshold)
596 {
597 	int i;
598 
599 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
600 		if (!table[i].count)
601 			break;
602 		if (table[i].count >= threshold)
603 			print(print_priv,
604 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
605 			      table[i].count,
606 			      table[i].size,
607 			      table[i].count * table[i].size,
608 			      table[i].func,
609 			      table[i].line, table[i].caller,
610 			      table[i].time);
611 	}
612 }
613 
614 /**
615  * qdf_mem_seq_start() - sequential callback to start
616  * @seq: seq_file handle
617  * @pos: The start position of the sequence
618  *
619  * Return: iterator pointer, or NULL if iteration is complete
620  */
621 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
622 {
623 	enum qdf_debug_domain domain = *pos;
624 
625 	if (!qdf_debug_domain_valid(domain))
626 		return NULL;
627 
628 	/* just use the current position as our iterator */
629 	return pos;
630 }
631 
632 /**
633  * qdf_mem_seq_next() - next sequential callback
634  * @seq: seq_file handle
635  * @v: the current iterator
636  * @pos: the current position
637  *
638  * Get the next node and release previous node.
639  *
640  * Return: iterator pointer, or NULL if iteration is complete
641  */
642 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
643 {
644 	++*pos;
645 
646 	return qdf_mem_seq_start(seq, pos);
647 }
648 
649 /**
650  * qdf_mem_seq_stop() - stop sequential callback
651  * @seq: seq_file handle
652  * @v: current iterator
653  *
654  * Return: None
655  */
656 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
657 
658 /**
659  * qdf_mem_seq_show() - print sequential callback
660  * @seq: seq_file handle
661  * @v: current iterator
662  *
663  * Return: 0 - success
664  */
665 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
666 {
667 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
668 
669 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
670 		   qdf_debug_domain_name(domain_id), domain_id);
671 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
672 			     seq_printf_printer,
673 			     seq,
674 			     0,
675 			     qdf_mem_meta_table_print);
676 
677 	return 0;
678 }
679 
680 /* sequential file operation table */
681 static const struct seq_operations qdf_mem_seq_ops = {
682 	.start = qdf_mem_seq_start,
683 	.next  = qdf_mem_seq_next,
684 	.stop  = qdf_mem_seq_stop,
685 	.show  = qdf_mem_seq_show,
686 };
687 
688 
689 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
690 {
691 	return seq_open(file, &qdf_mem_seq_ops);
692 }
693 
694 /**
695  * qdf_major_alloc_show() - print sequential callback
696  * @seq: seq_file handle
697  * @v: current iterator
698  *
699  * Return: 0 - success
700  */
701 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
702 {
703 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
704 	struct major_alloc_priv *priv;
705 	qdf_list_t *list;
706 
707 	priv = (struct major_alloc_priv *)seq->private;
708 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
709 		   qdf_debug_domain_name(domain_id), domain_id);
710 
711 	switch (priv->type) {
712 	case LIST_TYPE_MEM:
713 		list = qdf_mem_list_get(domain_id);
714 		break;
715 	case LIST_TYPE_DMA:
716 		list = qdf_mem_dma_list(domain_id);
717 		break;
718 	default:
719 		list = NULL;
720 		break;
721 	}
722 
723 	if (list)
724 		qdf_mem_domain_print(list,
725 				     seq_printf_printer,
726 				     seq,
727 				     priv->threshold,
728 				     qdf_print_major_alloc);
729 
730 	return 0;
731 }
732 
733 /* sequential file operation table created to track major allocs */
734 static const struct seq_operations qdf_major_allocs_seq_ops = {
735 	.start = qdf_mem_seq_start,
736 	.next = qdf_mem_seq_next,
737 	.stop = qdf_mem_seq_stop,
738 	.show = qdf_major_alloc_show,
739 };
740 
741 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
742 {
743 	void *private = inode->i_private;
744 	struct seq_file *seq;
745 	int rc;
746 
747 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
748 	if (rc == 0) {
749 		seq = file->private_data;
750 		seq->private = private;
751 	}
752 	return rc;
753 }
754 
755 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
756 					     const char __user *user_buf,
757 					     size_t count,
758 					     loff_t *pos)
759 {
760 	char buf[32];
761 	ssize_t buf_size;
762 	uint32_t threshold;
763 	struct seq_file *seq = file->private_data;
764 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
765 
766 	buf_size = min(count, (sizeof(buf) - 1));
767 	if (buf_size <= 0)
768 		return 0;
769 	if (copy_from_user(buf, user_buf, buf_size))
770 		return -EFAULT;
771 	buf[buf_size] = '\0';
772 	if (!kstrtou32(buf, 10, &threshold))
773 		priv->threshold = threshold;
774 	return buf_size;
775 }
776 
777 /**
778  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
779  * @threshold: the threshold value set by the user to list top allocations
780  * @print: the print adapter function
781  * @print_priv: the private data to be consumed by @print
782  * @mem_print: pointer to function which prints the memory allocation data
783  *
784  * Return: None
785  */
786 static void
787 qdf_print_major_nbuf_allocs(uint32_t threshold,
788 			    qdf_abstract_print print,
789 			    void *print_priv,
790 			    void (*mem_print)(struct __qdf_mem_info *,
791 					      qdf_abstract_print,
792 					      void *, uint32_t))
793 {
794 	uint32_t nbuf_iter;
795 	unsigned long irq_flag = 0;
796 	QDF_NBUF_TRACK *p_node;
797 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
798 	struct qdf_mem_header meta;
799 	bool is_full;
800 
801 	qdf_mem_zero(table, sizeof(table));
802 	qdf_mem_debug_print_header(print, print_priv, threshold);
803 
804 	if (is_initial_mem_debug_disabled)
805 		return;
806 
807 	qdf_rl_info("major nbuf print with threshold %u", threshold);
808 
809 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
810 	     nbuf_iter++) {
811 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
812 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
813 		while (p_node) {
814 			meta.line = p_node->line_num;
815 			meta.size = p_node->size;
816 			meta.caller = NULL;
817 			meta.time = p_node->time;
818 			qdf_str_lcopy(meta.func, p_node->func_name,
819 				      QDF_MEM_FUNC_NAME_SIZE);
820 
821 			is_full = qdf_mem_meta_table_insert(table, &meta);
822 
823 			if (is_full) {
824 				(*mem_print)(table, print,
825 					     print_priv, threshold);
826 				qdf_mem_zero(table, sizeof(table));
827 			}
828 
829 			p_node = p_node->p_next;
830 		}
831 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
832 	}
833 
834 	(*mem_print)(table, print, print_priv, threshold);
835 
836 	qdf_rl_info("major nbuf print end");
837 }
838 
839 /**
840  * qdf_major_nbuf_alloc_show() - print sequential callback
841  * @seq: seq_file handle
842  * @v: current iterator
843  *
844  * Return: 0 - success
845  */
846 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
847 {
848 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
849 
850 	if (!priv) {
851 		qdf_err("priv is null");
852 		return -EINVAL;
853 	}
854 
855 	qdf_print_major_nbuf_allocs(priv->threshold,
856 				    seq_printf_printer,
857 				    seq,
858 				    qdf_print_major_alloc);
859 
860 	return 0;
861 }
862 
863 /**
864  * qdf_nbuf_seq_start() - sequential callback to start
865  * @seq: seq_file handle
866  * @pos: The start position of the sequence
867  *
868  * Return: iterator pointer, or NULL if iteration is complete
869  */
870 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
871 {
872 	enum qdf_debug_domain domain = *pos;
873 
874 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
875 		return NULL;
876 
877 	return pos;
878 }
879 
880 /**
881  * qdf_nbuf_seq_next() - next sequential callback
882  * @seq: seq_file handle
883  * @v: the current iterator
884  * @pos: the current position
885  *
886  * Get the next node and release previous node.
887  *
888  * Return: iterator pointer, or NULL if iteration is complete
889  */
890 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
891 {
892 	++*pos;
893 
894 	return qdf_nbuf_seq_start(seq, pos);
895 }
896 
897 /**
898  * qdf_nbuf_seq_stop() - stop sequential callback
899  * @seq: seq_file handle
900  * @v: current iterator
901  *
902  * Return: None
903  */
904 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
905 
906 /* sequential file operation table created to track major skb allocs */
907 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
908 	.start = qdf_nbuf_seq_start,
909 	.next = qdf_nbuf_seq_next,
910 	.stop = qdf_nbuf_seq_stop,
911 	.show = qdf_major_nbuf_alloc_show,
912 };
913 
914 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
915 {
916 	void *private = inode->i_private;
917 	struct seq_file *seq;
918 	int rc;
919 
920 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
921 	if (rc == 0) {
922 		seq = file->private_data;
923 		seq->private = private;
924 	}
925 	return rc;
926 }
927 
928 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
929 						  const char __user *user_buf,
930 						  size_t count,
931 						  loff_t *pos)
932 {
933 	char buf[32];
934 	ssize_t buf_size;
935 	uint32_t threshold;
936 	struct seq_file *seq = file->private_data;
937 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
938 
939 	buf_size = min(count, (sizeof(buf) - 1));
940 	if (buf_size <= 0)
941 		return 0;
942 	if (copy_from_user(buf, user_buf, buf_size))
943 		return -EFAULT;
944 	buf[buf_size] = '\0';
945 	if (!kstrtou32(buf, 10, &threshold))
946 		priv->threshold = threshold;
947 	return buf_size;
948 }
949 
950 /* file operation table for listing major allocs */
951 static const struct file_operations fops_qdf_major_allocs = {
952 	.owner = THIS_MODULE,
953 	.open = qdf_major_allocs_open,
954 	.read = seq_read,
955 	.llseek = seq_lseek,
956 	.release = seq_release,
957 	.write = qdf_major_alloc_set_threshold,
958 };
959 
960 /* debugfs file operation table */
961 static const struct file_operations fops_qdf_mem_debugfs = {
962 	.owner = THIS_MODULE,
963 	.open = qdf_mem_debugfs_open,
964 	.read = seq_read,
965 	.llseek = seq_lseek,
966 	.release = seq_release,
967 };
968 
969 /* file operation table for listing major allocs */
970 static const struct file_operations fops_qdf_nbuf_major_allocs = {
971 	.owner = THIS_MODULE,
972 	.open = qdf_major_nbuf_allocs_open,
973 	.read = seq_read,
974 	.llseek = seq_lseek,
975 	.release = seq_release,
976 	.write = qdf_major_nbuf_alloc_set_threshold,
977 };
978 
979 static struct major_alloc_priv mem_priv = {
980 	/* List type set to mem */
981 	LIST_TYPE_MEM,
982 	/* initial threshold to list APIs which allocates mem >= 50 times */
983 	/* initial threshold to list APIs which allocate mem >= 50 times */
984 };
985 
986 static struct major_alloc_priv dma_priv = {
987 	/* List type set to DMA */
988 	LIST_TYPE_DMA,
989 	/* initial threshold to list APIs which allocates dma >= 50 times */
990 	/* initial threshold to list APIs which allocate dma >= 50 times */
991 };
992 
993 static struct major_alloc_priv nbuf_priv = {
994 	/* List type set to NBUF */
995 	LIST_TYPE_NBUF,
996 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
997 	/* initial threshold to list APIs which allocate nbuf >= 50 times */
998 };
999 
1000 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1001 {
1002 	if (is_initial_mem_debug_disabled)
1003 		return QDF_STATUS_SUCCESS;
1004 
1005 	if (!qdf_mem_debugfs_root)
1006 		return QDF_STATUS_E_FAILURE;
1007 
1008 	debugfs_create_file("list",
1009 			    S_IRUSR,
1010 			    qdf_mem_debugfs_root,
1011 			    NULL,
1012 			    &fops_qdf_mem_debugfs);
1013 
1014 	debugfs_create_file("major_mem_allocs",
1015 			    0600,
1016 			    qdf_mem_debugfs_root,
1017 			    &mem_priv,
1018 			    &fops_qdf_major_allocs);
1019 
1020 	debugfs_create_file("major_dma_allocs",
1021 			    0600,
1022 			    qdf_mem_debugfs_root,
1023 			    &dma_priv,
1024 			    &fops_qdf_major_allocs);
1025 
1026 	debugfs_create_file("major_nbuf_allocs",
1027 			    0600,
1028 			    qdf_mem_debugfs_root,
1029 			    &nbuf_priv,
1030 			    &fops_qdf_nbuf_major_allocs);
1031 
1032 	return QDF_STATUS_SUCCESS;
1033 }
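/*
 * Usage sketch for the entries created above (paths are an assumption; the
 * actual location depends on where debugfs is mounted and on the directory
 * returned by qdf_debugfs_get_root()):
 *
 *     # dump tracked allocations per debug domain
 *     cat <debugfs>/<qdf root>/mem/list
 *
 *     # list call sites with >= 100 currently tracked host allocations
 *     echo 100 > <debugfs>/<qdf root>/mem/major_mem_allocs
 *     cat <debugfs>/<qdf root>/mem/major_mem_allocs
 */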
1034 
1035 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1036 {
1037 	return QDF_STATUS_SUCCESS;
1038 }
1039 
1040 #else /* MEMORY_DEBUG */
1041 
1042 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1043 {
1044 	return QDF_STATUS_E_NOSUPPORT;
1045 }
1046 
1047 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1048 {
1049 	return QDF_STATUS_E_NOSUPPORT;
1050 }
1051 
1052 #endif /* MEMORY_DEBUG */
1053 
1054 
1055 static void qdf_mem_debugfs_exit(void)
1056 {
1057 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1058 	qdf_mem_debugfs_root = NULL;
1059 }
1060 
1061 static QDF_STATUS qdf_mem_debugfs_init(void)
1062 {
1063 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1064 
1065 	if (!qdf_debugfs_root)
1066 		return QDF_STATUS_E_FAILURE;
1067 
1068 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1069 
1070 	if (!qdf_mem_debugfs_root)
1071 		return QDF_STATUS_E_FAILURE;
1072 
1073 
1074 	debugfs_create_atomic_t("kmalloc",
1075 				S_IRUSR,
1076 				qdf_mem_debugfs_root,
1077 				&qdf_mem_stat.kmalloc);
1078 
1079 	debugfs_create_atomic_t("dma",
1080 				S_IRUSR,
1081 				qdf_mem_debugfs_root,
1082 				&qdf_mem_stat.dma);
1083 
1084 	debugfs_create_atomic_t("skb",
1085 				S_IRUSR,
1086 				qdf_mem_debugfs_root,
1087 				&qdf_mem_stat.skb);
1088 
1089 	return QDF_STATUS_SUCCESS;
1090 }
1091 
1092 #else /* WLAN_DEBUGFS */
1093 
1094 static QDF_STATUS qdf_mem_debugfs_init(void)
1095 {
1096 	return QDF_STATUS_E_NOSUPPORT;
1097 }
1098 static void qdf_mem_debugfs_exit(void) {}
1099 
1100 
1101 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1102 {
1103 	return QDF_STATUS_E_NOSUPPORT;
1104 }
1105 
1106 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1107 {
1108 	return QDF_STATUS_E_NOSUPPORT;
1109 }
1110 
1111 #endif /* WLAN_DEBUGFS */
1112 
1113 void qdf_mem_kmalloc_inc(qdf_size_t size)
1114 {
1115 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1116 }
1117 
1118 static void qdf_mem_dma_inc(qdf_size_t size)
1119 {
1120 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1121 }
1122 
1123 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1124 void qdf_mem_skb_inc(qdf_size_t size)
1125 {
1126 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1127 }
1128 
1129 void qdf_mem_skb_dec(qdf_size_t size)
1130 {
1131 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1132 }
1133 
1134 void qdf_mem_skb_total_inc(qdf_size_t size)
1135 {
1136 	int32_t skb_mem_max = 0;
1137 
1138 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1139 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1140 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1141 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1142 }
1143 
1144 void qdf_mem_skb_total_dec(qdf_size_t size)
1145 {
1146 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1147 }
1148 
1149 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1150 {
1151 	int32_t curr_dp_tx_skb_mem_max = 0;
1152 
1153 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1154 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1155 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1156 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1157 }
1158 
1159 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1160 {
1161 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1162 }
1163 
1164 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1165 {
1166 	int32_t curr_dp_rx_skb_mem_max = 0;
1167 
1168 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1169 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1170 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1171 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1172 }
1173 
1174 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1175 {
1176 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1177 }
1178 
1179 void qdf_mem_dp_tx_skb_cnt_inc(void)
1180 {
1181 	int32_t curr_dp_tx_skb_count_max = 0;
1182 
1183 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1184 	curr_dp_tx_skb_count_max =
1185 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1186 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1187 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1188 }
1189 
1190 void qdf_mem_dp_tx_skb_cnt_dec(void)
1191 {
1192 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1193 }
1194 
1195 void qdf_mem_dp_rx_skb_cnt_inc(void)
1196 {
1197 	int32_t curr_dp_rx_skb_count_max = 0;
1198 
1199 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1200 	curr_dp_rx_skb_count_max =
1201 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1202 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1203 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1204 }
1205 
1206 void qdf_mem_dp_rx_skb_cnt_dec(void)
1207 {
1208 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1209 }
1210 #endif
1211 
1212 void qdf_mem_kmalloc_dec(qdf_size_t size)
1213 {
1214 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1215 }
1216 
1217 static inline void qdf_mem_dma_dec(qdf_size_t size)
1218 {
1219 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1220 }
1221 
1222 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1223 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1224 {
1225 	__qdf_mempool_ctxt_t *new_pool = NULL;
1226 	u_int32_t align = L1_CACHE_BYTES;
1227 	unsigned long aligned_pool_mem;
1228 	int pool_id;
1229 	int i;
1230 
1231 	if (prealloc_disabled) {
1232 		/* TBD: We can maintain a list of pools in qdf_device_t
1233 		 * to help debugging
1234 		 * when pre-allocation is not enabled
1235 		 */
1236 		new_pool = (__qdf_mempool_ctxt_t *)
1237 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1238 		if (!new_pool)
1239 			return -ENOMEM;
1240 
1241 		memset(new_pool, 0, sizeof(*new_pool));
1242 		/* TBD: define flags for zeroing buffers etc */
1243 		new_pool->flags = flags;
1244 		new_pool->elem_size = elem_size;
1245 		new_pool->max_elem = elem_cnt;
1246 		*pool_addr = new_pool;
1247 		return 0;
1248 	}
1249 
1250 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1251 		if (!osdev->mem_pool[pool_id])
1252 			break;
1253 	}
1254 
1255 	if (pool_id == MAX_MEM_POOLS)
1256 		return -ENOMEM;
1257 
1258 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1259 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1260 	if (!new_pool)
1261 		return -ENOMEM;
1262 
1263 	memset(new_pool, 0, sizeof(*new_pool));
1264 	/* TBD: define flags for zeroing buffers etc */
1265 	new_pool->flags = flags;
1266 	new_pool->pool_id = pool_id;
1267 
1268 	/* Round up the element size to cacheline */
1269 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1270 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1271 				((align)?(align - 1):0);
1272 
1273 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1274 	if (!new_pool->pool_mem) {
1275 		/* TBD: Check if we need get_free_pages above */
1276 		kfree(new_pool);
1277 		osdev->mem_pool[pool_id] = NULL;
1278 		return -ENOMEM;
1279 	}
1280 
1281 	spin_lock_init(&new_pool->lock);
1282 
1283 	/* Initialize free list */
1284 	aligned_pool_mem = (align) ? roundup((unsigned long)new_pool->pool_mem,
1285 					     align) : (unsigned long)new_pool->pool_mem;
1286 	STAILQ_INIT(&new_pool->free_list);
1287 
1288 	for (i = 0; i < elem_cnt; i++)
1289 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1290 			(mempool_elem_t *)(aligned_pool_mem +
1291 			(new_pool->elem_size * i)), mempool_entry);
1292 
1293 
1294 	new_pool->free_cnt = elem_cnt;
1295 	*pool_addr = new_pool;
1296 	return 0;
1297 }
1298 qdf_export_symbol(__qdf_mempool_init);
1299 
1300 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1301 {
1302 	int pool_id = 0;
1303 
1304 	if (!pool)
1305 		return;
1306 
1307 	if (prealloc_disabled) {
1308 		kfree(pool);
1309 		return;
1310 	}
1311 
1312 	pool_id = pool->pool_id;
1313 
1314 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1315 	kfree(pool->pool_mem);
1316 	kfree(pool);
1317 	osdev->mem_pool[pool_id] = NULL;
1318 }
1319 qdf_export_symbol(__qdf_mempool_destroy);
1320 
1321 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1322 {
1323 	void *buf = NULL;
1324 
1325 	if (!pool)
1326 		return NULL;
1327 
1328 	if (prealloc_disabled)
1329 		return qdf_mem_malloc(pool->elem_size);
1330 
1331 	spin_lock_bh(&pool->lock);
1332 
1333 	buf = STAILQ_FIRST(&pool->free_list);
1334 	if (buf) {
1335 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1336 		pool->free_cnt--;
1337 	}
1338 
1339 	/* TBD: Update free count if debug is enabled */
1340 	spin_unlock_bh(&pool->lock);
1341 
1342 	return buf;
1343 }
1344 qdf_export_symbol(__qdf_mempool_alloc);
1345 
1346 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1347 {
1348 	if (!pool)
1349 		return;
1350 
1351 
1352 	if (prealloc_disabled)
1353 		return qdf_mem_free(buf);
1354 
1355 	spin_lock_bh(&pool->lock);
1356 	pool->free_cnt++;
1357 
1358 	STAILQ_INSERT_TAIL
1359 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1360 	spin_unlock_bh(&pool->lock);
1361 }
1362 qdf_export_symbol(__qdf_mempool_free);
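/*
 * Illustrative mempool usage (a sketch; "osdev", "pool", "elem" and
 * "struct foo" are placeholders, and error handling is elided):
 *
 *     __qdf_mempool_t pool;
 *
 *     if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *             return -ENOMEM;
 *
 *     elem = __qdf_mempool_alloc(osdev, pool);
 *     ...
 *     __qdf_mempool_free(osdev, pool, elem);
 *     __qdf_mempool_destroy(osdev, pool);
 */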
1363 
1364 #ifdef CNSS_MEM_PRE_ALLOC
1365 static bool qdf_might_be_prealloc(void *ptr)
1366 {
1367 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1368 		return true;
1369 	else
1370 		return false;
1371 }
1372 
1373 /**
1374  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1375  * @size: the number of bytes to allocate
1376  *
1377  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1378  * a chunk of pre-allocated memory. If size is less than or equal to
1379  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1380  *
1381  * Return: NULL on failure, non-NULL on success
1382  */
1383 static void *qdf_mem_prealloc_get(size_t size)
1384 {
1385 	void *ptr;
1386 
1387 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1388 		return NULL;
1389 
1390 	ptr = wcnss_prealloc_get(size);
1391 	if (!ptr)
1392 		return NULL;
1393 
1394 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1395 		ptr += sizeof(void *);
1396 
1397 	memset(ptr, 0, size);
1398 
1399 	return ptr;
1400 }
1401 
1402 static inline bool qdf_mem_prealloc_put(void *ptr)
1403 {
1404 	return wcnss_prealloc_put(ptr);
1405 }
1406 #else
1407 static bool qdf_might_be_prealloc(void *ptr)
1408 {
1409 	return false;
1410 }
1411 
1412 static inline void *qdf_mem_prealloc_get(size_t size)
1413 {
1414 	return NULL;
1415 }
1416 
1417 static inline bool qdf_mem_prealloc_put(void *ptr)
1418 {
1419 	return false;
1420 }
1421 #endif /* CNSS_MEM_PRE_ALLOC */
1422 
1423 /* External Function implementation */
1424 #ifdef MEMORY_DEBUG
1425 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1426 bool qdf_mem_debug_config_get(void)
1427 {
1428 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1429 	return false;
1430 }
1431 #else
1432 bool qdf_mem_debug_config_get(void)
1433 {
1434 	return mem_debug_disabled;
1435 }
1436 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1437 
1438 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1439 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1440 {
1441 	QDF_STATUS status;
1442 
1443 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1444 	return status;
1445 }
1446 #endif
1447 
1448 /**
1449  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1450  *
1451  * Return: none
1452  */
1453 static void qdf_mem_debug_init(void)
1454 {
1455 	int i;
1456 
1457 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1458 
1459 	if (is_initial_mem_debug_disabled)
1460 		return;
1461 
1462 	/* Initialize the lists with a maximum size of 60000 */
1463 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1464 		qdf_list_create(&qdf_mem_domains[i], 60000);
1465 	qdf_spinlock_create(&qdf_mem_list_lock);
1466 
1467 	/* dma */
1468 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1469 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1470 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1471 }
1472 
1473 static uint32_t
1474 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1475 			       qdf_list_t *mem_list)
1476 {
1477 	if (is_initial_mem_debug_disabled)
1478 		return 0;
1479 
1480 	if (qdf_list_empty(mem_list))
1481 		return 0;
1482 
1483 	qdf_err("Memory leaks detected in %s domain!",
1484 		qdf_debug_domain_name(domain));
1485 	qdf_mem_domain_print(mem_list,
1486 			     qdf_err_printer,
1487 			     NULL,
1488 			     0,
1489 			     qdf_mem_meta_table_print);
1490 
1491 	return mem_list->count;
1492 }
1493 
1494 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1495 {
1496 	uint32_t leak_count = 0;
1497 	int i;
1498 
1499 	if (is_initial_mem_debug_disabled)
1500 		return;
1501 
1502 	/* detect and print leaks */
1503 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1504 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1505 
1506 	if (leak_count)
1507 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1508 				   leak_count);
1509 }
1510 
1511 /**
1512  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1513  *
1514  * Return: none
1515  */
1516 static void qdf_mem_debug_exit(void)
1517 {
1518 	int i;
1519 
1520 	if (is_initial_mem_debug_disabled)
1521 		return;
1522 
1523 	/* mem */
1524 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1525 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1526 		qdf_list_destroy(qdf_mem_list_get(i));
1527 
1528 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1529 
1530 	/* dma */
1531 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1532 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1533 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1534 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1535 }
1536 
1537 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1538 			   void *caller, uint32_t flag)
1539 {
1540 	QDF_STATUS status;
1541 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1542 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1543 	struct qdf_mem_header *header;
1544 	void *ptr;
1545 	unsigned long start, duration;
1546 
1547 	if (is_initial_mem_debug_disabled)
1548 		return __qdf_mem_malloc(size, func, line);
1549 
1550 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1551 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1552 		return NULL;
1553 	}
1554 
1555 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1556 		size += sizeof(void *);
1557 
1558 	ptr = qdf_mem_prealloc_get(size);
1559 	if (ptr)
1560 		return ptr;
1561 
1562 	if (!flag)
1563 		flag = qdf_mem_malloc_flags();
1564 
1565 	start = qdf_mc_timer_get_system_time();
1566 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1567 	duration = qdf_mc_timer_get_system_time() - start;
1568 
1569 	if (duration > QDF_MEM_WARN_THRESHOLD)
1570 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1571 			 duration, size, func, line);
1572 
1573 	if (!header) {
1574 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1575 		return NULL;
1576 	}
1577 
1578 	qdf_mem_header_init(header, size, func, line, caller);
1579 	qdf_mem_trailer_init(header);
1580 	ptr = qdf_mem_get_ptr(header);
1581 
1582 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1583 	status = qdf_list_insert_front(mem_list, &header->node);
1584 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1585 	if (QDF_IS_STATUS_ERROR(status))
1586 		qdf_err("Failed to insert memory header; status %d", status);
1587 
1588 	qdf_mem_kmalloc_inc(ksize(header));
1589 
1590 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1591 		ptr += sizeof(void *);
1592 
1593 	return ptr;
1594 }
1595 qdf_export_symbol(qdf_mem_malloc_debug);
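/*
 * Illustrative direct call (a sketch; drivers normally reach this through
 * the qdf_mem_malloc()/qdf_mem_free() wrappers rather than calling the
 * _debug variants by hand):
 *
 *     ptr = qdf_mem_malloc_debug(len, __func__, __LINE__,
 *                                (void *)_RET_IP_, 0);
 *     if (!ptr)
 *             return QDF_STATUS_E_NOMEM;
 *     ...
 *     qdf_mem_free_debug(ptr, __func__, __LINE__);
 */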
1596 
1597 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1598 				  uint32_t line, void *caller)
1599 {
1600 	QDF_STATUS status;
1601 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1602 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1603 	struct qdf_mem_header *header;
1604 	void *ptr;
1605 	unsigned long start, duration;
1606 
1607 	if (is_initial_mem_debug_disabled)
1608 		return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1609 
1610 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1611 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1612 		return NULL;
1613 	}
1614 
1615 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1616 		size += sizeof(void *);
1617 
1618 	ptr = qdf_mem_prealloc_get(size);
1619 	if (ptr)
1620 		return ptr;
1621 
1622 	start = qdf_mc_timer_get_system_time();
1623 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1624 	duration = qdf_mc_timer_get_system_time() - start;
1625 
1626 	if (duration > QDF_MEM_WARN_THRESHOLD)
1627 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1628 			 duration, size, func, line);
1629 
1630 	if (!header) {
1631 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1632 		return NULL;
1633 	}
1634 
1635 	qdf_mem_header_init(header, size, func, line, caller);
1636 	qdf_mem_trailer_init(header);
1637 	ptr = qdf_mem_get_ptr(header);
1638 
1639 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1640 	status = qdf_list_insert_front(mem_list, &header->node);
1641 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1642 	if (QDF_IS_STATUS_ERROR(status))
1643 		qdf_err("Failed to insert memory header; status %d", status);
1644 
1645 	qdf_mem_kmalloc_inc(ksize(header));
1646 
1647 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1648 		ptr += sizeof(void *);
1649 
1650 	return ptr;
1651 }
1652 
1653 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1654 
1655 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1656 				     uint32_t line)
1657 {
1658 	void *ptr;
1659 
1660 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1661 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1662 			     line);
1663 		return NULL;
1664 	}
1665 
1666 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1667 		size += sizeof(void *);
1668 
1669 	ptr = qdf_mem_prealloc_get(size);
1670 	if (ptr)
1671 		return ptr;
1672 
1673 	ptr = kzalloc(size, GFP_ATOMIC);
1674 	if (!ptr) {
1675 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1676 			      size, func, line);
1677 		return NULL;
1678 	}
1679 
1680 	qdf_mem_kmalloc_inc(ksize(ptr));
1681 
1682 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1683 		ptr += sizeof(void *);
1684 
1685 	return ptr;
1686 }
1687 
1688 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1689 
1690 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1691 {
1692 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1693 	struct qdf_mem_header *header;
1694 	enum qdf_mem_validation_bitmap error_bitmap;
1695 
1696 	if (is_initial_mem_debug_disabled) {
1697 		__qdf_mem_free(ptr);
1698 		return;
1699 	}
1700 
1701 	/* freeing a null pointer is valid */
1702 	if (qdf_unlikely(!ptr))
1703 		return;
1704 
1705 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1706 		ptr = ptr - sizeof(void *);
1707 
1708 	if (qdf_mem_prealloc_put(ptr))
1709 		return;
1710 
1711 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1712 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1713 				   ptr);
1714 
1715 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1716 
1717 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1718 	header = qdf_mem_get_header(ptr);
1719 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1720 	error_bitmap |= qdf_mem_trailer_validate(header);
1721 
1722 	if (!error_bitmap) {
1723 		header->freed = true;
1724 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1725 				     &header->node);
1726 	}
1727 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1728 
1729 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1730 				    func, line);
1731 
1732 	qdf_mem_kmalloc_dec(ksize(header));
1733 	kfree(header);
1734 }
1735 qdf_export_symbol(qdf_mem_free_debug);
1736 
1737 void qdf_mem_check_for_leaks(void)
1738 {
1739 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1740 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1741 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1742 	uint32_t leaks_count = 0;
1743 
1744 	if (is_initial_mem_debug_disabled)
1745 		return;
1746 
1747 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1748 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1749 
1750 	if (leaks_count)
1751 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1752 				   leaks_count);
1753 }
1754 
1755 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1756 				     struct qdf_mem_multi_page_t *pages,
1757 				     size_t element_size, uint32_t element_num,
1758 				     qdf_dma_context_t memctxt, bool cacheable,
1759 				     const char *func, uint32_t line,
1760 				     void *caller)
1761 {
1762 	uint16_t page_idx;
1763 	struct qdf_mem_dma_page_t *dma_pages;
1764 	void **cacheable_pages = NULL;
1765 	uint16_t i;
1766 
1767 	if (!pages->page_size)
1768 		pages->page_size = qdf_page_size;
1769 
1770 	pages->num_element_per_page = pages->page_size / element_size;
1771 	if (!pages->num_element_per_page) {
1772 		qdf_print("Invalid page %d or element size %d",
1773 			  (int)pages->page_size, (int)element_size);
1774 		goto out_fail;
1775 	}
1776 
1777 	pages->num_pages = element_num / pages->num_element_per_page;
1778 	if (element_num % pages->num_element_per_page)
1779 		pages->num_pages++;
1780 
1781 	if (cacheable) {
1782 		/* Pages information storage */
1783 		pages->cacheable_pages = qdf_mem_malloc_debug(
1784 			pages->num_pages * sizeof(pages->cacheable_pages),
1785 			func, line, caller, 0);
1786 		if (!pages->cacheable_pages)
1787 			goto out_fail;
1788 
1789 		cacheable_pages = pages->cacheable_pages;
1790 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1791 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1792 				pages->page_size, func, line, caller, 0);
1793 			if (!cacheable_pages[page_idx])
1794 				goto page_alloc_fail;
1795 		}
1796 		pages->dma_pages = NULL;
1797 	} else {
1798 		pages->dma_pages = qdf_mem_malloc_debug(
1799 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1800 			func, line, caller, 0);
1801 		if (!pages->dma_pages)
1802 			goto out_fail;
1803 
1804 		dma_pages = pages->dma_pages;
1805 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1806 			dma_pages->page_v_addr_start =
1807 				qdf_mem_alloc_consistent_debug(
1808 					osdev, osdev->dev, pages->page_size,
1809 					&dma_pages->page_p_addr,
1810 					func, line, caller);
1811 			if (!dma_pages->page_v_addr_start) {
1812 				qdf_print("dmaable page alloc fail pi %d",
1813 					  page_idx);
1814 				goto page_alloc_fail;
1815 			}
1816 			dma_pages->page_v_addr_end =
1817 				dma_pages->page_v_addr_start + pages->page_size;
1818 			dma_pages++;
1819 		}
1820 		pages->cacheable_pages = NULL;
1821 	}
1822 	return;
1823 
1824 page_alloc_fail:
1825 	if (cacheable) {
1826 		for (i = 0; i < page_idx; i++)
1827 			qdf_mem_free_debug(pages->cacheable_pages[i],
1828 					   func, line);
1829 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1830 	} else {
1831 		dma_pages = pages->dma_pages;
1832 		for (i = 0; i < page_idx; i++) {
1833 			qdf_mem_free_consistent_debug(
1834 				osdev, osdev->dev,
1835 				pages->page_size, dma_pages->page_v_addr_start,
1836 				dma_pages->page_p_addr, memctxt, func, line);
1837 			dma_pages++;
1838 		}
1839 		qdf_mem_free_debug(pages->dma_pages, func, line);
1840 	}
1841 
1842 out_fail:
1843 	pages->cacheable_pages = NULL;
1844 	pages->dma_pages = NULL;
1845 	pages->num_pages = 0;
1846 }
1847 
1848 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1849 
1850 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1851 				    struct qdf_mem_multi_page_t *pages,
1852 				    qdf_dma_context_t memctxt, bool cacheable,
1853 				    const char *func, uint32_t line)
1854 {
1855 	unsigned int page_idx;
1856 	struct qdf_mem_dma_page_t *dma_pages;
1857 
1858 	if (!pages->page_size)
1859 		pages->page_size = qdf_page_size;
1860 
1861 	if (cacheable) {
1862 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1863 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1864 					   func, line);
1865 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1866 	} else {
1867 		dma_pages = pages->dma_pages;
1868 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1869 			qdf_mem_free_consistent_debug(
1870 				osdev, osdev->dev, pages->page_size,
1871 				dma_pages->page_v_addr_start,
1872 				dma_pages->page_p_addr, memctxt, func, line);
1873 			dma_pages++;
1874 		}
1875 		qdf_mem_free_debug(pages->dma_pages, func, line);
1876 	}
1877 
1878 	pages->cacheable_pages = NULL;
1879 	pages->dma_pages = NULL;
1880 	pages->num_pages = 0;
1881 }
1882 
1883 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1884 
1885 #else
1886 static void qdf_mem_debug_init(void) {}
1887 
1888 static void qdf_mem_debug_exit(void) {}
1889 
1890 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1891 {
1892 	void *ptr;
1893 
1894 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1895 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1896 			     line);
1897 		return NULL;
1898 	}
1899 
1900 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1901 		size += sizeof(void *);
1902 
1903 	ptr = qdf_mem_prealloc_get(size);
1904 	if (ptr)
1905 		return ptr;
1906 
1907 	ptr = kzalloc(size, GFP_ATOMIC);
1908 	if (!ptr) {
1909 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1910 			      size, func, line);
1911 		return NULL;
1912 	}
1913 
1914 	qdf_mem_kmalloc_inc(ksize(ptr));
1915 
1916 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1917 		ptr += sizeof(void *);
1918 
1919 	return ptr;
1920 }
1921 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1922 
1923 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1924 			       struct qdf_mem_multi_page_t *pages,
1925 			       size_t element_size, uint32_t element_num,
1926 			       qdf_dma_context_t memctxt, bool cacheable)
1927 {
1928 	uint16_t page_idx;
1929 	struct qdf_mem_dma_page_t *dma_pages;
1930 	void **cacheable_pages = NULL;
1931 	uint16_t i;
1932 
1933 	if (!pages->page_size)
1934 		pages->page_size = qdf_page_size;
1935 
1936 	pages->num_element_per_page = pages->page_size / element_size;
1937 	if (!pages->num_element_per_page) {
1938 		qdf_print("Invalid page %d or element size %d",
1939 			  (int)pages->page_size, (int)element_size);
1940 		goto out_fail;
1941 	}
1942 
1943 	pages->num_pages = element_num / pages->num_element_per_page;
1944 	if (element_num % pages->num_element_per_page)
1945 		pages->num_pages++;
1946 
1947 	if (cacheable) {
1948 		/* Pages information storage */
1949 		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages[0]));
1951 		if (!pages->cacheable_pages)
1952 			goto out_fail;
1953 
1954 		cacheable_pages = pages->cacheable_pages;
1955 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1956 			cacheable_pages[page_idx] =
1957 				qdf_mem_malloc(pages->page_size);
1958 			if (!cacheable_pages[page_idx])
1959 				goto page_alloc_fail;
1960 		}
1961 		pages->dma_pages = NULL;
1962 	} else {
1963 		pages->dma_pages = qdf_mem_malloc(
1964 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1965 		if (!pages->dma_pages)
1966 			goto out_fail;
1967 
1968 		dma_pages = pages->dma_pages;
1969 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1970 			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
							 pages->page_size,
							 &dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
1977 				goto page_alloc_fail;
1978 			}
1979 			dma_pages->page_v_addr_end =
1980 				dma_pages->page_v_addr_start + pages->page_size;
1981 			dma_pages++;
1982 		}
1983 		pages->cacheable_pages = NULL;
1984 	}
1985 	return;
1986 
1987 page_alloc_fail:
1988 	if (cacheable) {
1989 		for (i = 0; i < page_idx; i++)
1990 			qdf_mem_free(pages->cacheable_pages[i]);
1991 		qdf_mem_free(pages->cacheable_pages);
1992 	} else {
1993 		dma_pages = pages->dma_pages;
1994 		for (i = 0; i < page_idx; i++) {
1995 			qdf_mem_free_consistent(
1996 				osdev, osdev->dev, pages->page_size,
1997 				dma_pages->page_v_addr_start,
1998 				dma_pages->page_p_addr, memctxt);
1999 			dma_pages++;
2000 		}
2001 		qdf_mem_free(pages->dma_pages);
2002 	}
2003 
2004 out_fail:
2005 	pages->cacheable_pages = NULL;
2006 	pages->dma_pages = NULL;
2007 	pages->num_pages = 0;
2008 	return;
2009 }
2010 qdf_export_symbol(qdf_mem_multi_pages_alloc);
2011 
2012 void qdf_mem_multi_pages_free(qdf_device_t osdev,
2013 			      struct qdf_mem_multi_page_t *pages,
2014 			      qdf_dma_context_t memctxt, bool cacheable)
2015 {
2016 	unsigned int page_idx;
2017 	struct qdf_mem_dma_page_t *dma_pages;
2018 
2019 	if (!pages->page_size)
2020 		pages->page_size = qdf_page_size;
2021 
2022 	if (cacheable) {
2023 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2024 			qdf_mem_free(pages->cacheable_pages[page_idx]);
2025 		qdf_mem_free(pages->cacheable_pages);
2026 	} else {
2027 		dma_pages = pages->dma_pages;
2028 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2029 			qdf_mem_free_consistent(
2030 				osdev, osdev->dev, pages->page_size,
2031 				dma_pages->page_v_addr_start,
2032 				dma_pages->page_p_addr, memctxt);
2033 			dma_pages++;
2034 		}
2035 		qdf_mem_free(pages->dma_pages);
2036 	}
2037 
2038 	pages->cacheable_pages = NULL;
2039 	pages->dma_pages = NULL;
2040 	pages->num_pages = 0;
2041 	return;
2042 }
2043 qdf_export_symbol(qdf_mem_multi_pages_free);
2044 #endif
2045 
2046 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2047 			      bool cacheable)
2048 {
2049 	unsigned int page_idx;
2050 	struct qdf_mem_dma_page_t *dma_pages;
2051 
2052 	if (!pages->page_size)
2053 		pages->page_size = qdf_page_size;
2054 
2055 	if (cacheable) {
2056 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2057 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2058 				     pages->page_size);
2059 	} else {
2060 		dma_pages = pages->dma_pages;
2061 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2062 			qdf_mem_zero(dma_pages->page_v_addr_start,
2063 				     pages->page_size);
2064 			dma_pages++;
2065 		}
2066 	}
2067 }
2068 
2069 qdf_export_symbol(qdf_mem_multi_pages_zero);
2070 
2071 void __qdf_mem_free(void *ptr)
2072 {
2073 	if (!ptr)
2074 		return;
2075 
2076 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2077 		ptr = ptr - sizeof(void *);
2078 
2079 	if (qdf_might_be_prealloc(ptr)) {
2080 		if (qdf_mem_prealloc_put(ptr))
2081 			return;
2082 	}
2083 
2084 	qdf_mem_kmalloc_dec(ksize(ptr));
2085 
2086 	kfree(ptr);
2087 }
2088 
2089 qdf_export_symbol(__qdf_mem_free);
2090 
2091 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2092 {
2093 	void *ptr;
2094 
2095 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2096 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2097 			     line);
2098 		return NULL;
2099 	}
2100 
2101 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2102 		size += sizeof(void *);
2103 
2104 	ptr = qdf_mem_prealloc_get(size);
2105 	if (ptr)
2106 		return ptr;
2107 
2108 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2109 	if (!ptr)
2110 		return NULL;
2111 
2112 	qdf_mem_kmalloc_inc(ksize(ptr));
2113 
2114 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2115 		ptr += sizeof(void *);
2116 
2117 	return ptr;
2118 }
2119 
2120 qdf_export_symbol(__qdf_mem_malloc);
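
/*
 * Buffer layout sketch (illustrative): when
 * add_headroom_for_cnss_prealloc_cache_ptr() is true, allocation and free
 * shift the pointer by one pointer-sized word, so callers always see the
 * payload address while the headroom stays hidden:
 *
 *	raw buffer:  [ void * headroom ][ payload ... ]
 *	             ^                  ^
 *	             raw pointer        pointer returned to the caller
 *
 * __qdf_mem_free() undoes the same offset before handing the raw pointer
 * back to the prealloc pool or kfree().
 */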
2121 
2122 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2123 void __qdf_untracked_mem_free(void *ptr)
2124 {
2125 	if (!ptr)
2126 		return;
2127 
2128 	kfree(ptr);
2129 }
2130 
2131 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2132 {
2133 	void *ptr;
2134 
2135 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2136 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2137 			     line);
2138 		return NULL;
2139 	}
2140 
2141 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2142 	if (!ptr)
2143 		return NULL;
2144 
2145 	return ptr;
2146 }
2147 #endif
2148 
void *qdf_aligned_malloc_fl(uint32_t *size,
			    void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line)
2155 {
2156 	void *vaddr_aligned;
2157 	uint32_t align_alloc_size;
2158 
2159 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2160 			line);
2161 	if (!*vaddr_unaligned) {
2162 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2163 		return NULL;
2164 	}
2165 
2166 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2167 
	/* Re-allocate with additional bytes to align the base address
	 * only if the allocation above returned an unaligned address.
	 * The exact-size allocation is attempted first because the OS
	 * allocates blocks of power-of-2 pages and then frees the extra
	 * pages. For example, for a 1 MB ring the aligned allocation
	 * below requests 1 MB plus (align - 1) bytes, which results in
	 * a 2 MB block allocation that sometimes fails due to memory
	 * fragmentation.
	 */
2177 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2178 		align_alloc_size = *size + align - 1;
2179 
2180 		qdf_mem_free(*vaddr_unaligned);
2181 		*vaddr_unaligned = qdf_mem_malloc_fl(
2182 				(qdf_size_t)align_alloc_size, func, line);
2183 		if (!*vaddr_unaligned) {
2184 			qdf_warn("Failed to alloc %uB @ %s:%d",
2185 				 align_alloc_size, func, line);
2186 			return NULL;
2187 		}
2188 
2189 		*paddr_unaligned = qdf_mem_virt_to_phys(
2190 				*vaddr_unaligned);
2191 		*size = align_alloc_size;
2192 	}
2193 
2194 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2195 		((unsigned long)(*paddr_unaligned), align);
2196 
2197 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2198 			((unsigned long)(*paddr_aligned) -
2199 			 (unsigned long)(*paddr_unaligned)));
2200 
2201 	return vaddr_aligned;
2202 }
2203 
2204 qdf_export_symbol(qdf_aligned_malloc_fl);
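
/*
 * Worked example (illustrative, hypothetical numbers): with *size = 4096
 * and align = 8, if the first allocation yields *paddr_unaligned = 0x1003,
 * the buffer is re-allocated with 4096 + 7 bytes, *paddr_aligned becomes
 * qdf_align(0x1003, 8) = 0x1008, and the returned virtual address is
 * *vaddr_unaligned advanced by the same 5-byte offset, so the physical
 * and virtual views of the aligned base stay in step.
 */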
2205 
2206 #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
2207 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2208 			       uint32_t elem_size, uint32_t elem_count,
2209 			       uint8_t cacheable, qdf_mem_release_cb cb,
2210 			       void *elem_list)
2211 {
2212 	uint16_t i, i_int;
2213 	void *page_info;
2214 	void *elem;
2215 	uint32_t num_elem = 0;
2216 
2217 	for (i = 0; i < pages->num_pages; i++) {
2218 		if (cacheable)
2219 			page_info = pages->cacheable_pages[i];
2220 		else
2221 			page_info = pages->dma_pages[i].page_v_addr_start;
2222 
2223 		if (!page_info)
2224 			return -ENOMEM;
2225 
2226 		elem = page_info;
2227 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2228 			cb(ctxt, elem, elem_list);
2229 			elem = ((char *)elem + elem_size);
2230 			num_elem++;
2231 
2232 			/* Number of desc pool elements reached */
2233 			if (num_elem == (elem_count - 1))
2234 				break;
2235 		}
2236 	}
2237 
2238 	return 0;
2239 }
2240 
2241 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
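
/*
 * Illustrative sketch (not part of the driver): releasing every element of
 * a descriptor pool through a caller-supplied callback. The callback
 * signature shown is inferred from the invocation above; all names are
 * hypothetical.
 *
 *	static void my_release_cb(void *ctxt, void *elem, void *elem_list)
 *	{
 *		// return the descriptor to the caller's free list
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(soc, &pool, sizeof(struct my_desc),
 *				   num_desc, true, my_release_cb,
 *				   &my_free_list);
 */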
2242 #endif
2243 
2244 int qdf_mem_multi_page_link(qdf_device_t osdev,
2245 			    struct qdf_mem_multi_page_t *pages,
2246 			    uint32_t elem_size, uint32_t elem_count,
2247 			    uint8_t cacheable)
2248 {
2249 	uint16_t i, i_int;
2250 	void *page_info;
2251 	void **c_elem = NULL;
2252 	uint32_t num_link = 0;
2253 
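	/*
	 * Chain all elements into one singly linked free list: the first
	 * pointer-sized word of each element is overwritten with the
	 * address of the next element, and the last element of a page is
	 * pointed at the first element of the following page. The final
	 * link is terminated with NULL after the loop.
	 */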
2254 	for (i = 0; i < pages->num_pages; i++) {
2255 		if (cacheable)
2256 			page_info = pages->cacheable_pages[i];
2257 		else
2258 			page_info = pages->dma_pages[i].page_v_addr_start;
2259 
2260 		if (!page_info)
2261 			return -ENOMEM;
2262 
2263 		c_elem = (void **)page_info;
2264 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2265 			if (i_int == (pages->num_element_per_page - 1)) {
2266 				if ((i + 1) == pages->num_pages)
2267 					break;
2268 				if (cacheable)
2269 					*c_elem = pages->
2270 						cacheable_pages[i + 1];
2271 				else
2272 					*c_elem = pages->
2273 						dma_pages[i + 1].
2274 							page_v_addr_start;
2275 				num_link++;
2276 				break;
2277 			} else {
2278 				*c_elem =
2279 					(void *)(((char *)c_elem) + elem_size);
2280 			}
2281 			num_link++;
2282 			c_elem = (void **)*c_elem;
2283 
2284 			/* Last link established exit */
2285 			if (num_link == (elem_count - 1))
2286 				break;
2287 		}
2288 	}
2289 
2290 	if (c_elem)
2291 		*c_elem = NULL;
2292 
2293 	return 0;
2294 }
2295 qdf_export_symbol(qdf_mem_multi_page_link);
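
/*
 * Illustrative sketch (not part of the driver): after
 * qdf_mem_multi_page_link() succeeds, the pool can be consumed as a
 * NULL-terminated free list starting at the first element of page 0.
 * Names below are hypothetical.
 *
 *	void **head;
 *
 *	if (qdf_mem_multi_page_link(osdev, &pool, elem_size, elem_count,
 *				    true))
 *		return;		// a page pointer was missing
 *
 *	head = (void **)pool.cacheable_pages[0];
 *	while (head) {
 *		// the element at 'head' is free; advance to the next one
 *		head = (void **)*head;
 *	}
 */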
2296 
2297 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2298 {
2299 	/* special case where dst_addr or src_addr can be NULL */
2300 	if (!num_bytes)
2301 		return;
2302 
2303 	QDF_BUG(dst_addr);
2304 	QDF_BUG(src_addr);
2305 	if (!dst_addr || !src_addr)
2306 		return;
2307 
2308 	memcpy(dst_addr, src_addr, num_bytes);
2309 }
2310 qdf_export_symbol(qdf_mem_copy);
2311 
2312 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2313 {
2314 	qdf_shared_mem_t *shared_mem;
2315 	qdf_dma_addr_t dma_addr, paddr;
2316 	int ret;
2317 
2318 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2319 	if (!shared_mem)
2320 		return NULL;
2321 
2322 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2323 				size, qdf_mem_get_dma_addr_ptr(osdev,
2324 						&shared_mem->mem_info));
2325 	if (!shared_mem->vaddr) {
2326 		qdf_err("Unable to allocate DMA memory for shared resource");
2327 		qdf_mem_free(shared_mem);
2328 		return NULL;
2329 	}
2330 
2331 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2332 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2333 
2334 	qdf_mem_zero(shared_mem->vaddr, size);
2335 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2336 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2337 
2338 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2339 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2340 				      shared_mem->vaddr, dma_addr, size);
2341 	if (ret) {
2342 		qdf_err("Unable to get DMA sgtable");
2343 		qdf_mem_free_consistent(osdev, osdev->dev,
2344 					shared_mem->mem_info.size,
2345 					shared_mem->vaddr,
2346 					dma_addr,
2347 					qdf_get_dma_mem_context(shared_mem,
2348 								memctx));
2349 		qdf_mem_free(shared_mem);
2350 		return NULL;
2351 	}
2352 
2353 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2354 
2355 	return shared_mem;
2356 }
2357 
2358 qdf_export_symbol(qdf_mem_shared_mem_alloc);
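
/*
 * Illustrative sketch (not part of the driver): allocating a DMA-able
 * shared memory region and handing its scatter/gather table to a consumer.
 * The size and the consumer function are hypothetical.
 *
 *	qdf_shared_mem_t *shmem;
 *
 *	shmem = qdf_mem_shared_mem_alloc(osdev, 4096);
 *	if (!shmem)
 *		return;
 *
 *	// shmem->vaddr    : zeroed kernel virtual address
 *	// shmem->mem_info : DMA address, physical address and actual size
 *	// shmem->sgtable  : sgtable describing the buffer
 *	give_to_consumer(&shmem->sgtable, shmem->vaddr);
 */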
2359 
2360 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2361 {
2362 	if (0 == num_bytes) {
2363 		/* special case where dst_addr or src_addr can be NULL */
2364 		return;
2365 	}
2366 
2367 	if ((!dst_addr) || (!src_addr)) {
2368 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2369 			  "%s called with NULL parameter, source:%pK destination:%pK",
2370 			  __func__, src_addr, dst_addr);
2371 		QDF_ASSERT(0);
2372 		return;
2373 	}
2374 	memcpy_toio(dst_addr, src_addr, num_bytes);
2375 }
2376 
2377 qdf_export_symbol(qdf_mem_copy_toio);
2378 
2379 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2380 {
2381 	if (!ptr) {
2382 		qdf_print("%s called with NULL parameter ptr", __func__);
2383 		return;
2384 	}
2385 	memset_io(ptr, value, num_bytes);
2386 }
2387 
2388 qdf_export_symbol(qdf_mem_set_io);
2389 
2390 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2391 {
2392 	QDF_BUG(ptr);
2393 	if (!ptr)
2394 		return;
2395 
2396 	memset(ptr, value, num_bytes);
2397 }
2398 qdf_export_symbol(qdf_mem_set);
2399 
2400 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2401 {
2402 	/* special case where dst_addr or src_addr can be NULL */
2403 	if (!num_bytes)
2404 		return;
2405 
2406 	QDF_BUG(dst_addr);
2407 	QDF_BUG(src_addr);
2408 	if (!dst_addr || !src_addr)
2409 		return;
2410 
2411 	memmove(dst_addr, src_addr, num_bytes);
2412 }
2413 qdf_export_symbol(qdf_mem_move);
2414 
2415 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2416 {
2417 	QDF_BUG(left);
2418 	QDF_BUG(right);
2419 
2420 	return memcmp(left, right, size);
2421 }
2422 qdf_export_symbol(qdf_mem_cmp);
2423 
2424 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2425 /**
2426  * qdf_mem_dma_alloc() - allocates memory for dma
2427  * @osdev: OS device handle
2428  * @dev: Pointer to device handle
2429  * @size: Size to be allocated
2430  * @phy_addr: Physical address
2431  *
 * Return: pointer to the allocated memory, or NULL if the allocation fails
2433  */
2434 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2435 				      qdf_size_t size,
2436 				      qdf_dma_addr_t *phy_addr)
2437 {
2438 	void *vaddr;
2439 
2440 	vaddr = qdf_mem_malloc(size);
2441 	*phy_addr = ((uintptr_t) vaddr);
2442 	/* using this type conversion to suppress "cast from pointer to integer
2443 	 * of different size" warning on some platforms
2444 	 */
2445 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2446 	return vaddr;
2447 }
2448 
2449 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2450 	!defined(QCA_WIFI_QCN9000)
2451 
2452 #define QCA8074_RAM_BASE 0x50000000
2453 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2454 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2455 			qdf_dma_addr_t *phy_addr)
2456 {
2457 	void *vaddr = NULL;
2458 	int i;
2459 
2460 	*phy_addr = 0;
2461 
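	/*
	 * On these x86 emulation builds a coherent buffer can come back
	 * with a physical address below QCA8074_RAM_BASE; such buffers
	 * are released and the allocation retried a bounded number of
	 * times until a usable address is obtained.
	 */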
2462 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2463 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2464 					   qdf_mem_malloc_flags());
2465 
2466 		if (!vaddr) {
			qdf_err("%s failed, size: %zu!", __func__, size);
2468 			return NULL;
2469 		}
2470 
2471 		if (*phy_addr >= QCA8074_RAM_BASE)
2472 			return vaddr;
2473 
2474 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2475 	}
2476 
2477 	return NULL;
2478 }
2479 
2480 #else
2481 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2482 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2483 {
2484 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2485 }
2486 #endif
2487 
2488 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2489 static inline void
2490 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2491 {
2492 	qdf_mem_free(vaddr);
2493 }
2494 #else
2495 
2496 static inline void
2497 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2498 {
2499 	dma_free_coherent(dev, size, vaddr, paddr);
2500 }
2501 #endif
2502 
2503 #ifdef MEMORY_DEBUG
2504 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2505 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2506 				     const char *func, uint32_t line,
2507 				     void *caller)
2508 {
2509 	QDF_STATUS status;
2510 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2511 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2512 	struct qdf_mem_header *header;
2513 	void *vaddr;
2514 
2515 	if (is_initial_mem_debug_disabled)
2516 		return __qdf_mem_alloc_consistent(osdev, dev,
2517 						  size, paddr,
2518 						  func, line);
2519 
2520 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2521 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2522 		return NULL;
2523 	}
2524 
2525 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2526 				   paddr);
2527 
2528 	if (!vaddr) {
2529 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2530 		return NULL;
2531 	}
2532 
2533 	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers only a trailer is added; the call below
	 * initializes the header structure at the tail of the buffer.
	 * Prefixing the header to a DMA buffer causes SMMU faults, so
	 * the header is never placed in front of DMA buffers.
	 */
2539 	qdf_mem_header_init(header, size, func, line, caller);
2540 
2541 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2542 	status = qdf_list_insert_front(mem_list, &header->node);
2543 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2544 	if (QDF_IS_STATUS_ERROR(status))
2545 		qdf_err("Failed to insert memory header; status %d", status);
2546 
2547 	qdf_mem_dma_inc(size);
2548 
2549 	return vaddr;
2550 }
2551 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
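
/*
 * Debug DMA buffer layout sketch (illustrative): the debug header is kept
 * as a trailer after the payload rather than in front of it, so the DMA
 * address handed to hardware still points at offset 0:
 *
 *	[ payload of 'size' bytes ][ struct qdf_mem_header trailer ]
 *	^                          ^
 *	vaddr returned to caller   qdf_mem_dma_get_header(vaddr, size)
 *
 * QDF_DMA_MEM_DEBUG_SIZE accounts for the extra trailer bytes in the
 * underlying allocation.
 */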
2552 
2553 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2554 				   qdf_size_t size, void *vaddr,
2555 				   qdf_dma_addr_t paddr,
2556 				   qdf_dma_context_t memctx,
2557 				   const char *func, uint32_t line)
2558 {
2559 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2560 	struct qdf_mem_header *header;
2561 	enum qdf_mem_validation_bitmap error_bitmap;
2562 
2563 	if (is_initial_mem_debug_disabled) {
2564 		__qdf_mem_free_consistent(
2565 					  osdev, dev,
2566 					  size, vaddr,
2567 					  paddr, memctx);
2568 		return;
2569 	}
2570 
2571 	/* freeing a null pointer is valid */
2572 	if (qdf_unlikely(!vaddr))
2573 		return;
2574 
2575 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2576 
2577 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers only a trailer is added; retrieve the header
	 * structure from the tail of the buffer. Prefixing the header
	 * to a DMA buffer causes SMMU faults, so the header is never
	 * placed in front of DMA buffers.
	 */
2583 	header = qdf_mem_dma_get_header(vaddr, size);
2584 	error_bitmap = qdf_mem_header_validate(header, domain);
2585 	if (!error_bitmap) {
2586 		header->freed = true;
2587 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2588 				     &header->node);
2589 	}
2590 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2591 
2592 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2593 
2594 	qdf_mem_dma_dec(header->size);
2595 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2596 }
2597 qdf_export_symbol(qdf_mem_free_consistent_debug);
2598 #endif /* MEMORY_DEBUG */
2599 
2600 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2601 			       qdf_size_t size, void *vaddr,
2602 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2603 {
2604 	qdf_mem_dma_dec(size);
2605 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2606 }
2607 
2608 qdf_export_symbol(__qdf_mem_free_consistent);
2609 
2610 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2611 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2612 				 const char *func, uint32_t line)
2613 {
2614 	void *vaddr;
2615 
2616 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2617 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2618 			     size, func, line);
2619 		return NULL;
2620 	}
2621 
2622 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2623 
2624 	if (vaddr)
2625 		qdf_mem_dma_inc(size);
2626 
2627 	return vaddr;
2628 }
2629 
2630 qdf_export_symbol(__qdf_mem_alloc_consistent);
2631 
2632 void *qdf_aligned_mem_alloc_consistent_fl(
2633 	qdf_device_t osdev, uint32_t *size,
2634 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2635 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2636 	const char *func, uint32_t line)
2637 {
2638 	void *vaddr_aligned;
2639 	uint32_t align_alloc_size;
2640 
2641 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2642 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2643 	if (!*vaddr_unaligned) {
2644 		qdf_warn("Failed to alloc %uB @ %s:%d",
2645 			 *size, func, line);
2646 		return NULL;
2647 	}
2648 
	/* Re-allocate with additional bytes to align the base address
	 * only if the allocation above returned an unaligned address.
	 * The exact-size allocation is attempted first because the OS
	 * allocates blocks of power-of-2 pages and then frees the extra
	 * pages. For example, for a 1 MB ring the aligned allocation
	 * below requests 1 MB plus (align - 1) bytes, which results in
	 * a 2 MB block allocation that sometimes fails due to memory
	 * fragmentation.
	 */
2658 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2659 		align_alloc_size = *size + align - 1;
2660 
2661 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2662 					*vaddr_unaligned,
2663 					*paddr_unaligned, 0);
2664 
2665 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2666 				osdev, osdev->dev, align_alloc_size,
2667 				paddr_unaligned);
2668 		if (!*vaddr_unaligned) {
2669 			qdf_warn("Failed to alloc %uB @ %s:%d",
2670 				 align_alloc_size, func, line);
2671 			return NULL;
2672 		}
2673 
2674 		*size = align_alloc_size;
2675 	}
2676 
2677 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2678 			(unsigned long)(*paddr_unaligned), align);
2679 
2680 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2681 				 ((unsigned long)(*paddr_aligned) -
2682 				  (unsigned long)(*paddr_unaligned)));
2683 
2684 	return vaddr_aligned;
2685 }
2686 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2687 
2688 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2689 					qdf_dma_addr_t bus_addr,
2690 					qdf_size_t size,
2691 					enum dma_data_direction direction)
2692 {
2693 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2694 }
2695 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2696 
2697 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2698 				     qdf_dma_addr_t bus_addr,
2699 				     qdf_size_t size,
2700 				     enum dma_data_direction direction)
2701 {
2702 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2703 }
2704 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
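
/*
 * Illustrative usage (not part of the driver): typical CPU/device handoffs
 * around streaming DMA buffers; the buffers and length are hypothetical.
 *
 *	// TX: CPU finished writing the buffer, hand it to the device
 *	qdf_mem_dma_sync_single_for_device(osdev, tx_paddr, len,
 *					   DMA_TO_DEVICE);
 *
 *	// RX: device finished writing the buffer, reclaim it for the CPU
 *	qdf_mem_dma_sync_single_for_cpu(osdev, rx_paddr, len,
 *					DMA_FROM_DEVICE);
 */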
2705 
2706 void qdf_mem_init(void)
2707 {
2708 	qdf_mem_debug_init();
2709 	qdf_net_buf_debug_init();
2710 	qdf_frag_debug_init();
2711 	qdf_mem_debugfs_init();
2712 	qdf_mem_debug_debugfs_init();
2713 }
2714 qdf_export_symbol(qdf_mem_init);
2715 
2716 void qdf_mem_exit(void)
2717 {
2718 	qdf_mem_debug_debugfs_exit();
2719 	qdf_mem_debugfs_exit();
2720 	qdf_frag_debug_exit();
2721 	qdf_net_buf_debug_exit();
2722 	qdf_mem_debug_exit();
2723 }
2724 qdf_export_symbol(qdf_mem_exit);
2725 
2726 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2727 {
2728 	if ((!dst_addr) || (!src_addr)) {
2729 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2730 			  "%s called with NULL parameter, source:%pK destination:%pK",
2731 			  __func__, src_addr, dst_addr);
2732 		QDF_ASSERT(0);
2733 		return;
2734 	}
2735 	ether_addr_copy(dst_addr, src_addr);
2736 }
2737 qdf_export_symbol(qdf_ether_addr_copy);
2738 
2739 int32_t qdf_dma_mem_stats_read(void)
2740 {
2741 	return qdf_atomic_read(&qdf_mem_stat.dma);
2742 }
2743 
2744 qdf_export_symbol(qdf_dma_mem_stats_read);
2745 
2746 int32_t qdf_heap_mem_stats_read(void)
2747 {
2748 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2749 }
2750 
2751 qdf_export_symbol(qdf_heap_mem_stats_read);
2752 
2753 int32_t qdf_skb_mem_stats_read(void)
2754 {
2755 	return qdf_atomic_read(&qdf_mem_stat.skb);
2756 }
2757 
2758 qdf_export_symbol(qdf_skb_mem_stats_read);
2759 
2760 int32_t qdf_skb_total_mem_stats_read(void)
2761 {
2762 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2763 }
2764 
2765 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2766 
2767 int32_t qdf_skb_max_mem_stats_read(void)
2768 {
2769 	return qdf_mem_stat.skb_mem_max;
2770 }
2771 
2772 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2773 
2774 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2775 {
2776 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2777 }
2778 
2779 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2780 
2781 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2782 {
2783 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2784 }
2785 
2786 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2787 
2788 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2789 {
2790 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2791 }
2792 
2793 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2794 
2795 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2796 {
2797 	return qdf_mem_stat.dp_tx_skb_count_max;
2798 }
2799 
2800 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2801 
2802 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2803 {
2804 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2805 }
2806 
2807 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2808 
2809 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2810 {
2811 	return qdf_mem_stat.dp_rx_skb_count_max;
2812 }
2813 
2814 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2815 
2816 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2817 {
2818 	return qdf_mem_stat.dp_tx_skb_mem_max;
2819 }
2820 
2821 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2822 
2823 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2824 {
2825 	return qdf_mem_stat.dp_rx_skb_mem_max;
2826 }
2827 
2828 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2829 
2830 int32_t qdf_mem_tx_desc_cnt_read(void)
2831 {
2832 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2833 }
2834 
2835 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2836 
2837 int32_t qdf_mem_tx_desc_max_read(void)
2838 {
2839 	return qdf_mem_stat.tx_descs_max;
2840 }
2841 
2842 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2843 
2844 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2845 				int32_t tx_descs_max)
2846 {
2847 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2848 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2849 }
2850 
2851 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2852 
2853 void qdf_mem_stats_init(void)
2854 {
2855 	qdf_mem_stat.skb_mem_max = 0;
2856 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
2857 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
2858 	qdf_mem_stat.dp_tx_skb_count_max = 0;
2859 	qdf_mem_stat.dp_rx_skb_count_max = 0;
2860 	qdf_mem_stat.tx_descs_max = 0;
2861 }
2862 
2863 qdf_export_symbol(qdf_mem_stats_init);
2864 
2865 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
2866 {
2867 	void *ptr;
2868 
2869 	if (!size) {
2870 		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
2871 		return NULL;
2872 	}
2873 
2874 	ptr = vzalloc(size);
2875 
2876 	return ptr;
2877 }
2878 
2879 qdf_export_symbol(__qdf_mem_valloc);
2880 
2881 void __qdf_mem_vfree(void *ptr)
2882 {
2883 	if (qdf_unlikely(!ptr))
2884 		return;
2885 
2886 	vfree(ptr);
2887 }
2888 
2889 qdf_export_symbol(__qdf_mem_vfree);
2890 
2891 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
2892 int
2893 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
2894 			  enum qdf_iommu_attr attr, void *data)
2895 {
2896 	return __qdf_iommu_domain_get_attr(domain, attr, data);
2897 }
2898 
2899 qdf_export_symbol(qdf_iommu_domain_get_attr);
2900 #endif
2901 
2902 #ifdef ENHANCED_OS_ABSTRACTION
2903 void qdf_update_mem_map_table(qdf_device_t osdev,
2904 			      qdf_mem_info_t *mem_info,
2905 			      qdf_dma_addr_t dma_addr,
2906 			      uint32_t mem_size)
2907 {
2908 	if (!mem_info) {
2909 		qdf_nofl_err("%s: NULL mem_info", __func__);
2910 		return;
2911 	}
2912 
2913 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
2914 }
2915 
2916 qdf_export_symbol(qdf_update_mem_map_table);
2917 
2918 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
2919 					  qdf_dma_addr_t dma_addr)
2920 {
2921 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2922 }
2923 
2924 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
2925 #endif
2926 
2927 #ifdef QCA_KMEM_CACHE_SUPPORT
2928 qdf_kmem_cache_t
2929 __qdf_kmem_cache_create(const char *cache_name,
2930 			qdf_size_t size)
2931 {
2932 	struct kmem_cache *cache;
2933 
2934 	cache = kmem_cache_create(cache_name, size,
2935 				  0, 0, NULL);
2936 
2937 	if (!cache)
2938 		return NULL;
2939 
2940 	return cache;
2941 }
2942 qdf_export_symbol(__qdf_kmem_cache_create);
2943 
2944 void
2945 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
2946 {
2947 	kmem_cache_destroy(cache);
2948 }
2949 
2950 qdf_export_symbol(__qdf_kmem_cache_destroy);
2951 
void *
2953 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
2954 {
2955 	int flags = GFP_KERNEL;
2956 
2957 	if (in_interrupt() || irqs_disabled() || in_atomic())
2958 		flags = GFP_ATOMIC;
2959 
2960 	return kmem_cache_alloc(cache, flags);
2961 }
2962 
2963 qdf_export_symbol(__qdf_kmem_cache_alloc);
2964 
2965 void
2966 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
2968 {
2969 	kmem_cache_free(cache, node);
2970 }
2971 
2972 qdf_export_symbol(__qdf_kmem_cache_free);
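
/*
 * Illustrative sketch (not part of the driver): lifecycle of a slab cache
 * through the wrappers above. The cache name and object type are
 * hypothetical.
 *
 *	qdf_kmem_cache_t cache;
 *	void *node;
 *
 *	cache = __qdf_kmem_cache_create("my_desc_cache",
 *					sizeof(struct my_desc));
 *	if (!cache)
 *		return;
 *
 *	node = __qdf_kmem_cache_alloc(cache);	// GFP_ATOMIC in atomic ctx
 *	if (node)
 *		__qdf_kmem_cache_free(cache, node);
 *
 *	__qdf_kmem_cache_destroy(cache);
 */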
2973 #else
2974 qdf_kmem_cache_t
2975 __qdf_kmem_cache_create(const char *cache_name,
2976 			qdf_size_t size)
2977 {
2978 	return NULL;
2979 }
2980 
2981 void
2982 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
2983 {
2984 }
2985 
2986 void *
2987 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
2988 {
2989 	return NULL;
2990 }
2991 
2992 void
2993 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
2994 {
2995 }
2996 #endif
2997