xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS-dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_str.h"
32 #include "qdf_talloc.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 #include <qdf_list.h>
37 
38 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
39 #include <net/cnss_prealloc.h>
40 #endif
41 
42 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
43 static bool mem_debug_disabled;
44 qdf_declare_param(mem_debug_disabled, bool);
45 qdf_export_symbol(mem_debug_disabled);
46 #endif
47 
48 #ifdef MEMORY_DEBUG
49 static bool is_initial_mem_debug_disabled;
50 #endif
51 
52 /* Preprocessor Definitions and Constants */
53 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MB */
54 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
55 #define QDF_DEBUG_STRING_SIZE 512
56 
57 /**
58  * struct __qdf_mem_stat - qdf memory statistics
59  * @kmalloc: total kmalloc allocations in bytes
60  * @dma: total dma allocations in bytes
61  * @skb: total skb allocations in bytes
62  * @skb_total: total skb allocations in host driver
63  * @dp_tx_skb: total Tx skb allocations in datapath
64  * @dp_rx_skb: total Rx skb allocations in datapath
65  * @skb_mem_max: high watermark for skb allocations
66  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
67  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
68  * @dp_tx_skb_count: DP Tx buffer count
69  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
70  * @dp_rx_skb_count: DP Rx buffer count
71  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
72  * @tx_descs_outstanding: Current pending Tx descs count
73  * @tx_descs_max: High watermark for pending Tx descs count
74  */
75 static struct __qdf_mem_stat {
76 	qdf_atomic_t kmalloc;
77 	qdf_atomic_t dma;
78 	qdf_atomic_t skb;
79 	qdf_atomic_t skb_total;
80 	qdf_atomic_t dp_tx_skb;
81 	qdf_atomic_t dp_rx_skb;
82 	int32_t skb_mem_max;
83 	int32_t dp_tx_skb_mem_max;
84 	int32_t dp_rx_skb_mem_max;
85 	qdf_atomic_t dp_tx_skb_count;
86 	int32_t dp_tx_skb_count_max;
87 	qdf_atomic_t dp_rx_skb_count;
88 	int32_t dp_rx_skb_count_max;
89 	qdf_atomic_t tx_descs_outstanding;
90 	int32_t tx_descs_max;
91 } qdf_mem_stat;
92 
93 #ifdef MEMORY_DEBUG
94 #include "qdf_debug_domain.h"
95 
96 enum list_type {
97 	LIST_TYPE_MEM = 0,
98 	LIST_TYPE_DMA = 1,
99 	LIST_TYPE_NBUF = 2,
100 	LIST_TYPE_MAX,
101 };
102 
103 /**
104  * struct major_alloc_priv - private data registered with the debugfs entry
105  *                   created to list the major allocations
106  * @type:            type of the list to be parsed
107  * @threshold:       configured by the user by writing to the respective
108  *                   debugfs entry; lists the functions which requested
109  *                   memory/dma allocations more than @threshold times.
110  */
111 struct major_alloc_priv {
112 	enum list_type type;
113 	uint32_t threshold;
114 };
115 
116 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
117 static qdf_spinlock_t qdf_mem_list_lock;
118 
119 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
120 static qdf_spinlock_t qdf_mem_dma_list_lock;
121 
122 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
123 {
124 	return &qdf_mem_domains[domain];
125 }
126 
127 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
128 {
129 	return &qdf_mem_dma_domains[domain];
130 }
131 
132 /**
133  * struct qdf_mem_header - memory object to debug
134  * @node: node to the list
135  * @domain: the active memory domain at time of allocation
136  * @freed: flag set during free, used to detect double frees
137  *	Use uint8_t so we can detect corruption
138  * @func: name of the function the allocation was made from
139  * @line: line number of the file the allocation was made from
140  * @size: size of the allocation in bytes
141  * @caller: Caller of the function for which memory is allocated
142  * @header: a known value, used to detect out-of-bounds access
143  * @time: timestamp at which allocation was made
144  */
145 struct qdf_mem_header {
146 	qdf_list_node_t node;
147 	enum qdf_debug_domain domain;
148 	uint8_t freed;
149 	char func[QDF_MEM_FUNC_NAME_SIZE];
150 	uint32_t line;
151 	uint32_t size;
152 	void *caller;
153 	uint64_t header;
154 	uint64_t time;
155 };
156 
157 /* align the qdf_mem_header to 8 bytes */
158 #define QDF_DMA_MEM_HEADER_ALIGN 8
159 
160 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
161 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
162 
163 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
164 {
165 	return (struct qdf_mem_header *)ptr - 1;
166 }
167 
168 /* make sure the header pointer is 8-byte aligned */
169 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
170 							    qdf_size_t size)
171 {
172 	return (struct qdf_mem_header *)
173 				qdf_roundup((size_t)((uint8_t *)ptr + size),
174 					    QDF_DMA_MEM_HEADER_ALIGN);
175 }
176 
177 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
178 {
179 	return (uint64_t *)((void *)(header + 1) + header->size);
180 }
181 
182 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
183 {
184 	return (void *)(header + 1);
185 }
186 
187 /* number of bytes needed for the qdf memory debug information */
188 #define QDF_MEM_DEBUG_SIZE \
189 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
190 
191 /* number of bytes needed for the qdf dma memory debug information */
192 #define QDF_DMA_MEM_DEBUG_SIZE \
193 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
194 
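/*
 * Debug allocation layout (sketch derived from the helpers above): a
 * request for @size bytes is padded with a known header in front and
 * a 64-bit trailer behind, so both underflow and overflow of the user
 * region can be detected at free time:
 *
 *   | struct qdf_mem_header | user data (size) | WLAN_MEM_TRAILER |
 *   ^                       ^
 *   kzalloc() result        pointer returned to the caller
 *
 * For DMA buffers the device needs the returned address itself, so
 * the header is instead placed after the buffer, rounded up to
 * QDF_DMA_MEM_HEADER_ALIGN.
 */
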
195 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
196 {
197 	QDF_BUG(header);
198 	if (!header)
199 		return;
200 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
201 }
202 
203 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
204 				const char *func, uint32_t line, void *caller)
205 {
206 	QDF_BUG(header);
207 	if (!header)
208 		return;
209 
210 	header->domain = qdf_debug_domain_get();
211 	header->freed = false;
212 
213 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
214 
215 	header->line = line;
216 	header->size = size;
217 	header->caller = caller;
218 	header->header = WLAN_MEM_HEADER;
219 	header->time = qdf_get_log_timestamp();
220 }
221 
222 enum qdf_mem_validation_bitmap {
223 	QDF_MEM_BAD_HEADER = 1 << 0,
224 	QDF_MEM_BAD_TRAILER = 1 << 1,
225 	QDF_MEM_BAD_SIZE = 1 << 2,
226 	QDF_MEM_DOUBLE_FREE = 1 << 3,
227 	QDF_MEM_BAD_FREED = 1 << 4,
228 	QDF_MEM_BAD_NODE = 1 << 5,
229 	QDF_MEM_BAD_DOMAIN = 1 << 6,
230 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
231 };
232 
233 static enum qdf_mem_validation_bitmap
234 qdf_mem_trailer_validate(struct qdf_mem_header *header)
235 {
236 	enum qdf_mem_validation_bitmap error_bitmap = 0;
237 
238 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
239 		error_bitmap |= QDF_MEM_BAD_TRAILER;
240 	return error_bitmap;
241 }
242 
243 static enum qdf_mem_validation_bitmap
244 qdf_mem_header_validate(struct qdf_mem_header *header,
245 			enum qdf_debug_domain domain)
246 {
247 	enum qdf_mem_validation_bitmap error_bitmap = 0;
248 
249 	if (header->header != WLAN_MEM_HEADER)
250 		error_bitmap |= QDF_MEM_BAD_HEADER;
251 
252 	if (header->size > QDF_MEM_MAX_MALLOC)
253 		error_bitmap |= QDF_MEM_BAD_SIZE;
254 
255 	if (header->freed == true)
256 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
257 	else if (header->freed)
258 		error_bitmap |= QDF_MEM_BAD_FREED;
259 
260 	if (!qdf_list_node_in_any_list(&header->node))
261 		error_bitmap |= QDF_MEM_BAD_NODE;
262 
263 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
264 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
265 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
266 	else if (header->domain != domain)
267 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
268 
269 	return error_bitmap;
270 }
271 
272 static void
273 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
274 			    enum qdf_debug_domain current_domain,
275 			    enum qdf_mem_validation_bitmap error_bitmap,
276 			    const char *func,
277 			    uint32_t line)
278 {
279 	if (!error_bitmap)
280 		return;
281 
282 	if (error_bitmap & QDF_MEM_BAD_HEADER)
283 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
284 			header->header, WLAN_MEM_HEADER);
285 
286 	if (error_bitmap & QDF_MEM_BAD_SIZE)
287 		qdf_err("Corrupted memory size %u (expected <= %d)",
288 			header->size, QDF_MEM_MAX_MALLOC);
289 
290 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
291 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
292 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
293 
294 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
295 		qdf_err("Memory has previously been freed");
296 
297 	if (error_bitmap & QDF_MEM_BAD_FREED)
298 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
299 
300 	if (error_bitmap & QDF_MEM_BAD_NODE)
301 		qdf_err("Corrupted memory header node or double free");
302 
303 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
304 		qdf_err("Corrupted memory domain 0x%x", header->domain);
305 
306 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
307 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
308 			qdf_debug_domain_name(header->domain), header->domain,
309 			qdf_debug_domain_name(current_domain), current_domain);
310 
311 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
312 }
313 
314 /**
315  * struct __qdf_mem_info - memory statistics
316  * @func: the function which allocated memory
317  * @line: the line at which allocation happened
318  * @size: the size of allocation
319  * @caller: Address of the caller function
320  * @count: how many allocations of same type
321  * @time: timestamp at which allocation happened
322  */
323 struct __qdf_mem_info {
324 	char func[QDF_MEM_FUNC_NAME_SIZE];
325 	uint32_t line;
326 	uint32_t size;
327 	void *caller;
328 	uint32_t count;
329 	uint64_t time;
330 };
331 
332 /*
333  * The table depth defines the de-duplication proximity scope.
334  * A deeper table takes more time, so choose an optimal trade-off.
335  */
336 #define QDF_MEM_STAT_TABLE_SIZE 8
337 
338 /**
339  * qdf_mem_debug_print_header() - memory debug header print logic
340  * @print: the print adapter function
341  * @print_priv: the private data to be consumed by @print
342  * @threshold: the threshold value set by user to list top allocations
343  *
344  * Return: None
345  */
346 static void qdf_mem_debug_print_header(qdf_abstract_print print,
347 				       void *print_priv,
348 				       uint32_t threshold)
349 {
350 	if (threshold)
351 		print(print_priv, "APIs that requested allocations >= %u times",
352 		      threshold);
353 	print(print_priv,
354 	      "--------------------------------------------------------------");
355 	print(print_priv,
356 	      " count    size     total    filename     caller    timestamp");
357 	print(print_priv,
358 	      "--------------------------------------------------------------");
359 }
360 
361 /**
362  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
363  * @table: the memory metadata table to insert into
364  * @meta: the memory metadata to insert
365  *
366  * Return: true if the table is full after inserting, false otherwise
367  */
368 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
369 				      struct qdf_mem_header *meta)
370 {
371 	int i;
372 
373 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
374 		if (!table[i].count) {
375 			qdf_str_lcopy(table[i].func, meta->func,
376 				      QDF_MEM_FUNC_NAME_SIZE);
377 			table[i].line = meta->line;
378 			table[i].size = meta->size;
379 			table[i].count = 1;
380 			table[i].caller = meta->caller;
381 			table[i].time = meta->time;
382 			break;
383 		}
384 
385 		if (qdf_str_eq(table[i].func, meta->func) &&
386 		    table[i].line == meta->line &&
387 		    table[i].size == meta->size &&
388 		    table[i].caller == meta->caller) {
389 			table[i].count++;
390 			break;
391 		}
392 	}
393 
394 	/* return true if the table is now full */
395 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
396 }
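
/*
 * Example (hypothetical call sites): three live allocations made at
 * foo.c:10 with size 64 from the same caller collapse into a single
 * row with count == 3, while an allocation at foo.c:20 occupies its
 * own row; rows are keyed on (func, line, size, caller).
 */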
397 
398 /**
399  * qdf_mem_domain_print() - output agnostic memory domain print logic
400  * @domain: the memory domain to print
401  * @print: the print adapter function
402  * @print_priv: the private data to be consumed by @print
403  * @threshold: the threshold value set by the user to list top allocations
404  * @mem_print: pointer to function which prints the memory allocation data
405  *
406  * Return: None
407  */
408 static void qdf_mem_domain_print(qdf_list_t *domain,
409 				 qdf_abstract_print print,
410 				 void *print_priv,
411 				 uint32_t threshold,
412 				 void (*mem_print)(struct __qdf_mem_info *,
413 						   qdf_abstract_print,
414 						   void *, uint32_t))
415 {
416 	QDF_STATUS status;
417 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
418 	qdf_list_node_t *node;
419 
420 	qdf_mem_zero(table, sizeof(table));
421 	qdf_mem_debug_print_header(print, print_priv, threshold);
422 
423 	/* hold lock while inserting to avoid use-after-free of the metadata */
424 	qdf_spin_lock(&qdf_mem_list_lock);
425 	status = qdf_list_peek_front(domain, &node);
426 	while (QDF_IS_STATUS_SUCCESS(status)) {
427 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
428 		bool is_full = qdf_mem_meta_table_insert(table, meta);
429 
430 		qdf_spin_unlock(&qdf_mem_list_lock);
431 
432 		if (is_full) {
433 			(*mem_print)(table, print, print_priv, threshold);
434 			qdf_mem_zero(table, sizeof(table));
435 		}
436 
437 		qdf_spin_lock(&qdf_mem_list_lock);
438 		status = qdf_list_peek_next(domain, node, &node);
439 	}
440 	qdf_spin_unlock(&qdf_mem_list_lock);
441 
442 	(*mem_print)(table, print, print_priv, threshold);
443 }
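
/*
 * Note: the de-duplication table is flushed to @print each time it
 * fills (QDF_MEM_STAT_TABLE_SIZE rows), so one report may contain
 * several dumps and identical call sites are only merged within a
 * single dump window.
 */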
444 
445 /**
446  * qdf_mem_meta_table_print() - memory metadata table print logic
447  * @table: the memory metadata table to print
448  * @print: the print adapter function
449  * @print_priv: the private data to be consumed by @print
450  * @threshold: the threshold value set by user to list top allocations
451  *
452  * Return: None
453  */
454 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
455 				     qdf_abstract_print print,
456 				     void *print_priv,
457 				     uint32_t threshold)
458 {
459 	int i;
460 	char debug_str[QDF_DEBUG_STRING_SIZE];
461 	size_t len = 0;
462 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
463 
464 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
465 			     "%s", debug_prefix);
466 
467 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
468 		if (!table[i].count)
469 			break;
470 
471 		print(print_priv,
472 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
473 		      table[i].count,
474 		      table[i].size,
475 		      table[i].count * table[i].size,
476 		      table[i].func,
477 		      table[i].line, table[i].caller,
478 		      table[i].time);
479 		len += qdf_scnprintf(debug_str + len,
480 				     sizeof(debug_str) - len,
481 				     " @ %s:%u %pS",
482 				     table[i].func,
483 				     table[i].line,
484 				     table[i].caller);
485 	}
486 	print(print_priv, "%s", debug_str);
487 }
488 
489 static int qdf_err_printer(void *priv, const char *fmt, ...)
490 {
491 	va_list args;
492 
493 	va_start(args, fmt);
494 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
495 	va_end(args);
496 
497 	return 0;
498 }
499 
500 #endif /* MEMORY_DEBUG */
501 
502 bool prealloc_disabled = true;
503 qdf_declare_param(prealloc_disabled, bool);
504 qdf_export_symbol(prealloc_disabled);
505 
506 int qdf_mem_malloc_flags(void)
507 {
508 	if (in_interrupt() || irqs_disabled() || in_atomic())
509 		return GFP_ATOMIC;
510 
511 	return GFP_KERNEL;
512 }
513 
514 qdf_export_symbol(qdf_mem_malloc_flags);
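
/*
 * Usage sketch (assumed caller, not from this file): allocation paths
 * that may run in either process or atomic context can pass the
 * result straight to the kernel allocator:
 *
 *   buf = kzalloc(len, qdf_mem_malloc_flags());
 *
 * In IRQ/atomic context this yields GFP_ATOMIC (never sleeps); in
 * process context it yields GFP_KERNEL and the allocation may block.
 */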
515 
516 /**
517  * qdf_prealloc_disabled_config_get() - Get the user configuration of
518  *                                       prealloc_disabled
519  *
520  * Return: value of prealloc_disabled qdf module argument
521  */
522 bool qdf_prealloc_disabled_config_get(void)
523 {
524 	return prealloc_disabled;
525 }
526 
527 qdf_export_symbol(qdf_prealloc_disabled_config_get);
528 
529 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
530 /**
531  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
532  * @str_value: value of the module param
533  *
534  * This function will set qdf module param prealloc_disabled
535  *
536  * Return: QDF_STATUS_SUCCESS on Success
537  */
538 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
539 {
540 	QDF_STATUS status;
541 
542 	status = qdf_bool_parse(str_value, &prealloc_disabled);
543 	return status;
544 }
545 #endif
546 
547 #if defined(WLAN_DEBUGFS)
548 
549 /* Debugfs root directory for qdf_mem */
550 static struct dentry *qdf_mem_debugfs_root;
551 
552 #ifdef MEMORY_DEBUG
553 static int seq_printf_printer(void *priv, const char *fmt, ...)
554 {
555 	struct seq_file *file = priv;
556 	va_list args;
557 
558 	va_start(args, fmt);
559 	seq_vprintf(file, fmt, args);
560 	seq_puts(file, "\n");
561 	va_end(args);
562 
563 	return 0;
564 }
565 
566 /**
567  * qdf_print_major_alloc() - memory metadata table print logic
568  * @table: the memory metadata table to print
569  * @print: the print adapter function
570  * @print_priv: the private data to be consumed by @print
571  * @threshold: the threshold value set by the user to list top allocations
572  *
573  * Return: None
574  */
575 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
576 				  qdf_abstract_print print,
577 				  void *print_priv,
578 				  uint32_t threshold)
579 {
580 	int i;
581 
582 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
583 		if (!table[i].count)
584 			break;
585 		if (table[i].count >= threshold)
586 			print(print_priv,
587 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
588 			      table[i].count,
589 			      table[i].size,
590 			      table[i].count * table[i].size,
591 			      table[i].func,
592 			      table[i].line, table[i].caller,
593 			      table[i].time);
594 	}
595 }
596 
597 /**
598  * qdf_mem_seq_start() - sequential callback to start
599  * @seq: seq_file handle
600  * @pos: The start position of the sequence
601  *
602  * Return: iterator pointer, or NULL if iteration is complete
603  */
604 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
605 {
606 	enum qdf_debug_domain domain = *pos;
607 
608 	if (!qdf_debug_domain_valid(domain))
609 		return NULL;
610 
611 	/* just use the current position as our iterator */
612 	return pos;
613 }
614 
615 /**
616  * qdf_mem_seq_next() - next sequential callback
617  * @seq: seq_file handle
618  * @v: the current iterator
619  * @pos: the current position
620  *
621  * Get the next node and release previous node.
622  *
623  * Return: iterator pointer, or NULL if iteration is complete
624  */
625 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
626 {
627 	++*pos;
628 
629 	return qdf_mem_seq_start(seq, pos);
630 }
631 
632 /**
633  * qdf_mem_seq_stop() - stop sequential callback
634  * @seq: seq_file handle
635  * @v: current iterator
636  *
637  * Return: None
638  */
639 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
640 
641 /**
642  * qdf_mem_seq_show() - print sequential callback
643  * @seq: seq_file handle
644  * @v: current iterator
645  *
646  * Return: 0 - success
647  */
648 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
649 {
650 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
651 
652 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
653 		   qdf_debug_domain_name(domain_id), domain_id);
654 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
655 			     seq_printf_printer,
656 			     seq,
657 			     0,
658 			     qdf_mem_meta_table_print);
659 
660 	return 0;
661 }
662 
663 /* sequential file operation table */
664 static const struct seq_operations qdf_mem_seq_ops = {
665 	.start = qdf_mem_seq_start,
666 	.next  = qdf_mem_seq_next,
667 	.stop  = qdf_mem_seq_stop,
668 	.show  = qdf_mem_seq_show,
669 };
670 
671 
672 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
673 {
674 	return seq_open(file, &qdf_mem_seq_ops);
675 }
676 
677 /**
678  * qdf_major_alloc_show() - print sequential callback
679  * @seq: seq_file handle
680  * @v: current iterator
681  *
682  * Return: 0 - success
683  */
684 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
685 {
686 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
687 	struct major_alloc_priv *priv;
688 	qdf_list_t *list;
689 
690 	priv = (struct major_alloc_priv *)seq->private;
691 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
692 		   qdf_debug_domain_name(domain_id), domain_id);
693 
694 	switch (priv->type) {
695 	case LIST_TYPE_MEM:
696 		list = qdf_mem_list_get(domain_id);
697 		break;
698 	case LIST_TYPE_DMA:
699 		list = qdf_mem_dma_list(domain_id);
700 		break;
701 	default:
702 		list = NULL;
703 		break;
704 	}
705 
706 	if (list)
707 		qdf_mem_domain_print(list,
708 				     seq_printf_printer,
709 				     seq,
710 				     priv->threshold,
711 				     qdf_print_major_alloc);
712 
713 	return 0;
714 }
715 
716 /* sequential file operation table created to track major allocs */
717 static const struct seq_operations qdf_major_allocs_seq_ops = {
718 	.start = qdf_mem_seq_start,
719 	.next = qdf_mem_seq_next,
720 	.stop = qdf_mem_seq_stop,
721 	.show = qdf_major_alloc_show,
722 };
723 
724 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
725 {
726 	void *private = inode->i_private;
727 	struct seq_file *seq;
728 	int rc;
729 
730 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
731 	if (rc == 0) {
732 		seq = file->private_data;
733 		seq->private = private;
734 	}
735 	return rc;
736 }
737 
738 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
739 					     const char __user *user_buf,
740 					     size_t count,
741 					     loff_t *pos)
742 {
743 	char buf[32];
744 	ssize_t buf_size;
745 	uint32_t threshold;
746 	struct seq_file *seq = file->private_data;
747 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
748 
749 	buf_size = min(count, (sizeof(buf) - 1));
750 	if (buf_size <= 0)
751 		return 0;
752 	if (copy_from_user(buf, user_buf, buf_size))
753 		return -EFAULT;
754 	buf[buf_size] = '\0';
755 	if (!kstrtou32(buf, 10, &threshold))
756 		priv->threshold = threshold;
757 	return buf_size;
758 }
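
/*
 * Usage sketch: the threshold is tuned from user space by writing a
 * decimal value to the corresponding debugfs file (the path below
 * assumes the default debugfs mount point and driver root):
 *
 *   echo 100 > /sys/kernel/debug/<driver>/mem/major_mem_allocs
 *   cat /sys/kernel/debug/<driver>/mem/major_mem_allocs
 *
 * Subsequent reads then list only call sites with at least 100
 * outstanding allocations.
 */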
759 
760 /**
761  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
762  * @threshold: the threshold value set by the user to list top allocations
763  * @print: the print adapter function
764  * @print_priv: the private data to be consumed by @print
765  * @mem_print: pointer to function which prints the memory allocation data
766  *
767  * Return: None
768  */
769 static void
770 qdf_print_major_nbuf_allocs(uint32_t threshold,
771 			    qdf_abstract_print print,
772 			    void *print_priv,
773 			    void (*mem_print)(struct __qdf_mem_info *,
774 					      qdf_abstract_print,
775 					      void *, uint32_t))
776 {
777 	uint32_t nbuf_iter;
778 	unsigned long irq_flag = 0;
779 	QDF_NBUF_TRACK *p_node;
780 	QDF_NBUF_TRACK *p_prev;
781 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
782 	struct qdf_mem_header meta;
783 	bool is_full;
784 
785 	qdf_mem_zero(table, sizeof(table));
786 	qdf_mem_debug_print_header(print, print_priv, threshold);
787 
788 	if (is_initial_mem_debug_disabled)
789 		return;
790 
791 	qdf_rl_info("major nbuf print with threshold %u", threshold);
792 
793 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
794 	     nbuf_iter++) {
795 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
796 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
797 		while (p_node) {
798 			meta.line = p_node->line_num;
799 			meta.size = p_node->size;
800 			meta.caller = NULL;
801 			meta.time = p_node->time;
802 			qdf_str_lcopy(meta.func, p_node->func_name,
803 				      QDF_MEM_FUNC_NAME_SIZE);
804 
805 			is_full = qdf_mem_meta_table_insert(table, &meta);
806 
807 			if (is_full) {
808 				(*mem_print)(table, print,
809 					     print_priv, threshold);
810 				qdf_mem_zero(table, sizeof(table));
811 			}
812 
813 			p_prev = p_node;
814 			p_node = p_node->p_next;
815 		}
816 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
817 	}
818 
819 	(*mem_print)(table, print, print_priv, threshold);
820 
821 	qdf_rl_info("major nbuf print end");
822 }
823 
824 /**
825  * qdf_major_nbuf_alloc_show() - print sequential callback
826  * @seq: seq_file handle
827  * @v: current iterator
828  *
829  * Return: 0 - success
830  */
831 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
832 {
833 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
834 
835 	if (!priv) {
836 		qdf_err("priv is null");
837 		return -EINVAL;
838 	}
839 
840 	qdf_print_major_nbuf_allocs(priv->threshold,
841 				    seq_printf_printer,
842 				    seq,
843 				    qdf_print_major_alloc);
844 
845 	return 0;
846 }
847 
848 /**
849  * qdf_nbuf_seq_start() - sequential callback to start
850  * @seq: seq_file handle
851  * @pos: The start position of the sequence
852  *
853  * Return: iterator pointer, or NULL if iteration is complete
854  */
855 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
856 {
857 	enum qdf_debug_domain domain = *pos;
858 
859 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
860 		return NULL;
861 
862 	return pos;
863 }
864 
865 /**
866  * qdf_nbuf_seq_next() - next sequential callback
867  * @seq: seq_file handle
868  * @v: the current iterator
869  * @pos: the current position
870  *
871  * Get the next node and release previous node.
872  *
873  * Return: iterator pointer, or NULL if iteration is complete
874  */
875 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
876 {
877 	++*pos;
878 
879 	return qdf_nbuf_seq_start(seq, pos);
880 }
881 
882 /**
883  * qdf_nbuf_seq_stop() - stop sequential callback
884  * @seq: seq_file handle
885  * @v: current iterator
886  *
887  * Return: None
888  */
889 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
890 
891 /* sequential file operation table created to track major skb allocs */
892 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
893 	.start = qdf_nbuf_seq_start,
894 	.next = qdf_nbuf_seq_next,
895 	.stop = qdf_nbuf_seq_stop,
896 	.show = qdf_major_nbuf_alloc_show,
897 };
898 
899 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
900 {
901 	void *private = inode->i_private;
902 	struct seq_file *seq;
903 	int rc;
904 
905 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
906 	if (rc == 0) {
907 		seq = file->private_data;
908 		seq->private = private;
909 	}
910 	return rc;
911 }
912 
913 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
914 						  const char __user *user_buf,
915 						  size_t count,
916 						  loff_t *pos)
917 {
918 	char buf[32];
919 	ssize_t buf_size;
920 	uint32_t threshold;
921 	struct seq_file *seq = file->private_data;
922 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
923 
924 	buf_size = min(count, (sizeof(buf) - 1));
925 	if (buf_size <= 0)
926 		return 0;
927 	if (copy_from_user(buf, user_buf, buf_size))
928 		return -EFAULT;
929 	buf[buf_size] = '\0';
930 	if (!kstrtou32(buf, 10, &threshold))
931 		priv->threshold = threshold;
932 	return buf_size;
933 }
934 
935 /* file operation table for listing major allocs */
936 static const struct file_operations fops_qdf_major_allocs = {
937 	.owner = THIS_MODULE,
938 	.open = qdf_major_allocs_open,
939 	.read = seq_read,
940 	.llseek = seq_lseek,
941 	.release = seq_release,
942 	.write = qdf_major_alloc_set_threshold,
943 };
944 
945 /* debugfs file operation table */
946 static const struct file_operations fops_qdf_mem_debugfs = {
947 	.owner = THIS_MODULE,
948 	.open = qdf_mem_debugfs_open,
949 	.read = seq_read,
950 	.llseek = seq_lseek,
951 	.release = seq_release,
952 };
953 
954 /* file operation table for listing major allocs */
955 static const struct file_operations fops_qdf_nbuf_major_allocs = {
956 	.owner = THIS_MODULE,
957 	.open = qdf_major_nbuf_allocs_open,
958 	.read = seq_read,
959 	.llseek = seq_lseek,
960 	.release = seq_release,
961 	.write = qdf_major_nbuf_alloc_set_threshold,
962 };
963 
964 static struct major_alloc_priv mem_priv = {
965 	/* List type set to mem */
966 	LIST_TYPE_MEM,
967 	/* initial threshold to list APIs which allocates mem >= 50 times */
968 	50
969 };
970 
971 static struct major_alloc_priv dma_priv = {
972 	/* List type set to DMA */
973 	LIST_TYPE_DMA,
974 	/* initial threshold to list APIs which allocates dma >= 50 times */
975 	50
976 };
977 
978 static struct major_alloc_priv nbuf_priv = {
979 	/* List type set to NBUF */
980 	LIST_TYPE_NBUF,
981 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
982 	50
983 };
984 
985 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
986 {
987 	if (is_initial_mem_debug_disabled)
988 		return QDF_STATUS_SUCCESS;
989 
990 	if (!qdf_mem_debugfs_root)
991 		return QDF_STATUS_E_FAILURE;
992 
993 	debugfs_create_file("list",
994 			    S_IRUSR,
995 			    qdf_mem_debugfs_root,
996 			    NULL,
997 			    &fops_qdf_mem_debugfs);
998 
999 	debugfs_create_file("major_mem_allocs",
1000 			    0600,
1001 			    qdf_mem_debugfs_root,
1002 			    &mem_priv,
1003 			    &fops_qdf_major_allocs);
1004 
1005 	debugfs_create_file("major_dma_allocs",
1006 			    0600,
1007 			    qdf_mem_debugfs_root,
1008 			    &dma_priv,
1009 			    &fops_qdf_major_allocs);
1010 
1011 	debugfs_create_file("major_nbuf_allocs",
1012 			    0600,
1013 			    qdf_mem_debugfs_root,
1014 			    &nbuf_priv,
1015 			    &fops_qdf_nbuf_major_allocs);
1016 
1017 	return QDF_STATUS_SUCCESS;
1018 }
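
/*
 * With memory debugging enabled, the debugfs tree under the "mem"
 * directory therefore looks like (sketch):
 *
 *   mem/list              - full per-domain allocation dump
 *   mem/major_mem_allocs  - heap call sites above the threshold
 *   mem/major_dma_allocs  - DMA call sites above the threshold
 *   mem/major_nbuf_allocs - nbuf call sites above the threshold
 *
 * plus the kmalloc/dma/skb byte counters created by
 * qdf_mem_debugfs_init() below.
 */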
1019 
1020 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1021 {
1022 	return QDF_STATUS_SUCCESS;
1023 }
1024 
1025 #else /* MEMORY_DEBUG */
1026 
1027 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1028 {
1029 	return QDF_STATUS_E_NOSUPPORT;
1030 }
1031 
1032 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1033 {
1034 	return QDF_STATUS_E_NOSUPPORT;
1035 }
1036 
1037 #endif /* MEMORY_DEBUG */
1038 
1039 
1040 static void qdf_mem_debugfs_exit(void)
1041 {
1042 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1043 	qdf_mem_debugfs_root = NULL;
1044 }
1045 
1046 static QDF_STATUS qdf_mem_debugfs_init(void)
1047 {
1048 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1049 
1050 	if (!qdf_debugfs_root)
1051 		return QDF_STATUS_E_FAILURE;
1052 
1053 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1054 
1055 	if (!qdf_mem_debugfs_root)
1056 		return QDF_STATUS_E_FAILURE;
1057 
1058 
1059 	debugfs_create_atomic_t("kmalloc",
1060 				S_IRUSR,
1061 				qdf_mem_debugfs_root,
1062 				&qdf_mem_stat.kmalloc);
1063 
1064 	debugfs_create_atomic_t("dma",
1065 				S_IRUSR,
1066 				qdf_mem_debugfs_root,
1067 				&qdf_mem_stat.dma);
1068 
1069 	debugfs_create_atomic_t("skb",
1070 				S_IRUSR,
1071 				qdf_mem_debugfs_root,
1072 				&qdf_mem_stat.skb);
1073 
1074 	return QDF_STATUS_SUCCESS;
1075 }
1076 
1077 #else /* WLAN_DEBUGFS */
1078 
1079 static QDF_STATUS qdf_mem_debugfs_init(void)
1080 {
1081 	return QDF_STATUS_E_NOSUPPORT;
1082 }
1083 static void qdf_mem_debugfs_exit(void) {}
1084 
1085 
1086 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1087 {
1088 	return QDF_STATUS_E_NOSUPPORT;
1089 }
1090 
1091 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1092 {
1093 	return QDF_STATUS_E_NOSUPPORT;
1094 }
1095 
1096 #endif /* WLAN_DEBUGFS */
1097 
1098 void qdf_mem_kmalloc_inc(qdf_size_t size)
1099 {
1100 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1101 }
1102 
1103 static void qdf_mem_dma_inc(qdf_size_t size)
1104 {
1105 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1106 }
1107 
1108 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1109 void qdf_mem_skb_inc(qdf_size_t size)
1110 {
1111 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1112 }
1113 
1114 void qdf_mem_skb_dec(qdf_size_t size)
1115 {
1116 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1117 }
1118 
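/*
 * Note on the *_max watermarks below: each counter update is atomic,
 * but the subsequent read-compare-store of the high watermark is not,
 * so concurrent increments may record a slightly stale maximum. The
 * watermarks are best-effort statistics, not exact bounds.
 */
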
1119 void qdf_mem_skb_total_inc(qdf_size_t size)
1120 {
1121 	int32_t skb_mem_max = 0;
1122 
1123 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1124 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1125 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1126 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1127 }
1128 
1129 void qdf_mem_skb_total_dec(qdf_size_t size)
1130 {
1131 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1132 }
1133 
1134 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1135 {
1136 	int32_t curr_dp_tx_skb_mem_max = 0;
1137 
1138 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1139 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1140 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1141 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1142 }
1143 
1144 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1145 {
1146 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1147 }
1148 
1149 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1150 {
1151 	int32_t curr_dp_rx_skb_mem_max = 0;
1152 
1153 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1154 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1155 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1156 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1157 }
1158 
1159 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1160 {
1161 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1162 }
1163 
1164 void qdf_mem_dp_tx_skb_cnt_inc(void)
1165 {
1166 	int32_t curr_dp_tx_skb_count_max = 0;
1167 
1168 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1169 	curr_dp_tx_skb_count_max =
1170 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1171 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1172 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1173 }
1174 
1175 void qdf_mem_dp_tx_skb_cnt_dec(void)
1176 {
1177 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1178 }
1179 
1180 void qdf_mem_dp_rx_skb_cnt_inc(void)
1181 {
1182 	int32_t curr_dp_rx_skb_count_max = 0;
1183 
1184 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1185 	curr_dp_rx_skb_count_max =
1186 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1187 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1188 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1189 }
1190 
1191 void qdf_mem_dp_rx_skb_cnt_dec(void)
1192 {
1193 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1194 }
1195 #endif
1196 
1197 void qdf_mem_kmalloc_dec(qdf_size_t size)
1198 {
1199 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1200 }
1201 
1202 static inline void qdf_mem_dma_dec(qdf_size_t size)
1203 {
1204 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1205 }
1206 
1207 /**
1208  * __qdf_mempool_init() - Create and initialize memory pool
1209  *
1210  * @osdev: platform device object
1211  * @pool_addr: address of the pool created
1212  * @elem_cnt: no. of elements in pool
1213  * @elem_size: size of each pool element in bytes
1214  * @flags: flags
1215  *
1216  * Return: 0 on success, or a negative error code on failure
1217  */
1218 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1219 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1220 {
1221 	__qdf_mempool_ctxt_t *new_pool = NULL;
1222 	u_int32_t align = L1_CACHE_BYTES;
1223 	unsigned long aligned_pool_mem;
1224 	int pool_id;
1225 	int i;
1226 
1227 	if (prealloc_disabled) {
1228 		/* TBD: We can maintain a list of pools in qdf_device_t
1229 		 * to help debugging when pre-allocation is not enabled
1230 		 */
1232 		new_pool = (__qdf_mempool_ctxt_t *)
1233 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1234 		if (!new_pool)
1235 			return -ENOMEM;
1236 
1237 		memset(new_pool, 0, sizeof(*new_pool));
1238 		/* TBD: define flags for zeroing buffers etc */
1239 		new_pool->flags = flags;
1240 		new_pool->elem_size = elem_size;
1241 		new_pool->max_elem = elem_cnt;
1242 		*pool_addr = new_pool;
1243 		return 0;
1244 	}
1245 
1246 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1247 		if (!osdev->mem_pool[pool_id])
1248 			break;
1249 	}
1250 
1251 	if (pool_id == MAX_MEM_POOLS)
1252 		return -ENOMEM;
1253 
1254 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1255 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1256 	if (!new_pool)
1257 		return -ENOMEM;
1258 
1259 	memset(new_pool, 0, sizeof(*new_pool));
1260 	/* TBD: define flags for zeroing buffers etc */
1261 	new_pool->flags = flags;
1262 	new_pool->pool_id = pool_id;
1263 
1264 	/* Round up the element size to cacheline */
1265 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1266 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1267 				((align)?(align - 1):0);
1268 
1269 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1270 	if (!new_pool->pool_mem) {
1271 		/* TBD: Check if we need get_free_pages above */
1272 		kfree(new_pool);
1273 		osdev->mem_pool[pool_id] = NULL;
1274 		return -ENOMEM;
1275 	}
1276 
1277 	spin_lock_init(&new_pool->lock);
1278 
1279 	/* Initialize free list */
1280 	/* round the base address up to the cacheline boundary; mem_size
1281 	 * above reserves the extra (align - 1) bytes this may consume
1282 	 */
1283 	aligned_pool_mem = roundup((unsigned long)new_pool->pool_mem, align);
1282 	STAILQ_INIT(&new_pool->free_list);
1283 
1284 	for (i = 0; i < elem_cnt; i++)
1285 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1286 			(mempool_elem_t *)(aligned_pool_mem +
1287 			(new_pool->elem_size * i)), mempool_entry);
1288 
1289 
1290 	new_pool->free_cnt = elem_cnt;
1291 	*pool_addr = new_pool;
1292 	return 0;
1293 }
1294 qdf_export_symbol(__qdf_mempool_init);
1295 
1296 /**
1297  * __qdf_mempool_destroy() - Destroy memory pool
1298  * @osdev: platform device object
1299  * @pool: handle to the memory pool
1300  *
1301  * Return: none
1302  */
1303 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1304 {
1305 	int pool_id = 0;
1306 
1307 	if (!pool)
1308 		return;
1309 
1310 	if (prealloc_disabled) {
1311 		kfree(pool);
1312 		return;
1313 	}
1314 
1315 	pool_id = pool->pool_id;
1316 
1317 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1318 	kfree(pool->pool_mem);
1319 	kfree(pool);
1320 	osdev->mem_pool[pool_id] = NULL;
1321 }
1322 qdf_export_symbol(__qdf_mempool_destroy);
1323 
1324 /**
1325  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1326  *
1327  * @osdev: platform device object
1328  * @pool: handle to the memory pool
1329  *
1330  * Return: Pointer to the allocated element or NULL if the pool is empty
1331  */
1332 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1333 {
1334 	void *buf = NULL;
1335 
1336 	if (!pool)
1337 		return NULL;
1338 
1339 	if (prealloc_disabled)
1340 		return qdf_mem_malloc(pool->elem_size);
1341 
1342 	spin_lock_bh(&pool->lock);
1343 
1344 	buf = STAILQ_FIRST(&pool->free_list);
1345 	if (buf) {
1346 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1347 		pool->free_cnt--;
1348 	}
1349 
1350 	/* TBD: Update free count if debug is enabled */
1351 	spin_unlock_bh(&pool->lock);
1352 
1353 	return buf;
1354 }
1355 qdf_export_symbol(__qdf_mempool_alloc);
1356 
1357 /**
1358  * __qdf_mempool_free() - Free a memory pool element
1359  * @osdev: Platform device object
1360  * @pool: Handle to memory pool
1361  * @buf: Element to be freed
1362  *
1363  * Return: none
1364  */
1365 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1366 {
1367 	if (!pool)
1368 		return;
1369 
1370 	if (prealloc_disabled) {
1371 		qdf_mem_free(buf);
1372 		return;
1373 	}
1373 
1374 	spin_lock_bh(&pool->lock);
1375 	pool->free_cnt++;
1376 
1377 	STAILQ_INSERT_TAIL(&pool->free_list, (mempool_elem_t *)buf,
1378 			   mempool_entry);
1379 	spin_unlock_bh(&pool->lock);
1380 }
1381 qdf_export_symbol(__qdf_mempool_free);
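
/*
 * Lifecycle sketch (hypothetical caller and element type, shown for
 * illustration only):
 *
 *   __qdf_mempool_t pool;
 *   struct foo *elem;
 *
 *   if (__qdf_mempool_init(osdev, &pool, 32, sizeof(struct foo), 0))
 *           return;          (pool could not be created)
 *   elem = __qdf_mempool_alloc(osdev, pool);
 *   ...
 *   __qdf_mempool_free(osdev, pool, elem);
 *   __qdf_mempool_destroy(osdev, pool);
 *
 * When prealloc_disabled is set, alloc/free degenerate to plain
 * qdf_mem_malloc()/qdf_mem_free() with no pooling.
 */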
1382 
1383 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
1384 static bool qdf_might_be_prealloc(void *ptr)
1385 {
1386 	return ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD;
1390 }
1391 
1392 /**
1393  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1394  * @size: the number of bytes to allocate
1395  *
1396  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1397  * a chunk of pre-allocated memory. If size is less than or equal to
1398  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1399  *
1400  * Return: NULL on failure, non-NULL on success
1401  */
1402 static void *qdf_mem_prealloc_get(size_t size)
1403 {
1404 	void *ptr;
1405 
1406 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1407 		return NULL;
1408 
1409 	ptr = wcnss_prealloc_get(size);
1410 	if (!ptr)
1411 		return NULL;
1412 
1413 	memset(ptr, 0, size);
1414 
1415 	return ptr;
1416 }
1417 
1418 static inline bool qdf_mem_prealloc_put(void *ptr)
1419 {
1420 	return wcnss_prealloc_put(ptr);
1421 }
1422 #else
1423 static bool qdf_might_be_prealloc(void *ptr)
1424 {
1425 	return false;
1426 }
1427 
1428 static inline void *qdf_mem_prealloc_get(size_t size)
1429 {
1430 	return NULL;
1431 }
1432 
1433 static inline bool qdf_mem_prealloc_put(void *ptr)
1434 {
1435 	return false;
1436 }
1437 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
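
/*
 * Behaviour summary: with CONFIG_WCNSS_MEM_PRE_ALLOC enabled,
 * requests larger than WCNSS_PRE_ALLOC_GET_THRESHOLD are first tried
 * against the pre-allocated pool, and callers fall back to the kernel
 * allocator only when the pool returns nothing; smaller requests
 * bypass the pool entirely. Without the config, all three helpers
 * compile to no-ops and every request goes to the kernel allocator.
 */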
1438 
1439 /* External Function implementation */
1440 #ifdef MEMORY_DEBUG
1441 /**
1442  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1443  *
1444  * Return: value of mem_debug_disabled qdf module argument
1445  */
1446 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1447 bool qdf_mem_debug_config_get(void)
1448 {
1449 	/* Return false if the DISABLE_MEM_DBG_LOAD_CONFIG flag is defined */
1450 	return false;
1451 }
1452 #else
1453 bool qdf_mem_debug_config_get(void)
1454 {
1455 	return mem_debug_disabled;
1456 }
1457 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1458 
1459 /**
1460  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
1461  * @str_value: value of the module param
1462  *
1463  * This function will set the qdf module param mem_debug_disabled
1464  *
1465  * Return: QDF_STATUS_SUCCESS on Success
1466  */
1467 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1468 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1469 {
1470 	QDF_STATUS status;
1471 
1472 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1473 	return status;
1474 }
1475 #endif
1476 
1477 /**
1478  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1479  *
1480  * Return: none
1481  */
1482 static void qdf_mem_debug_init(void)
1483 {
1484 	int i;
1485 
1486 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1487 
1488 	if (is_initial_mem_debug_disabled)
1489 		return;
1490 
1491 	/* Initializing the lists with a maximum size of 60000 */
1492 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1493 		qdf_list_create(&qdf_mem_domains[i], 60000);
1494 	qdf_spinlock_create(&qdf_mem_list_lock);
1495 
1496 	/* dma */
1497 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1498 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1499 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1500 }
1501 
1502 static uint32_t
1503 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1504 			       qdf_list_t *mem_list)
1505 {
1506 	if (is_initial_mem_debug_disabled)
1507 		return 0;
1508 
1509 	if (qdf_list_empty(mem_list))
1510 		return 0;
1511 
1512 	qdf_err("Memory leaks detected in %s domain!",
1513 		qdf_debug_domain_name(domain));
1514 	qdf_mem_domain_print(mem_list,
1515 			     qdf_err_printer,
1516 			     NULL,
1517 			     0,
1518 			     qdf_mem_meta_table_print);
1519 
1520 	return mem_list->count;
1521 }
1522 
1523 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1524 {
1525 	uint32_t leak_count = 0;
1526 	int i;
1527 
1528 	if (is_initial_mem_debug_disabled)
1529 		return;
1530 
1531 	/* detect and print leaks */
1532 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1533 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1534 
1535 	if (leak_count)
1536 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1537 				   leak_count);
1538 }
1539 
1540 /**
1541  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1542  *
1543  * Return: none
1544  */
1545 static void qdf_mem_debug_exit(void)
1546 {
1547 	int i;
1548 
1549 	if (is_initial_mem_debug_disabled)
1550 		return;
1551 
1552 	/* mem */
1553 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1554 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1555 		qdf_list_destroy(qdf_mem_list_get(i));
1556 
1557 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1558 
1559 	/* dma */
1560 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1561 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1562 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1563 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1564 }
1565 
1566 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1567 			   void *caller, uint32_t flag)
1568 {
1569 	QDF_STATUS status;
1570 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1571 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1572 	struct qdf_mem_header *header;
1573 	void *ptr;
1574 	unsigned long start, duration;
1575 
1576 	if (is_initial_mem_debug_disabled)
1577 		return __qdf_mem_malloc(size, func, line);
1578 
1579 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1580 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1581 		return NULL;
1582 	}
1583 
1584 	ptr = qdf_mem_prealloc_get(size);
1585 	if (ptr)
1586 		return ptr;
1587 
1588 	if (!flag)
1589 		flag = qdf_mem_malloc_flags();
1590 
1591 	start = qdf_mc_timer_get_system_time();
1592 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1593 	duration = qdf_mc_timer_get_system_time() - start;
1594 
1595 	if (duration > QDF_MEM_WARN_THRESHOLD)
1596 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1597 			 duration, size, func, line);
1598 
1599 	if (!header) {
1600 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1601 		return NULL;
1602 	}
1603 
1604 	qdf_mem_header_init(header, size, func, line, caller);
1605 	qdf_mem_trailer_init(header);
1606 	ptr = qdf_mem_get_ptr(header);
1607 
1608 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1609 	status = qdf_list_insert_front(mem_list, &header->node);
1610 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1611 	if (QDF_IS_STATUS_ERROR(status))
1612 		qdf_err("Failed to insert memory header; status %d", status);
1613 
1614 	qdf_mem_kmalloc_inc(ksize(header));
1615 
1616 	return ptr;
1617 }
1618 qdf_export_symbol(qdf_mem_malloc_debug);
1619 
1620 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1621 {
1622 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1623 	struct qdf_mem_header *header;
1624 	enum qdf_mem_validation_bitmap error_bitmap;
1625 
1626 	if (is_initial_mem_debug_disabled) {
1627 		__qdf_mem_free(ptr);
1628 		return;
1629 	}
1630 
1631 	/* freeing a null pointer is valid */
1632 	if (qdf_unlikely(!ptr))
1633 		return;
1634 
1635 	if (qdf_mem_prealloc_put(ptr))
1636 		return;
1637 
1638 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1639 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1640 				   ptr);
1641 
1642 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1643 
1644 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1645 	header = qdf_mem_get_header(ptr);
1646 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1647 	error_bitmap |= qdf_mem_trailer_validate(header);
1648 
1649 	if (!error_bitmap) {
1650 		header->freed = true;
1651 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1652 				     &header->node);
1653 	}
1654 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1655 
1656 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1657 				    func, line);
1658 
1659 	qdf_mem_kmalloc_dec(ksize(header));
1660 	kfree(header);
1661 }
1662 qdf_export_symbol(qdf_mem_free_debug);
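
/*
 * Failure-mode sketch (illustrative, not part of the driver): a
 * double free trips QDF_MEM_DOUBLE_FREE because the first free sets
 * header->freed and unlinks the node, while writing past the end of
 * an allocation corrupts WLAN_MEM_TRAILER and trips
 * QDF_MEM_BAD_TRAILER; both paths end in QDF_MEMDEBUG_PANIC() via
 * qdf_mem_header_assert_valid():
 *
 *   ptr = qdf_mem_malloc(len);
 *   qdf_mem_free(ptr);
 *   qdf_mem_free(ptr);   (panics: "Memory has previously been freed")
 */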
1663 
1664 void qdf_mem_check_for_leaks(void)
1665 {
1666 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1667 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1668 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1669 	uint32_t leaks_count = 0;
1670 
1671 	if (is_initial_mem_debug_disabled)
1672 		return;
1673 
1674 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1675 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1676 
1677 	if (leaks_count)
1678 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1679 				   leaks_count);
1680 }
1681 
1682 /**
1683  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1684  * qdf_mem_multi_pages_alloc
1685  * @osdev: OS device handle pointer
1686  * @pages: Multi page information storage
1687  * @element_size: Each element size
1688  * @element_num: Total number of elements to be allocated
1689  * @memctxt: Memory context
1690  * @cacheable: Coherent memory or cacheable memory
1691  * @func: Caller of this allocator
1692  * @line: Line number of the caller
1693  * @caller: Return address of the caller
1694  *
1695  * This function will allocate a large amount of memory over multiple pages.
1696  * Large contiguous allocations fail frequently, so instead of allocating
1697  * a large block in one shot, this allocates multiple non-contiguous pages
1698  * and combines them at the point of actual usage.
1699  *
1700  * Return: None
1701  */
1702 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1703 				     struct qdf_mem_multi_page_t *pages,
1704 				     size_t element_size, uint16_t element_num,
1705 				     qdf_dma_context_t memctxt, bool cacheable,
1706 				     const char *func, uint32_t line,
1707 				     void *caller)
1708 {
1709 	uint16_t page_idx;
1710 	struct qdf_mem_dma_page_t *dma_pages;
1711 	void **cacheable_pages = NULL;
1712 	uint16_t i;
1713 
1714 	if (!pages->page_size)
1715 		pages->page_size = qdf_page_size;
1716 
1717 	pages->num_element_per_page = pages->page_size / element_size;
1718 	if (!pages->num_element_per_page) {
1719 		qdf_print("Invalid page %d or element size %d",
1720 			  (int)pages->page_size, (int)element_size);
1721 		goto out_fail;
1722 	}
1723 
1724 	pages->num_pages = element_num / pages->num_element_per_page;
1725 	if (element_num % pages->num_element_per_page)
1726 		pages->num_pages++;
1727 
1728 	if (cacheable) {
1729 		/* Pages information storage */
1730 		pages->cacheable_pages = qdf_mem_malloc_debug(
1731 			pages->num_pages * sizeof(pages->cacheable_pages),
1732 			func, line, caller, 0);
1733 		if (!pages->cacheable_pages)
1734 			goto out_fail;
1735 
1736 		cacheable_pages = pages->cacheable_pages;
1737 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1738 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1739 				pages->page_size, func, line, caller, 0);
1740 			if (!cacheable_pages[page_idx])
1741 				goto page_alloc_fail;
1742 		}
1743 		pages->dma_pages = NULL;
1744 	} else {
1745 		pages->dma_pages = qdf_mem_malloc_debug(
1746 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1747 			func, line, caller, 0);
1748 		if (!pages->dma_pages)
1749 			goto out_fail;
1750 
1751 		dma_pages = pages->dma_pages;
1752 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1753 			dma_pages->page_v_addr_start =
1754 				qdf_mem_alloc_consistent_debug(
1755 					osdev, osdev->dev, pages->page_size,
1756 					&dma_pages->page_p_addr,
1757 					func, line, caller);
1758 			if (!dma_pages->page_v_addr_start) {
1759 				qdf_print("dmaable page alloc fail pi %d",
1760 					  page_idx);
1761 				goto page_alloc_fail;
1762 			}
1763 			dma_pages->page_v_addr_end =
1764 				dma_pages->page_v_addr_start + pages->page_size;
1765 			dma_pages++;
1766 		}
1767 		pages->cacheable_pages = NULL;
1768 	}
1769 	return;
1770 
1771 page_alloc_fail:
1772 	if (cacheable) {
1773 		for (i = 0; i < page_idx; i++)
1774 			qdf_mem_free_debug(pages->cacheable_pages[i],
1775 					   func, line);
1776 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1777 	} else {
1778 		dma_pages = pages->dma_pages;
1779 		for (i = 0; i < page_idx; i++) {
1780 			qdf_mem_free_consistent_debug(
1781 				osdev, osdev->dev,
1782 				pages->page_size, dma_pages->page_v_addr_start,
1783 				dma_pages->page_p_addr, memctxt, func, line);
1784 			dma_pages++;
1785 		}
1786 		qdf_mem_free_debug(pages->dma_pages, func, line);
1787 	}
1788 
1789 out_fail:
1790 	pages->cacheable_pages = NULL;
1791 	pages->dma_pages = NULL;
1792 	pages->num_pages = 0;
1793 }
1794 
1795 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
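
/*
 * Sizing example (hypothetical numbers): with element_size = 128,
 * element_num = 100 and a 4096-byte page, num_element_per_page is
 * 4096 / 128 = 32, so num_pages = 100 / 32 = 3 plus one extra page
 * for the 4-element remainder, i.e. 4 pages in total.
 */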
1796 
1797 /**
1798  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1799  * @osdev: OS device handle pointer
1800  * @pages: Multi page information storage
1801  * @memctxt: Memory context
1802  * @cacheable: Coherent memory or cacheable memory
1803  * @func: Caller of this allocator
1804  * @line: Line number of the caller
1805  *
1806  * This function will free large size of memory over multiple pages.
1807  *
1808  * Return: None
1809  */
1810 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1811 				    struct qdf_mem_multi_page_t *pages,
1812 				    qdf_dma_context_t memctxt, bool cacheable,
1813 				    const char *func, uint32_t line)
1814 {
1815 	unsigned int page_idx;
1816 	struct qdf_mem_dma_page_t *dma_pages;
1817 
1818 	if (!pages->page_size)
1819 		pages->page_size = qdf_page_size;
1820 
1821 	if (cacheable) {
1822 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1823 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1824 					   func, line);
1825 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1826 	} else {
1827 		dma_pages = pages->dma_pages;
1828 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1829 			qdf_mem_free_consistent_debug(
1830 				osdev, osdev->dev, pages->page_size,
1831 				dma_pages->page_v_addr_start,
1832 				dma_pages->page_p_addr, memctxt, func, line);
1833 			dma_pages++;
1834 		}
1835 		qdf_mem_free_debug(pages->dma_pages, func, line);
1836 	}
1837 
1838 	pages->cacheable_pages = NULL;
1839 	pages->dma_pages = NULL;
1840 	pages->num_pages = 0;
1841 }
1842 
1843 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1844 
1845 #else
1846 static void qdf_mem_debug_init(void) {}
1847 
1848 static void qdf_mem_debug_exit(void) {}
1849 
1850 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1851 {
1852 	void *ptr;
1853 
1854 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1855 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1856 			     line);
1857 		return NULL;
1858 	}
1859 
1860 	ptr = qdf_mem_prealloc_get(size);
1861 	if (ptr)
1862 		return ptr;
1863 
1864 	ptr = kzalloc(size, GFP_ATOMIC);
1865 	if (!ptr) {
1866 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1867 			      size, func, line);
1868 		return NULL;
1869 	}
1870 
1871 	qdf_mem_kmalloc_inc(ksize(ptr));
1872 
1873 	return ptr;
1874 }
1875 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1876 
1877 /**
1878  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1879  * @osdev: OS device handle pointer
1880  * @pages: Multi page information storage
1881  * @element_size: Each element size
1882  * @element_num: Total number of elements to be allocated
1883  * @memctxt: Memory context
1884  * @cacheable: Coherent memory or cacheable memory
1885  *
1886  * This function will allocate a large amount of memory over multiple pages.
1887  * Large contiguous allocations fail frequently, so instead of allocating
1888  * a large block in one shot, this allocates multiple non-contiguous pages
1889  * and combines them at the point of actual usage.
1890  *
1891  * Return: None
1892  */
1893 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1894 			       struct qdf_mem_multi_page_t *pages,
1895 			       size_t element_size, uint16_t element_num,
1896 			       qdf_dma_context_t memctxt, bool cacheable)
1897 {
1898 	uint16_t page_idx;
1899 	struct qdf_mem_dma_page_t *dma_pages;
1900 	void **cacheable_pages = NULL;
1901 	uint16_t i;
1902 
1903 	if (!pages->page_size)
1904 		pages->page_size = qdf_page_size;
1905 
1906 	pages->num_element_per_page = pages->page_size / element_size;
1907 	if (!pages->num_element_per_page) {
1908 		qdf_print("Invalid page %d or element size %d",
1909 			  (int)pages->page_size, (int)element_size);
1910 		goto out_fail;
1911 	}
1912 
1913 	pages->num_pages = element_num / pages->num_element_per_page;
1914 	if (element_num % pages->num_element_per_page)
1915 		pages->num_pages++;
1916 
1917 	if (cacheable) {
1918 		/* Pages information storage */
1919 		pages->cacheable_pages = qdf_mem_malloc(
1920 			pages->num_pages * sizeof(pages->cacheable_pages));
1921 		if (!pages->cacheable_pages)
1922 			goto out_fail;
1923 
1924 		cacheable_pages = pages->cacheable_pages;
1925 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1926 			cacheable_pages[page_idx] =
1927 				qdf_mem_malloc(pages->page_size);
1928 			if (!cacheable_pages[page_idx])
1929 				goto page_alloc_fail;
1930 		}
1931 		pages->dma_pages = NULL;
1932 	} else {
1933 		pages->dma_pages = qdf_mem_malloc(
1934 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1935 		if (!pages->dma_pages)
1936 			goto out_fail;
1937 
1938 		dma_pages = pages->dma_pages;
1939 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1940 			dma_pages->page_v_addr_start =
1941 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1942 					 pages->page_size,
1943 					&dma_pages->page_p_addr);
1944 			if (!dma_pages->page_v_addr_start) {
1945 				qdf_print("dma-able page alloc fail, page_idx %d",
1946 					  page_idx);
1947 				goto page_alloc_fail;
1948 			}
1949 			dma_pages->page_v_addr_end =
1950 				dma_pages->page_v_addr_start + pages->page_size;
1951 			dma_pages++;
1952 		}
1953 		pages->cacheable_pages = NULL;
1954 	}
1955 	return;
1956 
1957 page_alloc_fail:
1958 	if (cacheable) {
1959 		for (i = 0; i < page_idx; i++)
1960 			qdf_mem_free(pages->cacheable_pages[i]);
1961 		qdf_mem_free(pages->cacheable_pages);
1962 	} else {
1963 		dma_pages = pages->dma_pages;
1964 		for (i = 0; i < page_idx; i++) {
1965 			qdf_mem_free_consistent(
1966 				osdev, osdev->dev, pages->page_size,
1967 				dma_pages->page_v_addr_start,
1968 				dma_pages->page_p_addr, memctxt);
1969 			dma_pages++;
1970 		}
1971 		qdf_mem_free(pages->dma_pages);
1972 	}
1973 
1974 out_fail:
1975 	pages->cacheable_pages = NULL;
1976 	pages->dma_pages = NULL;
1977 	pages->num_pages = 0;
1978 	return;
1979 }
1980 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1981 
1982 /**
1983  * qdf_mem_multi_pages_free() - free a large amount of kernel memory
1984  * @osdev: OS device handle pointer
1985  * @pages: Multi page information storage
1986  * @memctxt: Memory context
1987  * @cacheable: true for cacheable pages, false for coherent (DMA) pages
1988  *
1989  * This function frees a large memory region allocated across multiple pages.
1990  *
1991  * Return: None
1992  */
1993 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1994 			      struct qdf_mem_multi_page_t *pages,
1995 			      qdf_dma_context_t memctxt, bool cacheable)
1996 {
1997 	unsigned int page_idx;
1998 	struct qdf_mem_dma_page_t *dma_pages;
1999 
2000 	if (!pages->page_size)
2001 		pages->page_size = qdf_page_size;
2002 
2003 	if (cacheable) {
2004 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2005 			qdf_mem_free(pages->cacheable_pages[page_idx]);
2006 		qdf_mem_free(pages->cacheable_pages);
2007 	} else {
2008 		dma_pages = pages->dma_pages;
2009 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2010 			qdf_mem_free_consistent(
2011 				osdev, osdev->dev, pages->page_size,
2012 				dma_pages->page_v_addr_start,
2013 				dma_pages->page_p_addr, memctxt);
2014 			dma_pages++;
2015 		}
2016 		qdf_mem_free(pages->dma_pages);
2017 	}
2018 
2019 	pages->cacheable_pages = NULL;
2020 	pages->dma_pages = NULL;
2021 	pages->num_pages = 0;
2022 	return;
2023 }
2024 qdf_export_symbol(qdf_mem_multi_pages_free);
2025 #endif
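
/*
 * Example (editorial sketch, not part of the driver): a typical caller
 * pairs qdf_mem_multi_pages_alloc() with qdf_mem_multi_pages_free(),
 * passing the same memctxt and cacheable flag to both; on failure the
 * alloc path leaves pages->num_pages at 0. The element size and count
 * below are illustrative only.
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 64, 2048, memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */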
2026 
2027 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2028 			      bool cacheable)
2029 {
2030 	unsigned int page_idx;
2031 	struct qdf_mem_dma_page_t *dma_pages;
2032 
2033 	if (!pages->page_size)
2034 		pages->page_size = qdf_page_size;
2035 
2036 	if (cacheable) {
2037 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2038 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2039 				     pages->page_size);
2040 	} else {
2041 		dma_pages = pages->dma_pages;
2042 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2043 			qdf_mem_zero(dma_pages->page_v_addr_start,
2044 				     pages->page_size);
2045 			dma_pages++;
2046 		}
2047 	}
2048 }
2049 
2050 qdf_export_symbol(qdf_mem_multi_pages_zero);
2051 
2052 void __qdf_mem_free(void *ptr)
2053 {
2054 	if (!ptr)
2055 		return;
2056 
2057 	if (qdf_might_be_prealloc(ptr)) {
2058 		if (qdf_mem_prealloc_put(ptr))
2059 			return;
2060 	}
2061 
2062 	qdf_mem_kmalloc_dec(ksize(ptr));
2063 
2064 	kfree(ptr);
2065 }
2066 
2067 qdf_export_symbol(__qdf_mem_free);
2068 
2069 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2070 {
2071 	void *ptr;
2072 
2073 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2074 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2075 			     line);
2076 		return NULL;
2077 	}
2078 
2079 	ptr = qdf_mem_prealloc_get(size);
2080 	if (ptr)
2081 		return ptr;
2082 
2083 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2084 	if (!ptr)
2085 		return NULL;
2086 
2087 	qdf_mem_kmalloc_inc(ksize(ptr));
2088 
2089 	return ptr;
2090 }
2091 
2092 qdf_export_symbol(__qdf_mem_malloc);
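
/*
 * Editorial note: callers are expected to use the qdf_mem_malloc() and
 * qdf_mem_free() wrappers from qdf_mem.h (assumed to resolve to
 * __qdf_mem_malloc()/__qdf_mem_free() when MEMORY_DEBUG is disabled)
 * rather than calling these helpers directly. A minimal sketch:
 *
 *	uint8_t *buf = qdf_mem_malloc(128);
 *
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(buf);
 */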
2093 
2094 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2095 void __qdf_untracked_mem_free(void *ptr)
2096 {
2097 	if (!ptr)
2098 		return;
2099 
2100 	kfree(ptr);
2101 }
2102 
2103 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2104 {
2105 	void *ptr;
2106 
2107 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2108 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2109 			     line);
2110 		return NULL;
2111 	}
2112 
2113 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2114 	if (!ptr)
2115 		return NULL;
2116 
2117 	return ptr;
2118 }
2119 #endif
2120 
2121 void *qdf_aligned_malloc_fl(uint32_t *size,
2122 			    void **vaddr_unaligned,
2123 				qdf_dma_addr_t *paddr_unaligned,
2124 				qdf_dma_addr_t *paddr_aligned,
2125 				uint32_t align,
2126 			    const char *func, uint32_t line)
2127 {
2128 	void *vaddr_aligned;
2129 	uint32_t align_alloc_size;
2130 
2131 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2132 			line);
2133 	if (!*vaddr_unaligned) {
2134 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2135 		return NULL;
2136 	}
2137 
2138 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2139 
2140 	/* Re-allocate with additional bytes to align the base address only
2141 	 * if the allocation above returns an unaligned address. The reason
2142 	 * for trying an exact-size allocation first is that the OS tries to
2143 	 * allocate blocks in power-of-2 pages and then frees the extra pages.
2144 	 * E.g., for a ring size of 1 MB, an aligned allocation would request
2145 	 * 1 MB plus 7 bytes for alignment, which causes a 2 MB block
2146 	 * allocation, and that sometimes fails due to memory
2147 	 * fragmentation.
2148 	 */
2149 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2150 		align_alloc_size = *size + align - 1;
2151 
2152 		qdf_mem_free(*vaddr_unaligned);
2153 		*vaddr_unaligned = qdf_mem_malloc_fl(
2154 				(qdf_size_t)align_alloc_size, func, line);
2155 		if (!*vaddr_unaligned) {
2156 			qdf_warn("Failed to alloc %uB @ %s:%d",
2157 				 align_alloc_size, func, line);
2158 			return NULL;
2159 		}
2160 
2161 		*paddr_unaligned = qdf_mem_virt_to_phys(
2162 				*vaddr_unaligned);
2163 		*size = align_alloc_size;
2164 	}
2165 
2166 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2167 		((unsigned long)(*paddr_unaligned), align);
2168 
2169 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2170 			((unsigned long)(*paddr_aligned) -
2171 			 (unsigned long)(*paddr_unaligned)));
2172 
2173 	return vaddr_aligned;
2174 }
2175 
2176 qdf_export_symbol(qdf_aligned_malloc_fl);
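
/*
 * Example (editorial sketch): allocating an 8-byte-aligned buffer. The
 * caller keeps vaddr_unaligned (and the possibly grown size) for the
 * eventual qdf_mem_free(), and uses the returned aligned virtual address
 * together with *paddr_aligned. Names are illustrative.
 *
 *	uint32_t size = ring_bytes;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *ring;
 *
 *	ring = qdf_aligned_malloc_fl(&size, &vaddr_unaligned,
 *				     &paddr_unaligned, &paddr_aligned,
 *				     8, __func__, __LINE__);
 *	if (!ring)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(vaddr_unaligned);
 */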
2177 
2178 /**
2179  * qdf_mem_multi_page_link() - Make links for multi page elements
2180  * @osdev: OS device handle pointer
2181  * @pages: Multi page information storage
2182  * @elem_size: Single element size
2183  * @elem_count: Number of elements to be linked
2184  * @cacheable: true for cacheable pages, false for coherent (DMA) pages
2185  *
2186  * This function links elements allocated across multiple pages into a list.
2187  *
2188  * Return: 0 on success
2189  */
2190 int qdf_mem_multi_page_link(qdf_device_t osdev,
2191 		struct qdf_mem_multi_page_t *pages,
2192 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2193 {
2194 	uint16_t i, i_int;
2195 	void *page_info;
2196 	void **c_elem = NULL;
2197 	uint32_t num_link = 0;
2198 
2199 	for (i = 0; i < pages->num_pages; i++) {
2200 		if (cacheable)
2201 			page_info = pages->cacheable_pages[i];
2202 		else
2203 			page_info = pages->dma_pages[i].page_v_addr_start;
2204 
2205 		if (!page_info)
2206 			return -ENOMEM;
2207 
2208 		c_elem = (void **)page_info;
2209 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2210 			if (i_int == (pages->num_element_per_page - 1)) {
2211 				if ((i + 1) == pages->num_pages)
2212 					break;
2213 				if (cacheable)
2214 					*c_elem = pages->
2215 						cacheable_pages[i + 1];
2216 				else
2217 					*c_elem = pages->
2218 						dma_pages[i + 1].
2219 							page_v_addr_start;
2220 				num_link++;
2221 				break;
2222 			} else {
2223 				*c_elem =
2224 					(void *)(((char *)c_elem) + elem_size);
2225 			}
2226 			num_link++;
2227 			c_elem = (void **)*c_elem;
2228 
2229 			/* Exit once the last link is established */
2230 			if (num_link == (elem_count - 1))
2231 				break;
2232 		}
2233 	}
2234 
2235 	if (c_elem)
2236 		*c_elem = NULL;
2237 
2238 	return 0;
2239 }
2240 qdf_export_symbol(qdf_mem_multi_page_link);
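
/*
 * Example (editorial sketch): after qdf_mem_multi_page_link(), the first
 * pointer-sized word of each linked element holds the address of the
 * next element and the final linked element holds NULL, so the pool can
 * be consumed as a singly linked free list. A head pop looks like:
 *
 *	void **freelist = pages.cacheable_pages[0];
 *	void *elem = (void *)freelist;
 *
 *	freelist = (void **)*freelist;
 */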
2241 
2242 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2243 {
2244 	/* special case where dst_addr or src_addr can be NULL */
2245 	if (!num_bytes)
2246 		return;
2247 
2248 	QDF_BUG(dst_addr);
2249 	QDF_BUG(src_addr);
2250 	if (!dst_addr || !src_addr)
2251 		return;
2252 
2253 	memcpy(dst_addr, src_addr, num_bytes);
2254 }
2255 qdf_export_symbol(qdf_mem_copy);
2256 
2257 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2258 {
2259 	qdf_shared_mem_t *shared_mem;
2260 	qdf_dma_addr_t dma_addr, paddr;
2261 	int ret;
2262 
2263 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2264 	if (!shared_mem)
2265 		return NULL;
2266 
2267 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2268 				size, qdf_mem_get_dma_addr_ptr(osdev,
2269 						&shared_mem->mem_info));
2270 	if (!shared_mem->vaddr) {
2271 		qdf_err("Unable to allocate DMA memory for shared resource");
2272 		qdf_mem_free(shared_mem);
2273 		return NULL;
2274 	}
2275 
2276 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2277 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2278 
2279 	qdf_mem_zero(shared_mem->vaddr, size);
2280 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2281 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2282 
2283 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2284 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2285 				      shared_mem->vaddr, dma_addr, size);
2286 	if (ret) {
2287 		qdf_err("Unable to get DMA sgtable");
2288 		qdf_mem_free_consistent(osdev, osdev->dev,
2289 					shared_mem->mem_info.size,
2290 					shared_mem->vaddr,
2291 					dma_addr,
2292 					qdf_get_dma_mem_context(shared_mem,
2293 								memctx));
2294 		qdf_mem_free(shared_mem);
2295 		return NULL;
2296 	}
2297 
2298 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2299 
2300 	return shared_mem;
2301 }
2302 
2303 qdf_export_symbol(qdf_mem_shared_mem_alloc);
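
/*
 * Example (editorial sketch): a caller keeps the returned qdf_shared_mem_t
 * and tears it down by mirroring the error path above. Illustrative only.
 *
 *	qdf_shared_mem_t *shm = qdf_mem_shared_mem_alloc(osdev, 4096);
 *
 *	if (!shm)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, shm->mem_info.size,
 *				shm->vaddr,
 *				qdf_mem_get_dma_addr(osdev, &shm->mem_info),
 *				qdf_get_dma_mem_context(shm, memctx));
 *	qdf_mem_free(shm);
 */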
2304 
2305 /**
2306  * qdf_mem_copy_toio() - copy memory to I/O memory
2307  * @dst_addr: Pointer to destination memory location (to copy to)
2308  * @src_addr: Pointer to source memory location (to copy from)
2309  * @num_bytes: Number of bytes to copy.
2310  *
2311  * Return: none
2312  */
2313 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2314 {
2315 	if (0 == num_bytes) {
2316 		/* special case where dst_addr or src_addr can be NULL */
2317 		return;
2318 	}
2319 
2320 	if ((!dst_addr) || (!src_addr)) {
2321 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2322 			  "%s called with NULL parameter, source:%pK destination:%pK",
2323 			  __func__, src_addr, dst_addr);
2324 		QDF_ASSERT(0);
2325 		return;
2326 	}
2327 	memcpy_toio(dst_addr, src_addr, num_bytes);
2328 }
2329 
2330 qdf_export_symbol(qdf_mem_copy_toio);
2331 
2332 /**
2333  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
2334  * @ptr: Pointer to memory that will be set
2335  * @num_bytes: Number of bytes to be set
2336  * @value: Byte value to set each byte of memory to
2337  *
2338  * Return: None
2339  */
2340 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2341 {
2342 	if (!ptr) {
2343 		qdf_print("%s called with NULL parameter ptr", __func__);
2344 		return;
2345 	}
2346 	memset_io(ptr, value, num_bytes);
2347 }
2348 
2349 qdf_export_symbol(qdf_mem_set_io);
2350 
2351 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2352 {
2353 	QDF_BUG(ptr);
2354 	if (!ptr)
2355 		return;
2356 
2357 	memset(ptr, value, num_bytes);
2358 }
2359 qdf_export_symbol(qdf_mem_set);
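
/*
 * Editorial note: unlike memset(), qdf_mem_set() takes the byte count
 * before the fill value, so zeroing a buffer reads:
 *
 *	qdf_mem_set(buf, buf_len, 0);
 *
 * and not qdf_mem_set(buf, 0, buf_len), which would set zero bytes.
 */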
2360 
2361 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2362 {
2363 	/* special case where dst_addr or src_addr can be NULL */
2364 	if (!num_bytes)
2365 		return;
2366 
2367 	QDF_BUG(dst_addr);
2368 	QDF_BUG(src_addr);
2369 	if (!dst_addr || !src_addr)
2370 		return;
2371 
2372 	memmove(dst_addr, src_addr, num_bytes);
2373 }
2374 qdf_export_symbol(qdf_mem_move);
2375 
2376 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2377 {
2378 	QDF_BUG(left);
2379 	QDF_BUG(right);
2380 
2381 	return memcmp(left, right, size);
2382 }
2383 qdf_export_symbol(qdf_mem_cmp);
2384 
2385 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2386 /**
2387  * qdf_mem_dma_alloc() - allocate memory for DMA
2388  * @osdev: OS device handle
2389  * @dev: Pointer to device handle
2390  * @size: Size to be allocated
2391  * @phy_addr: Pointer populated with the physical address of the allocation
2392  *
2393  * Return: pointer to allocated memory, or NULL if the allocation fails
2394  */
2395 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2396 				      qdf_size_t size,
2397 				      qdf_dma_addr_t *phy_addr)
2398 {
2399 	void *vaddr;
2400 
2401 	vaddr = qdf_mem_malloc(size);
2402 	*phy_addr = ((uintptr_t) vaddr);
2403 	/* Use this type conversion to suppress the "cast from pointer to
2404 	 * integer of different size" warning seen on some platforms
2405 	 */
2406 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2407 	return vaddr;
2408 }
2409 
2410 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2411 	!defined(QCA_WIFI_QCN9000)
2412 
2413 #define QCA8074_RAM_BASE 0x50000000
2414 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
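
/*
 * Editorial note: the retry loop below keeps a coherent allocation only
 * when its physical address lands at or above QCA8074_RAM_BASE
 * (presumably the lowest address the emulated target can DMA to);
 * allocations below that boundary are freed and retried, up to
 * QDF_MEM_ALLOC_X86_MAX_RETRIES times.
 */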
2415 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2416 			qdf_dma_addr_t *phy_addr)
2417 {
2418 	void *vaddr = NULL;
2419 	int i;
2420 
2421 	*phy_addr = 0;
2422 
2423 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2424 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2425 					   qdf_mem_malloc_flags());
2426 
2427 		if (!vaddr) {
2428 			qdf_err("%s failed, size: %zu!", __func__, size);
2429 			return NULL;
2430 		}
2431 
2432 		if (*phy_addr >= QCA8074_RAM_BASE)
2433 			return vaddr;
2434 
2435 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2436 	}
2437 
2438 	return NULL;
2439 }
2440 
2441 #else
2442 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2443 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2444 {
2445 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2446 }
2447 #endif
2448 
2449 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2450 static inline void
2451 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2452 {
2453 	qdf_mem_free(vaddr);
2454 }
2455 #else
2456 
2457 static inline void
2458 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2459 {
2460 	dma_free_coherent(dev, size, vaddr, paddr);
2461 }
2462 #endif
2463 
2464 #ifdef MEMORY_DEBUG
2465 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2466 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2467 				     const char *func, uint32_t line,
2468 				     void *caller)
2469 {
2470 	QDF_STATUS status;
2471 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2472 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2473 	struct qdf_mem_header *header;
2474 	void *vaddr;
2475 
2476 	if (is_initial_mem_debug_disabled)
2477 		return __qdf_mem_alloc_consistent(osdev, dev,
2478 						  size, paddr,
2479 						  func, line);
2480 
2481 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2482 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2483 		return NULL;
2484 	}
2485 
2486 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2487 				   paddr);
2488 
2489 	if (!vaddr) {
2490 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2491 		return NULL;
2492 	}
2493 
2494 	header = qdf_mem_dma_get_header(vaddr, size);
2495 	/* For DMA buffers we only add a trailer; the header structure is
2496 	 * initialized at the tail of the buffer.
2497 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2498 	 * do not prefix headers to DMA buffers.
2499 	 */
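	/*
	 * Resulting layout (editorial sketch; trailer width assumed to be
	 * QDF_DMA_MEM_DEBUG_SIZE, matching the over-allocation above):
	 *
	 *	vaddr                          vaddr + size
	 *	|<----------- size ---------->|<-- debug trailer (header) -->|
	 */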
2500 	qdf_mem_header_init(header, size, func, line, caller);
2501 
2502 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2503 	status = qdf_list_insert_front(mem_list, &header->node);
2504 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2505 	if (QDF_IS_STATUS_ERROR(status))
2506 		qdf_err("Failed to insert memory header; status %d", status);
2507 
2508 	qdf_mem_dma_inc(size);
2509 
2510 	return vaddr;
2511 }
2512 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2513 
2514 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2515 				   qdf_size_t size, void *vaddr,
2516 				   qdf_dma_addr_t paddr,
2517 				   qdf_dma_context_t memctx,
2518 				   const char *func, uint32_t line)
2519 {
2520 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2521 	struct qdf_mem_header *header;
2522 	enum qdf_mem_validation_bitmap error_bitmap;
2523 
2524 	if (is_initial_mem_debug_disabled) {
2525 		__qdf_mem_free_consistent(
2526 					  osdev, dev,
2527 					  size, vaddr,
2528 					  paddr, memctx);
2529 		return;
2530 	}
2531 
2532 	/* freeing a null pointer is valid */
2533 	if (qdf_unlikely(!vaddr))
2534 		return;
2535 
2536 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2537 
2538 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2539 	/* For DMA buffers we only add a trailer; the header structure is
2540 	 * retrieved from the tail of the buffer.
2541 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2542 	 * do not prefix headers to DMA buffers.
2543 	 */
2544 	header = qdf_mem_dma_get_header(vaddr, size);
2545 	error_bitmap = qdf_mem_header_validate(header, domain);
2546 	if (!error_bitmap) {
2547 		header->freed = true;
2548 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2549 				     &header->node);
2550 	}
2551 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2552 
2553 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2554 
2555 	qdf_mem_dma_dec(header->size);
2556 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2557 }
2558 qdf_export_symbol(qdf_mem_free_consistent_debug);
2559 #endif /* MEMORY_DEBUG */
2560 
2561 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2562 			       qdf_size_t size, void *vaddr,
2563 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2564 {
2565 	qdf_mem_dma_dec(size);
2566 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2567 }
2568 
2569 qdf_export_symbol(__qdf_mem_free_consistent);
2570 
2571 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2572 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2573 				 const char *func, uint32_t line)
2574 {
2575 	void *vaddr;
2576 
2577 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2578 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2579 			     size, func, line);
2580 		return NULL;
2581 	}
2582 
2583 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2584 
2585 	if (vaddr)
2586 		qdf_mem_dma_inc(size);
2587 
2588 	return vaddr;
2589 }
2590 
2591 qdf_export_symbol(__qdf_mem_alloc_consistent);
2592 
2593 void *qdf_aligned_mem_alloc_consistent_fl(
2594 	qdf_device_t osdev, uint32_t *size,
2595 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2596 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2597 	const char *func, uint32_t line)
2598 {
2599 	void *vaddr_aligned;
2600 	uint32_t align_alloc_size;
2601 
2602 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2603 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2604 	if (!*vaddr_unaligned) {
2605 		qdf_warn("Failed to alloc %uB @ %s:%d",
2606 			 *size, func, line);
2607 		return NULL;
2608 	}
2609 
2610 	/* Re-allocate with additional bytes to align the base address only
2611 	 * if the allocation above returns an unaligned address. The reason
2612 	 * for trying an exact-size allocation first is that the OS tries to
2613 	 * allocate blocks in power-of-2 pages and then frees the extra pages.
2614 	 * E.g., for a ring size of 1 MB, an aligned allocation would request
2615 	 * 1 MB plus 7 bytes for alignment, which causes a 2 MB block
2616 	 * allocation, and that sometimes fails due to memory
2617 	 * fragmentation.
2618 	 */
2619 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2620 		align_alloc_size = *size + align - 1;
2621 
2622 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2623 					*vaddr_unaligned,
2624 					*paddr_unaligned, 0);
2625 
2626 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2627 				osdev, osdev->dev, align_alloc_size,
2628 				paddr_unaligned);
2629 		if (!*vaddr_unaligned) {
2630 			qdf_warn("Failed to alloc %uB @ %s:%d",
2631 				 align_alloc_size, func, line);
2632 			return NULL;
2633 		}
2634 
2635 		*size = align_alloc_size;
2636 	}
2637 
2638 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2639 			(unsigned long)(*paddr_unaligned), align);
2640 
2641 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2642 				 ((unsigned long)(*paddr_aligned) -
2643 				  (unsigned long)(*paddr_unaligned)));
2644 
2645 	return vaddr_aligned;
2646 }
2647 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
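
/*
 * Worked example (editorial): with *paddr_unaligned == 0x1003 and
 * align == 8, qdf_align() rounds the physical address up so that
 * *paddr_aligned == 0x1008, and the same 5-byte offset is added to
 * *vaddr_unaligned to produce the returned aligned virtual address.
 */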
2648 
2649 /**
2650  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2651  * @osdev: OS device handle
2652  * @bus_addr: DMA address to give to the device
2653  * @size: Size of the memory block
2654  * @direction: direction in which the data will be DMAed
2655  *
2656  * Assign memory to the remote device.
2657  * The cache lines are flushed to RAM or invalidated as needed.
2658  *
2659  * Return: none
2660  */
2661 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2662 					qdf_dma_addr_t bus_addr,
2663 					qdf_size_t size,
2664 					enum dma_data_direction direction)
2665 {
2666 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2667 }
2668 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2669 
2670 /**
2671  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2672  * @osdev: OS device handle
2673  * @bus_addr: DMA address to give to the CPU
2674  * @size: Size of the memory block
2675  * @direction: direction in which the data will be DMAed
2676  *
2677  * Assign memory to the CPU.
2678  *
2679  * Return: none
2680  */
2681 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2682 				     qdf_dma_addr_t bus_addr,
2683 				     qdf_size_t size,
2684 				     enum dma_data_direction direction)
2685 {
2686 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2687 }
2688 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
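
/*
 * Example (editorial sketch): the usual ownership hand-off around a CPU
 * read of a device-written buffer; process_rx_buffer() is illustrative.
 *
 *	qdf_mem_dma_sync_single_for_cpu(osdev, bus_addr, len,
 *					DMA_FROM_DEVICE);
 *	process_rx_buffer(vaddr, len);
 *	qdf_mem_dma_sync_single_for_device(osdev, bus_addr, len,
 *					   DMA_FROM_DEVICE);
 */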
2689 
2690 void qdf_mem_init(void)
2691 {
2692 	qdf_mem_debug_init();
2693 	qdf_net_buf_debug_init();
2694 	qdf_frag_debug_init();
2695 	qdf_mem_debugfs_init();
2696 	qdf_mem_debug_debugfs_init();
2697 }
2698 qdf_export_symbol(qdf_mem_init);
2699 
2700 void qdf_mem_exit(void)
2701 {
2702 	qdf_mem_debug_debugfs_exit();
2703 	qdf_mem_debugfs_exit();
2704 	qdf_frag_debug_exit();
2705 	qdf_net_buf_debug_exit();
2706 	qdf_mem_debug_exit();
2707 }
2708 qdf_export_symbol(qdf_mem_exit);
2709 
2710 /**
2711  * qdf_ether_addr_copy() - copy an Ethernet address
2712  *
2713  * @dst_addr: Pointer to the six-byte destination Ethernet address
2714  * @src_addr: Pointer to the six-byte source Ethernet address
2715  *
2716  * Please note: dst & src must both be aligned to u16.
2717  *
2718  * Return: none
2719  */
2720 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2721 {
2722 	if ((!dst_addr) || (!src_addr)) {
2723 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2724 			  "%s called with NULL parameter, source:%pK destination:%pK",
2725 			  __func__, src_addr, dst_addr);
2726 		QDF_ASSERT(0);
2727 		return;
2728 	}
2729 	ether_addr_copy(dst_addr, src_addr);
2730 }
2731 qdf_export_symbol(qdf_ether_addr_copy);
2732 
2733 int32_t qdf_dma_mem_stats_read(void)
2734 {
2735 	return qdf_atomic_read(&qdf_mem_stat.dma);
2736 }
2737 
2738 qdf_export_symbol(qdf_dma_mem_stats_read);
2739 
2740 int32_t qdf_heap_mem_stats_read(void)
2741 {
2742 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2743 }
2744 
2745 qdf_export_symbol(qdf_heap_mem_stats_read);
2746 
2747 int32_t qdf_skb_mem_stats_read(void)
2748 {
2749 	return qdf_atomic_read(&qdf_mem_stat.skb);
2750 }
2751 
2752 qdf_export_symbol(qdf_skb_mem_stats_read);
2753 
2754 int32_t qdf_skb_total_mem_stats_read(void)
2755 {
2756 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2757 }
2758 
2759 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2760 
2761 int32_t qdf_skb_max_mem_stats_read(void)
2762 {
2763 	return qdf_mem_stat.skb_mem_max;
2764 }
2765 
2766 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2767 
2768 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2769 {
2770 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2771 }
2772 
2773 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2774 
2775 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2776 {
2777 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2778 }
2779 
2780 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2781 
2782 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2783 {
2784 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2785 }
2786 
2787 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2788 
2789 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2790 {
2791 	return qdf_mem_stat.dp_tx_skb_count_max;
2792 }
2793 
2794 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2795 
2796 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2797 {
2798 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2799 }
2800 
2801 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2802 
2803 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2804 {
2805 	return qdf_mem_stat.dp_rx_skb_count_max;
2806 }
2807 
2808 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2809 
2810 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2811 {
2812 	return qdf_mem_stat.dp_tx_skb_mem_max;
2813 }
2814 
2815 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2816 
2817 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2818 {
2819 	return qdf_mem_stat.dp_rx_skb_mem_max;
2820 }
2821 
2822 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2823 
2824 int32_t qdf_mem_tx_desc_cnt_read(void)
2825 {
2826 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2827 }
2828 
2829 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2830 
2831 int32_t qdf_mem_tx_desc_max_read(void)
2832 {
2833 	return qdf_mem_stat.tx_descs_max;
2834 }
2835 
2836 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2837 
2838 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2839 				int32_t tx_descs_max)
2840 {
2841 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2842 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2843 }
2844 
2845 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2846 
2847 void qdf_mem_stats_init(void)
2848 {
2849 	qdf_mem_stat.skb_mem_max = 0;
2850 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
2851 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
2852 	qdf_mem_stat.dp_tx_skb_count_max = 0;
2853 	qdf_mem_stat.dp_rx_skb_count_max = 0;
2854 	qdf_mem_stat.tx_descs_max = 0;
2855 }
2856 
2857 qdf_export_symbol(qdf_mem_stats_init);
2858 
2859