xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * This file provides OS dependent memory management APIs
23  */
24 
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46 
47 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
48 static bool mem_debug_disabled;
49 qdf_declare_param(mem_debug_disabled, bool);
50 qdf_export_symbol(mem_debug_disabled);
51 #endif
52 
53 #ifdef MEMORY_DEBUG
54 static bool is_initial_mem_debug_disabled;
55 #endif
56 
57 /* Preprocessor Definitions and Constants */
58 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
59 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
60 #define QDF_DEBUG_STRING_SIZE 512
61 
62 /**
63  * struct __qdf_mem_stat - qdf memory statistics
64  * @kmalloc: total kmalloc allocations
65  * @dma: total dma allocations
66  * @skb: total skb allocations
67  * @skb_total: total skb allocations in host driver
68  * @dp_tx_skb: total Tx skb allocations in datapath
69  * @dp_rx_skb: total Rx skb allocations in datapath
70  * @skb_mem_max: high watermark for skb allocations
71  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
72  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
73  * @dp_tx_skb_count: DP Tx buffer count
74  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
75  * @dp_rx_skb_count: DP Rx buffer count
76  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
77  * @tx_descs_outstanding: Current pending Tx descs count
78  * @tx_descs_max: High watermark for pending Tx descs count
79  */
80 static struct __qdf_mem_stat {
81 	qdf_atomic_t kmalloc;
82 	qdf_atomic_t dma;
83 	qdf_atomic_t skb;
84 	qdf_atomic_t skb_total;
85 	qdf_atomic_t dp_tx_skb;
86 	qdf_atomic_t dp_rx_skb;
87 	int32_t skb_mem_max;
88 	int32_t dp_tx_skb_mem_max;
89 	int32_t dp_rx_skb_mem_max;
90 	qdf_atomic_t dp_tx_skb_count;
91 	int32_t dp_tx_skb_count_max;
92 	qdf_atomic_t dp_rx_skb_count;
93 	int32_t dp_rx_skb_count_max;
94 	qdf_atomic_t tx_descs_outstanding;
95 	int32_t tx_descs_max;
96 } qdf_mem_stat;
97 
98 #ifdef MEMORY_DEBUG
99 #include "qdf_debug_domain.h"
100 
101 enum list_type {
102 	LIST_TYPE_MEM = 0,
103 	LIST_TYPE_DMA = 1,
104 	LIST_TYPE_NBUF = 2,
105 	LIST_TYPE_MAX,
106 };
107 
108 /**
109  * struct major_alloc_priv - private data registered to debugfs entry created
110  *                   to list the major allocations
111  * @type:            type of the list to be parsed
112  * @threshold:       configured by user by overwriting the respective debugfs
113  *                   sys entry. This is to list the functions which requested
114  *                   memory/dma allocations more than threshold number of times.
115  */
116 struct major_alloc_priv {
117 	enum list_type type;
118 	uint32_t threshold;
119 };
120 
121 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
122 static qdf_spinlock_t qdf_mem_list_lock;
123 
124 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
125 static qdf_spinlock_t qdf_mem_dma_list_lock;
126 
127 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
128 {
129 	return &qdf_mem_domains[domain];
130 }
131 
132 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
133 {
134 	return &qdf_mem_dma_domains[domain];
135 }
136 
137 /**
138  * struct qdf_mem_header - memory object to debug
139  * @node: node to the list
140  * @domain: the active memory domain at time of allocation
141  * @freed: flag set during free, used to detect double frees
142  *	Use uint8_t so we can detect corruption
143  * @func: name of the function the allocation was made from
144  * @line: line number of the file the allocation was made from
145  * @size: size of the allocation in bytes
146  * @caller: Caller of the function for which memory is allocated
147  * @header: a known value, used to detect out-of-bounds access
148  * @time: timestamp at which allocation was made
149  */
150 struct qdf_mem_header {
151 	qdf_list_node_t node;
152 	enum qdf_debug_domain domain;
153 	uint8_t freed;
154 	char func[QDF_MEM_FUNC_NAME_SIZE];
155 	uint32_t line;
156 	uint32_t size;
157 	void *caller;
158 	uint64_t header;
159 	uint64_t time;
160 };
161 
162 /* align the qdf_mem_header to 8 bytes */
163 #define QDF_DMA_MEM_HEADER_ALIGN 8
164 
165 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
166 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
167 
168 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
169 {
170 	return (struct qdf_mem_header *)ptr - 1;
171 }
172 
173 /* make sure the header pointer is 8-byte aligned */
174 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
175 							    qdf_size_t size)
176 {
177 	return (struct qdf_mem_header *)
178 				qdf_roundup((size_t)((uint8_t *)ptr + size),
179 					    QDF_DMA_MEM_HEADER_ALIGN);
180 }
181 
182 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
183 {
184 	return (uint64_t *)((void *)(header + 1) + header->size);
185 }
186 
187 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
188 {
189 	return (void *)(header + 1);
190 }
191 
192 /* number of bytes needed for the qdf memory debug information */
193 #define QDF_MEM_DEBUG_SIZE \
194 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
195 
196 /* number of bytes needed for the qdf dma memory debug information */
197 #define QDF_DMA_MEM_DEBUG_SIZE \
198 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
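/*
 * Illustrative sketch (not part of the driver): with MEMORY_DEBUG enabled,
 * a request for N user bytes is laid out as
 *
 *   | struct qdf_mem_header | N user bytes | WLAN_MEM_TRAILER |
 *
 * so, assuming a hypothetical header obtained from kzalloc(N +
 * QDF_MEM_DEBUG_SIZE, ...), the accessors above relate as follows:
 *
 *   void *ptr = qdf_mem_get_ptr(header);                 // header + 1
 *   struct qdf_mem_header *h = qdf_mem_get_header(ptr);  // == header
 *   uint64_t *trailer = qdf_mem_get_trailer(header);     // ptr + header->size
 */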
199 
200 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
201 {
202 	QDF_BUG(header);
203 	if (!header)
204 		return;
205 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
206 }
207 
208 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
209 				const char *func, uint32_t line, void *caller)
210 {
211 	QDF_BUG(header);
212 	if (!header)
213 		return;
214 
215 	header->domain = qdf_debug_domain_get();
216 	header->freed = false;
217 
218 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
219 
220 	header->line = line;
221 	header->size = size;
222 	header->caller = caller;
223 	header->header = WLAN_MEM_HEADER;
224 	header->time = qdf_get_log_timestamp();
225 }
226 
227 enum qdf_mem_validation_bitmap {
228 	QDF_MEM_BAD_HEADER = 1 << 0,
229 	QDF_MEM_BAD_TRAILER = 1 << 1,
230 	QDF_MEM_BAD_SIZE = 1 << 2,
231 	QDF_MEM_DOUBLE_FREE = 1 << 3,
232 	QDF_MEM_BAD_FREED = 1 << 4,
233 	QDF_MEM_BAD_NODE = 1 << 5,
234 	QDF_MEM_BAD_DOMAIN = 1 << 6,
235 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
236 };
237 
238 static enum qdf_mem_validation_bitmap
239 qdf_mem_trailer_validate(struct qdf_mem_header *header)
240 {
241 	enum qdf_mem_validation_bitmap error_bitmap = 0;
242 
243 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
244 		error_bitmap |= QDF_MEM_BAD_TRAILER;
245 	return error_bitmap;
246 }
247 
248 static enum qdf_mem_validation_bitmap
249 qdf_mem_header_validate(struct qdf_mem_header *header,
250 			enum qdf_debug_domain domain)
251 {
252 	enum qdf_mem_validation_bitmap error_bitmap = 0;
253 
254 	if (header->header != WLAN_MEM_HEADER)
255 		error_bitmap |= QDF_MEM_BAD_HEADER;
256 
257 	if (header->size > QDF_MEM_MAX_MALLOC)
258 		error_bitmap |= QDF_MEM_BAD_SIZE;
259 
260 	if (header->freed == true)
261 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
262 	else if (header->freed)
263 		error_bitmap |= QDF_MEM_BAD_FREED;
264 
265 	if (!qdf_list_node_in_any_list(&header->node))
266 		error_bitmap |= QDF_MEM_BAD_NODE;
267 
268 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
269 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
270 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
271 	else if (header->domain != domain)
272 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
273 
274 	return error_bitmap;
275 }
276 
277 static void
278 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
279 			    enum qdf_debug_domain current_domain,
280 			    enum qdf_mem_validation_bitmap error_bitmap,
281 			    const char *func,
282 			    uint32_t line)
283 {
284 	if (!error_bitmap)
285 		return;
286 
287 	if (error_bitmap & QDF_MEM_BAD_HEADER)
288 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
289 			header->header, WLAN_MEM_HEADER);
290 
291 	if (error_bitmap & QDF_MEM_BAD_SIZE)
292 		qdf_err("Corrupted memory size %u (expected < %d)",
293 			header->size, QDF_MEM_MAX_MALLOC);
294 
295 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
296 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
297 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
298 
299 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
300 		qdf_err("Memory has previously been freed");
301 
302 	if (error_bitmap & QDF_MEM_BAD_FREED)
303 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
304 
305 	if (error_bitmap & QDF_MEM_BAD_NODE)
306 		qdf_err("Corrupted memory header node or double free");
307 
308 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
309 		qdf_err("Corrupted memory domain 0x%x", header->domain);
310 
311 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
312 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
313 			qdf_debug_domain_name(header->domain), header->domain,
314 			qdf_debug_domain_name(current_domain), current_domain);
315 
316 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
317 }
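/*
 * Illustrative sketch (not part of the driver): the validation helpers above
 * accumulate one bit per detected problem, so a buffer whose header magic was
 * overwritten and whose freed flag is already set would yield
 *
 *   error_bitmap = QDF_MEM_BAD_HEADER | QDF_MEM_DOUBLE_FREE;
 *
 * and qdf_mem_header_assert_valid() then prints one error line per set bit
 * before panicking via QDF_MEMDEBUG_PANIC().
 */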
318 
319 /**
320  * struct __qdf_mem_info - memory statistics
321  * @func: the function which allocated memory
322  * @line: the line at which allocation happened
323  * @size: the size of allocation
324  * @caller: Address of the caller function
325  * @count: how many allocations of same type
326  * @time: timestamp at which allocation happened
327  */
328 struct __qdf_mem_info {
329 	char func[QDF_MEM_FUNC_NAME_SIZE];
330 	uint32_t line;
331 	uint32_t size;
332 	void *caller;
333 	uint32_t count;
334 	uint64_t time;
335 };
336 
337 /*
338  * The table depth defines the de-duplication proximity scope.
339  * A deeper table takes more time to process, so choose an optimal value.
340  */
341 #define QDF_MEM_STAT_TABLE_SIZE 8
342 
343 /**
344  * qdf_mem_debug_print_header() - memory debug header print logic
345  * @print: the print adapter function
346  * @print_priv: the private data to be consumed by @print
347  * @threshold: the threshold value set by user to list top allocations
348  *
349  * Return: None
350  */
351 static void qdf_mem_debug_print_header(qdf_abstract_print print,
352 				       void *print_priv,
353 				       uint32_t threshold)
354 {
355 	if (threshold)
356 		print(print_priv, "APIs that requested allocations >= %u times",
357 		      threshold);
358 	print(print_priv,
359 	      "--------------------------------------------------------------");
360 	print(print_priv,
361 	      " count    size     total    filename     caller    timestamp");
362 	print(print_priv,
363 	      "--------------------------------------------------------------");
364 }
365 
366 /**
367  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
368  * @table: the memory metadata table to insert into
369  * @meta: the memory metadata to insert
370  *
371  * Return: true if the table is full after inserting, false otherwise
372  */
373 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
374 				      struct qdf_mem_header *meta)
375 {
376 	int i;
377 
378 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
379 		if (!table[i].count) {
380 			qdf_str_lcopy(table[i].func, meta->func,
381 				      QDF_MEM_FUNC_NAME_SIZE);
382 			table[i].line = meta->line;
383 			table[i].size = meta->size;
384 			table[i].count = 1;
385 			table[i].caller = meta->caller;
386 			table[i].time = meta->time;
387 			break;
388 		}
389 
390 		if (qdf_str_eq(table[i].func, meta->func) &&
391 		    table[i].line == meta->line &&
392 		    table[i].size == meta->size &&
393 		    table[i].caller == meta->caller) {
394 			table[i].count++;
395 			break;
396 		}
397 	}
398 
399 	/* return true if the table is now full */
400 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
401 }
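/*
 * Illustrative sketch (hypothetical metadata, not part of the driver):
 * repeated allocations from the same func/line/size/caller collapse into a
 * single table row, while a different call site takes the next free slot:
 *
 *   struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE] = { {0} };
 *
 *   qdf_mem_meta_table_insert(table, &meta_a);  // table[0].count == 1
 *   qdf_mem_meta_table_insert(table, &meta_a);  // table[0].count == 2
 *   qdf_mem_meta_table_insert(table, &meta_b);  // table[1].count == 1
 *
 * The return value tells the caller that the last slot has been consumed,
 * which is the cue to print and zero the table before continuing the walk.
 */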
402 
403 /**
404  * qdf_mem_domain_print() - output agnostic memory domain print logic
405  * @domain: the memory domain to print
406  * @print: the print adapter function
407  * @print_priv: the private data to be consumed by @print
408  * @threshold: the threshold value set by user to list top allocations
409  * @mem_print: pointer to function which prints the memory allocation data
410  *
411  * Return: None
412  */
413 static void qdf_mem_domain_print(qdf_list_t *domain,
414 				 qdf_abstract_print print,
415 				 void *print_priv,
416 				 uint32_t threshold,
417 				 void (*mem_print)(struct __qdf_mem_info *,
418 						   qdf_abstract_print,
419 						   void *, uint32_t))
420 {
421 	QDF_STATUS status;
422 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
423 	qdf_list_node_t *node;
424 
425 	qdf_mem_zero(table, sizeof(table));
426 	qdf_mem_debug_print_header(print, print_priv, threshold);
427 
428 	/* hold lock while inserting to avoid use-after-free of the metadata */
429 	qdf_spin_lock(&qdf_mem_list_lock);
430 	status = qdf_list_peek_front(domain, &node);
431 	while (QDF_IS_STATUS_SUCCESS(status)) {
432 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
433 		bool is_full = qdf_mem_meta_table_insert(table, meta);
434 
435 		qdf_spin_unlock(&qdf_mem_list_lock);
436 
437 		if (is_full) {
438 			(*mem_print)(table, print, print_priv, threshold);
439 			qdf_mem_zero(table, sizeof(table));
440 		}
441 
442 		qdf_spin_lock(&qdf_mem_list_lock);
443 		status = qdf_list_peek_next(domain, node, &node);
444 	}
445 	qdf_spin_unlock(&qdf_mem_list_lock);
446 
447 	(*mem_print)(table, print, print_priv, threshold);
448 }
449 
450 /**
451  * qdf_mem_meta_table_print() - memory metadata table print logic
452  * @table: the memory metadata table to print
453  * @print: the print adapter function
454  * @print_priv: the private data to be consumed by @print
455  * @threshold: the threshold value set by user to list top allocations
456  *
457  * Return: None
458  */
459 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
460 				     qdf_abstract_print print,
461 				     void *print_priv,
462 				     uint32_t threshold)
463 {
464 	int i;
465 	char debug_str[QDF_DEBUG_STRING_SIZE];
466 	size_t len = 0;
467 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
468 
469 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
470 			     "%s", debug_prefix);
471 
472 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
473 		if (!table[i].count)
474 			break;
475 
476 		print(print_priv,
477 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
478 		      table[i].count,
479 		      table[i].size,
480 		      table[i].count * table[i].size,
481 		      table[i].func,
482 		      table[i].line, table[i].caller,
483 		      table[i].time);
484 		len += qdf_scnprintf(debug_str + len,
485 				     sizeof(debug_str) - len,
486 				     " @ %s:%u %pS",
487 				     table[i].func,
488 				     table[i].line,
489 				     table[i].caller);
490 	}
491 	print(print_priv, "%s", debug_str);
492 }
493 
494 static int qdf_err_printer(void *priv, const char *fmt, ...)
495 {
496 	va_list args;
497 
498 	va_start(args, fmt);
499 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
500 	va_end(args);
501 
502 	return 0;
503 }
504 
505 #endif /* MEMORY_DEBUG */
506 
507 bool prealloc_disabled = 1;
508 qdf_declare_param(prealloc_disabled, bool);
509 qdf_export_symbol(prealloc_disabled);
510 
511 int qdf_mem_malloc_flags(void)
512 {
513 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
514 		return GFP_ATOMIC;
515 
516 	return GFP_KERNEL;
517 }
518 
519 qdf_export_symbol(qdf_mem_malloc_flags);
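/*
 * Usage sketch (hypothetical caller, not part of the driver): callers that
 * may run in either process or atomic context can defer the GFP decision to
 * the helper above:
 *
 *   buf = kzalloc(len, qdf_mem_malloc_flags());
 *
 * In plain process context this resolves to GFP_KERNEL; in interrupt
 * context, with preemption disabled, or (on preemptible-RCU kernels) inside
 * an RCU read-side critical section it resolves to GFP_ATOMIC so the
 * allocation never sleeps.
 */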
520 
521 /**
522  * qdf_prealloc_disabled_config_get() - Get the user configuration of
523  *                                       prealloc_disabled
524  *
525  * Return: value of prealloc_disabled qdf module argument
526  */
527 bool qdf_prealloc_disabled_config_get(void)
528 {
529 	return prealloc_disabled;
530 }
531 
532 qdf_export_symbol(qdf_prealloc_disabled_config_get);
533 
534 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
535 /**
536  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
537  * @str_value: value of the module param
538  *
539  * This function will set qdf module param prealloc_disabled
540  *
541  * Return: QDF_STATUS_SUCCESS on Success
542  */
543 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
544 {
545 	QDF_STATUS status;
546 
547 	status = qdf_bool_parse(str_value, &prealloc_disabled);
548 	return status;
549 }
550 #endif
551 
552 #if defined WLAN_DEBUGFS
553 
554 /* Debugfs root directory for qdf_mem */
555 static struct dentry *qdf_mem_debugfs_root;
556 
557 #ifdef MEMORY_DEBUG
558 static int seq_printf_printer(void *priv, const char *fmt, ...)
559 {
560 	struct seq_file *file = priv;
561 	va_list args;
562 
563 	va_start(args, fmt);
564 	seq_vprintf(file, fmt, args);
565 	seq_puts(file, "\n");
566 	va_end(args);
567 
568 	return 0;
569 }
570 
571 /**
572  * qdf_print_major_alloc() - memory metadata table print logic
573  * @table: the memory metadata table to print
574  * @print: the print adapter function
575  * @print_priv: the private data to be consumed by @print
576  * @threshold: the threshold value set by user to list top allocations
577  *
578  * Return: None
579  */
580 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
581 				  qdf_abstract_print print,
582 				  void *print_priv,
583 				  uint32_t threshold)
584 {
585 	int i;
586 
587 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
588 		if (!table[i].count)
589 			break;
590 		if (table[i].count >= threshold)
591 			print(print_priv,
592 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
593 			      table[i].count,
594 			      table[i].size,
595 			      table[i].count * table[i].size,
596 			      table[i].func,
597 			      table[i].line, table[i].caller,
598 			      table[i].time);
599 	}
600 }
601 
602 /**
603  * qdf_mem_seq_start() - sequential callback to start
604  * @seq: seq_file handle
605  * @pos: The start position of the sequence
606  *
607  * Return: iterator pointer, or NULL if iteration is complete
608  */
609 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
610 {
611 	enum qdf_debug_domain domain = *pos;
612 
613 	if (!qdf_debug_domain_valid(domain))
614 		return NULL;
615 
616 	/* just use the current position as our iterator */
617 	return pos;
618 }
619 
620 /**
621  * qdf_mem_seq_next() - next sequential callback
622  * @seq: seq_file handle
623  * @v: the current iterator
624  * @pos: the current position
625  *
626  * Get the next node and release previous node.
627  *
628  * Return: iterator pointer, or NULL if iteration is complete
629  */
630 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
631 {
632 	++*pos;
633 
634 	return qdf_mem_seq_start(seq, pos);
635 }
636 
637 /**
638  * qdf_mem_seq_stop() - stop sequential callback
639  * @seq: seq_file handle
640  * @v: current iterator
641  *
642  * Return: None
643  */
644 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
645 
646 /**
647  * qdf_mem_seq_show() - print sequential callback
648  * @seq: seq_file handle
649  * @v: current iterator
650  *
651  * Return: 0 - success
652  */
653 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
654 {
655 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
656 
657 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
658 		   qdf_debug_domain_name(domain_id), domain_id);
659 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
660 			     seq_printf_printer,
661 			     seq,
662 			     0,
663 			     qdf_mem_meta_table_print);
664 
665 	return 0;
666 }
667 
668 /* sequential file operation table */
669 static const struct seq_operations qdf_mem_seq_ops = {
670 	.start = qdf_mem_seq_start,
671 	.next  = qdf_mem_seq_next,
672 	.stop  = qdf_mem_seq_stop,
673 	.show  = qdf_mem_seq_show,
674 };
675 
676 
677 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
678 {
679 	return seq_open(file, &qdf_mem_seq_ops);
680 }
681 
682 /**
683  * qdf_major_alloc_show() - print sequential callback
684  * @seq: seq_file handle
685  * @v: current iterator
686  *
687  * Return: 0 - success
688  */
689 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
690 {
691 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
692 	struct major_alloc_priv *priv;
693 	qdf_list_t *list;
694 
695 	priv = (struct major_alloc_priv *)seq->private;
696 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
697 		   qdf_debug_domain_name(domain_id), domain_id);
698 
699 	switch (priv->type) {
700 	case LIST_TYPE_MEM:
701 		list = qdf_mem_list_get(domain_id);
702 		break;
703 	case LIST_TYPE_DMA:
704 		list = qdf_mem_dma_list(domain_id);
705 		break;
706 	default:
707 		list = NULL;
708 		break;
709 	}
710 
711 	if (list)
712 		qdf_mem_domain_print(list,
713 				     seq_printf_printer,
714 				     seq,
715 				     priv->threshold,
716 				     qdf_print_major_alloc);
717 
718 	return 0;
719 }
720 
721 /* sequential file operation table created to track major allocs */
722 static const struct seq_operations qdf_major_allocs_seq_ops = {
723 	.start = qdf_mem_seq_start,
724 	.next = qdf_mem_seq_next,
725 	.stop = qdf_mem_seq_stop,
726 	.show = qdf_major_alloc_show,
727 };
728 
729 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
730 {
731 	void *private = inode->i_private;
732 	struct seq_file *seq;
733 	int rc;
734 
735 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
736 	if (rc == 0) {
737 		seq = file->private_data;
738 		seq->private = private;
739 	}
740 	return rc;
741 }
742 
743 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
744 					     const char __user *user_buf,
745 					     size_t count,
746 					     loff_t *pos)
747 {
748 	char buf[32];
749 	ssize_t buf_size;
750 	uint32_t threshold;
751 	struct seq_file *seq = file->private_data;
752 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
753 
754 	buf_size = min(count, (sizeof(buf) - 1));
755 	if (buf_size <= 0)
756 		return 0;
757 	if (copy_from_user(buf, user_buf, buf_size))
758 		return -EFAULT;
759 	buf[buf_size] = '\0';
760 	if (!kstrtou32(buf, 10, &threshold))
761 		priv->threshold = threshold;
762 	return buf_size;
763 }
764 
765 /**
766  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
767  * @threshold: the threshold value set by user to list top allocations
768  * @print: the print adapter function
769  * @print_priv: the private data to be consumed by @print
770  * @mem_print: pointer to function which prints the memory allocation data
771  *
772  * Return: None
773  */
774 static void
775 qdf_print_major_nbuf_allocs(uint32_t threshold,
776 			    qdf_abstract_print print,
777 			    void *print_priv,
778 			    void (*mem_print)(struct __qdf_mem_info *,
779 					      qdf_abstract_print,
780 					      void *, uint32_t))
781 {
782 	uint32_t nbuf_iter;
783 	unsigned long irq_flag = 0;
784 	QDF_NBUF_TRACK *p_node;
785 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
786 	struct qdf_mem_header meta;
787 	bool is_full;
788 
789 	qdf_mem_zero(table, sizeof(table));
790 	qdf_mem_debug_print_header(print, print_priv, threshold);
791 
792 	if (is_initial_mem_debug_disabled)
793 		return;
794 
795 	qdf_rl_info("major nbuf print with threshold %u", threshold);
796 
797 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
798 	     nbuf_iter++) {
799 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
800 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
801 		while (p_node) {
802 			meta.line = p_node->line_num;
803 			meta.size = p_node->size;
804 			meta.caller = NULL;
805 			meta.time = p_node->time;
806 			qdf_str_lcopy(meta.func, p_node->func_name,
807 				      QDF_MEM_FUNC_NAME_SIZE);
808 
809 			is_full = qdf_mem_meta_table_insert(table, &meta);
810 
811 			if (is_full) {
812 				(*mem_print)(table, print,
813 					     print_priv, threshold);
814 				qdf_mem_zero(table, sizeof(table));
815 			}
816 
817 			p_node = p_node->p_next;
818 		}
819 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
820 	}
821 
822 	(*mem_print)(table, print, print_priv, threshold);
823 
824 	qdf_rl_info("major nbuf print end");
825 }
826 
827 /**
828  * qdf_major_nbuf_alloc_show() - print sequential callback
829  * @seq: seq_file handle
830  * @v: current iterator
831  *
832  * Return: 0 - success
833  */
834 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
835 {
836 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
837 
838 	if (!priv) {
839 		qdf_err("priv is null");
840 		return -EINVAL;
841 	}
842 
843 	qdf_print_major_nbuf_allocs(priv->threshold,
844 				    seq_printf_printer,
845 				    seq,
846 				    qdf_print_major_alloc);
847 
848 	return 0;
849 }
850 
851 /**
852  * qdf_nbuf_seq_start() - sequential callback to start
853  * @seq: seq_file handle
854  * @pos: The start position of the sequence
855  *
856  * Return: iterator pointer, or NULL if iteration is complete
857  */
858 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
859 {
860 	enum qdf_debug_domain domain = *pos;
861 
862 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
863 		return NULL;
864 
865 	return pos;
866 }
867 
868 /**
869  * qdf_nbuf_seq_next() - next sequential callback
870  * @seq: seq_file handle
871  * @v: the current iterator
872  * @pos: the current position
873  *
874  * Get the next node and release previous node.
875  *
876  * Return: iterator pointer, or NULL if iteration is complete
877  */
878 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
879 {
880 	++*pos;
881 
882 	return qdf_nbuf_seq_start(seq, pos);
883 }
884 
885 /**
886  * qdf_nbuf_seq_stop() - stop sequential callback
887  * @seq: seq_file handle
888  * @v: current iterator
889  *
890  * Return: None
891  */
892 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
893 
894 /* sequential file operation table created to track major skb allocs */
895 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
896 	.start = qdf_nbuf_seq_start,
897 	.next = qdf_nbuf_seq_next,
898 	.stop = qdf_nbuf_seq_stop,
899 	.show = qdf_major_nbuf_alloc_show,
900 };
901 
902 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
903 {
904 	void *private = inode->i_private;
905 	struct seq_file *seq;
906 	int rc;
907 
908 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
909 	if (rc == 0) {
910 		seq = file->private_data;
911 		seq->private = private;
912 	}
913 	return rc;
914 }
915 
916 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
917 						  const char __user *user_buf,
918 						  size_t count,
919 						  loff_t *pos)
920 {
921 	char buf[32];
922 	ssize_t buf_size;
923 	uint32_t threshold;
924 	struct seq_file *seq = file->private_data;
925 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
926 
927 	buf_size = min(count, (sizeof(buf) - 1));
928 	if (buf_size <= 0)
929 		return 0;
930 	if (copy_from_user(buf, user_buf, buf_size))
931 		return -EFAULT;
932 	buf[buf_size] = '\0';
933 	if (!kstrtou32(buf, 10, &threshold))
934 		priv->threshold = threshold;
935 	return buf_size;
936 }
937 
938 /* file operation table for listing major allocs */
939 static const struct file_operations fops_qdf_major_allocs = {
940 	.owner = THIS_MODULE,
941 	.open = qdf_major_allocs_open,
942 	.read = seq_read,
943 	.llseek = seq_lseek,
944 	.release = seq_release,
945 	.write = qdf_major_alloc_set_threshold,
946 };
947 
948 /* debugfs file operation table */
949 static const struct file_operations fops_qdf_mem_debugfs = {
950 	.owner = THIS_MODULE,
951 	.open = qdf_mem_debugfs_open,
952 	.read = seq_read,
953 	.llseek = seq_lseek,
954 	.release = seq_release,
955 };
956 
957 /* file operation table for listing major allocs */
958 static const struct file_operations fops_qdf_nbuf_major_allocs = {
959 	.owner = THIS_MODULE,
960 	.open = qdf_major_nbuf_allocs_open,
961 	.read = seq_read,
962 	.llseek = seq_lseek,
963 	.release = seq_release,
964 	.write = qdf_major_nbuf_alloc_set_threshold,
965 };
966 
967 static struct major_alloc_priv mem_priv = {
968 	/* List type set to mem */
969 	LIST_TYPE_MEM,
970 	/* initial threshold to list APIs which allocate mem >= 50 times */
971 	50
972 };
973 
974 static struct major_alloc_priv dma_priv = {
975 	/* List type set to DMA */
976 	LIST_TYPE_DMA,
977 	/* initial threshold to list APIs which allocate dma >= 50 times */
978 	50
979 };
980 
981 static struct major_alloc_priv nbuf_priv = {
982 	/* List type set to NBUF */
983 	LIST_TYPE_NBUF,
984 	/* initial threshold to list APIs which allocate nbuf >= 50 times */
985 	50
986 };
987 
988 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
989 {
990 	if (is_initial_mem_debug_disabled)
991 		return QDF_STATUS_SUCCESS;
992 
993 	if (!qdf_mem_debugfs_root)
994 		return QDF_STATUS_E_FAILURE;
995 
996 	debugfs_create_file("list",
997 			    S_IRUSR,
998 			    qdf_mem_debugfs_root,
999 			    NULL,
1000 			    &fops_qdf_mem_debugfs);
1001 
1002 	debugfs_create_file("major_mem_allocs",
1003 			    0600,
1004 			    qdf_mem_debugfs_root,
1005 			    &mem_priv,
1006 			    &fops_qdf_major_allocs);
1007 
1008 	debugfs_create_file("major_dma_allocs",
1009 			    0600,
1010 			    qdf_mem_debugfs_root,
1011 			    &dma_priv,
1012 			    &fops_qdf_major_allocs);
1013 
1014 	debugfs_create_file("major_nbuf_allocs",
1015 			    0600,
1016 			    qdf_mem_debugfs_root,
1017 			    &nbuf_priv,
1018 			    &fops_qdf_nbuf_major_allocs);
1019 
1020 	return QDF_STATUS_SUCCESS;
1021 }
1022 
1023 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1024 {
1025 	return QDF_STATUS_SUCCESS;
1026 }
1027 
1028 #else /* MEMORY_DEBUG */
1029 
1030 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1031 {
1032 	return QDF_STATUS_E_NOSUPPORT;
1033 }
1034 
1035 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1036 {
1037 	return QDF_STATUS_E_NOSUPPORT;
1038 }
1039 
1040 #endif /* MEMORY_DEBUG */
1041 
1042 
1043 static void qdf_mem_debugfs_exit(void)
1044 {
1045 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1046 	qdf_mem_debugfs_root = NULL;
1047 }
1048 
1049 static QDF_STATUS qdf_mem_debugfs_init(void)
1050 {
1051 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1052 
1053 	if (!qdf_debugfs_root)
1054 		return QDF_STATUS_E_FAILURE;
1055 
1056 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1057 
1058 	if (!qdf_mem_debugfs_root)
1059 		return QDF_STATUS_E_FAILURE;
1060 
1061 
1062 	debugfs_create_atomic_t("kmalloc",
1063 				S_IRUSR,
1064 				qdf_mem_debugfs_root,
1065 				&qdf_mem_stat.kmalloc);
1066 
1067 	debugfs_create_atomic_t("dma",
1068 				S_IRUSR,
1069 				qdf_mem_debugfs_root,
1070 				&qdf_mem_stat.dma);
1071 
1072 	debugfs_create_atomic_t("skb",
1073 				S_IRUSR,
1074 				qdf_mem_debugfs_root,
1075 				&qdf_mem_stat.skb);
1076 
1077 	return QDF_STATUS_SUCCESS;
1078 }
1079 
1080 #else /* WLAN_DEBUGFS */
1081 
1082 static QDF_STATUS qdf_mem_debugfs_init(void)
1083 {
1084 	return QDF_STATUS_E_NOSUPPORT;
1085 }
1086 static void qdf_mem_debugfs_exit(void) {}
1087 
1088 
1089 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1090 {
1091 	return QDF_STATUS_E_NOSUPPORT;
1092 }
1093 
1094 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1095 {
1096 	return QDF_STATUS_E_NOSUPPORT;
1097 }
1098 
1099 #endif /* WLAN_DEBUGFS */
1100 
1101 void qdf_mem_kmalloc_inc(qdf_size_t size)
1102 {
1103 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1104 }
1105 
1106 static void qdf_mem_dma_inc(qdf_size_t size)
1107 {
1108 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1109 }
1110 
1111 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1112 void qdf_mem_skb_inc(qdf_size_t size)
1113 {
1114 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1115 }
1116 
1117 void qdf_mem_skb_dec(qdf_size_t size)
1118 {
1119 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1120 }
1121 
1122 void qdf_mem_skb_total_inc(qdf_size_t size)
1123 {
1124 	int32_t skb_mem_max = 0;
1125 
1126 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1127 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1128 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1129 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1130 }
1131 
1132 void qdf_mem_skb_total_dec(qdf_size_t size)
1133 {
1134 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1135 }
1136 
1137 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1138 {
1139 	int32_t curr_dp_tx_skb_mem_max = 0;
1140 
1141 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1142 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1143 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1144 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1145 }
1146 
1147 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1148 {
1149 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1150 }
1151 
1152 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1153 {
1154 	int32_t curr_dp_rx_skb_mem_max = 0;
1155 
1156 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1157 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1158 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1159 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1160 }
1161 
1162 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1163 {
1164 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1165 }
1166 
1167 void qdf_mem_dp_tx_skb_cnt_inc(void)
1168 {
1169 	int32_t curr_dp_tx_skb_count_max = 0;
1170 
1171 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1172 	curr_dp_tx_skb_count_max =
1173 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1174 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1175 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1176 }
1177 
1178 void qdf_mem_dp_tx_skb_cnt_dec(void)
1179 {
1180 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1181 }
1182 
1183 void qdf_mem_dp_rx_skb_cnt_inc(void)
1184 {
1185 	int32_t curr_dp_rx_skb_count_max = 0;
1186 
1187 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1188 	curr_dp_rx_skb_count_max =
1189 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1190 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1191 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1192 }
1193 
1194 void qdf_mem_dp_rx_skb_cnt_dec(void)
1195 {
1196 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1197 }
1198 #endif
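/*
 * The *_inc helpers above share a simple watermark pattern: bump the atomic
 * counter, read it back, and raise the recorded maximum if needed.  A minimal
 * sketch of the same idea for a hypothetical counter/max pair:
 *
 *   qdf_atomic_add(size, &counter);
 *   curr = qdf_atomic_read(&counter);
 *   if (max < curr)
 *           max = curr;   // best effort; the max itself is not atomic
 */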
1199 
1200 void qdf_mem_kmalloc_dec(qdf_size_t size)
1201 {
1202 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1203 }
1204 
1205 static inline void qdf_mem_dma_dec(qdf_size_t size)
1206 {
1207 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1208 }
1209 
1210 /**
1211  * __qdf_mempool_init() - Create and initialize memory pool
1212  *
1213  * @osdev: platform device object
1214  * @pool_addr: address of the pool created
1215  * @elem_cnt: no. of elements in pool
1216  * @elem_size: size of each pool element in bytes
1217  * @flags: flags
1218  *
1219  * Return: 0 on success, non-zero error code on failure
1220  */
1221 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1222 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1223 {
1224 	__qdf_mempool_ctxt_t *new_pool = NULL;
1225 	u_int32_t align = L1_CACHE_BYTES;
1226 	unsigned long aligned_pool_mem;
1227 	int pool_id;
1228 	int i;
1229 
1230 	if (prealloc_disabled) {
1231 		/* TBD: We can maintain a list of pools in qdf_device_t
1232 		 * to help debugging
1233 		 * when pre-allocation is not enabled
1234 		 */
1235 		new_pool = (__qdf_mempool_ctxt_t *)
1236 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1237 		if (!new_pool)
1238 			return QDF_STATUS_E_NOMEM;
1239 
1240 		memset(new_pool, 0, sizeof(*new_pool));
1241 		/* TBD: define flags for zeroing buffers etc */
1242 		new_pool->flags = flags;
1243 		new_pool->elem_size = elem_size;
1244 		new_pool->max_elem = elem_cnt;
1245 		*pool_addr = new_pool;
1246 		return 0;
1247 	}
1248 
1249 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1250 		if (!osdev->mem_pool[pool_id])
1251 			break;
1252 	}
1253 
1254 	if (pool_id == MAX_MEM_POOLS)
1255 		return -ENOMEM;
1256 
1257 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1258 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1259 	if (!new_pool)
1260 		return -ENOMEM;
1261 
1262 	memset(new_pool, 0, sizeof(*new_pool));
1263 	/* TBD: define flags for zeroing buffers etc */
1264 	new_pool->flags = flags;
1265 	new_pool->pool_id = pool_id;
1266 
1267 	/* Round up the element size to cacheline */
1268 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1269 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1270 				((align)?(align - 1):0);
1271 
1272 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1273 	if (!new_pool->pool_mem) {
1274 		/* TBD: Check if we need get_free_pages above */
1275 		kfree(new_pool);
1276 		osdev->mem_pool[pool_id] = NULL;
1277 		return -ENOMEM;
1278 	}
1279 
1280 	spin_lock_init(&new_pool->lock);
1281 
1282 	/* Initialize free list */
1283 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1284 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1285 	STAILQ_INIT(&new_pool->free_list);
1286 
1287 	for (i = 0; i < elem_cnt; i++)
1288 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1289 			(mempool_elem_t *)(aligned_pool_mem +
1290 			(new_pool->elem_size * i)), mempool_entry);
1291 
1292 
1293 	new_pool->free_cnt = elem_cnt;
1294 	*pool_addr = new_pool;
1295 	return 0;
1296 }
1297 qdf_export_symbol(__qdf_mempool_init);
1298 
1299 /**
1300  * __qdf_mempool_destroy() - Destroy memory pool
1301  * @osdev: platform device object
1302  * @pool: Handle to memory pool
1303  *
1304  * Returns: none
1305  */
1306 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1307 {
1308 	int pool_id = 0;
1309 
1310 	if (!pool)
1311 		return;
1312 
1313 	if (prealloc_disabled) {
1314 		kfree(pool);
1315 		return;
1316 	}
1317 
1318 	pool_id = pool->pool_id;
1319 
1320 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1321 	kfree(pool->pool_mem);
1322 	kfree(pool);
1323 	osdev->mem_pool[pool_id] = NULL;
1324 }
1325 qdf_export_symbol(__qdf_mempool_destroy);
1326 
1327 /**
1328  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1329  *
1330  * @osdev: platform device object
1331  * @pool: Handle to memory pool
1332  *
1333  * Return: Pointer to the allocated element or NULL if the pool is empty
1334  */
1335 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1336 {
1337 	void *buf = NULL;
1338 
1339 	if (!pool)
1340 		return NULL;
1341 
1342 	if (prealloc_disabled)
1343 		return  qdf_mem_malloc(pool->elem_size);
1344 
1345 	spin_lock_bh(&pool->lock);
1346 
1347 	buf = STAILQ_FIRST(&pool->free_list);
1348 	if (buf) {
1349 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1350 		pool->free_cnt--;
1351 	}
1352 
1353 	/* TBD: Update free count if debug is enabled */
1354 	spin_unlock_bh(&pool->lock);
1355 
1356 	return buf;
1357 }
1358 qdf_export_symbol(__qdf_mempool_alloc);
1359 
1360 /**
1361  * __qdf_mempool_free() - Free a memory pool element
1362  * @osdev: Platform device object
1363  * @pool: Handle to memory pool
1364  * @buf: Element to be freed
1365  *
1366  * Returns: none
1367  */
1368 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1369 {
1370 	if (!pool)
1371 		return;
1372 
1373 
1374 	if (prealloc_disabled)
1375 		return qdf_mem_free(buf);
1376 
1377 	spin_lock_bh(&pool->lock);
1378 	pool->free_cnt++;
1379 
1380 	STAILQ_INSERT_TAIL
1381 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1382 	spin_unlock_bh(&pool->lock);
1383 }
1384 qdf_export_symbol(__qdf_mempool_free);
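/*
 * Lifecycle sketch for the pool API above (hypothetical caller, element type
 * and sizes; error handling trimmed).  Note that when prealloc_disabled is
 * set the pool degenerates to per-element qdf_mem_malloc()/qdf_mem_free():
 *
 *   __qdf_mempool_t pool;
 *
 *   if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *           return;                          // pool creation failed
 *
 *   elem = __qdf_mempool_alloc(osdev, pool); // NULL when the pool is empty
 *   ...
 *   __qdf_mempool_free(osdev, pool, elem);
 *   __qdf_mempool_destroy(osdev, pool);
 */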
1385 
1386 #ifdef CNSS_MEM_PRE_ALLOC
1387 static bool qdf_might_be_prealloc(void *ptr)
1388 {
1389 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1390 		return true;
1391 	else
1392 		return false;
1393 }
1394 
1395 /**
1396  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1397  * @size: the number of bytes to allocate
1398  *
1399  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1400  * a chunk of pre-allocated memory. If size is less than or equal to
1401  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1402  *
1403  * Return: NULL on failure, non-NULL on success
1404  */
1405 static void *qdf_mem_prealloc_get(size_t size)
1406 {
1407 	void *ptr;
1408 
1409 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1410 		return NULL;
1411 
1412 	ptr = wcnss_prealloc_get(size);
1413 	if (!ptr)
1414 		return NULL;
1415 
1416 	memset(ptr, 0, size);
1417 
1418 	return ptr;
1419 }
1420 
1421 static inline bool qdf_mem_prealloc_put(void *ptr)
1422 {
1423 	return wcnss_prealloc_put(ptr);
1424 }
1425 #else
1426 static bool qdf_might_be_prealloc(void *ptr)
1427 {
1428 	return false;
1429 }
1430 
1431 static inline void *qdf_mem_prealloc_get(size_t size)
1432 {
1433 	return NULL;
1434 }
1435 
1436 static inline bool qdf_mem_prealloc_put(void *ptr)
1437 {
1438 	return false;
1439 }
1440 #endif /* CNSS_MEM_PRE_ALLOC */
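/*
 * Decision sketch (illustrative, mirroring the allocators below): only
 * requests larger than WCNSS_PRE_ALLOC_GET_THRESHOLD are served from the
 * CNSS pre-allocated pool; smaller requests, or a pool miss, fall back to
 * the regular kernel allocator:
 *
 *   ptr = qdf_mem_prealloc_get(size);   // NULL if size <= threshold
 *   if (!ptr)
 *           ptr = kzalloc(size, qdf_mem_malloc_flags());
 */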
1441 
1442 /* External Function implementation */
1443 #ifdef MEMORY_DEBUG
1444 /**
1445  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1446  *
1447  * Return: value of mem_debug_disabled qdf module argument
1448  */
1449 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1450 bool qdf_mem_debug_config_get(void)
1451 {
1452 	/* Return false if the DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1453 	return false;
1454 }
1455 #else
1456 bool qdf_mem_debug_config_get(void)
1457 {
1458 	return mem_debug_disabled;
1459 }
1460 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1461 
1462 /**
1463  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
1464  * @str_value: value of the module param
1465  *
1466  * This function will set the qdf module param mem_debug_disabled
1467  *
1468  * Return: QDF_STATUS_SUCCESS on Success
1469  */
1470 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1471 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1472 {
1473 	QDF_STATUS status;
1474 
1475 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1476 	return status;
1477 }
1478 #endif
1479 
1480 /**
1481  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1482  *
1483  * Return: none
1484  */
1485 static void qdf_mem_debug_init(void)
1486 {
1487 	int i;
1488 
1489 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1490 
1491 	if (is_initial_mem_debug_disabled)
1492 		return;
1493 
1494 	/* Initializing the list with a maximum size of 60000 */
1495 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1496 		qdf_list_create(&qdf_mem_domains[i], 60000);
1497 	qdf_spinlock_create(&qdf_mem_list_lock);
1498 
1499 	/* dma */
1500 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1501 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1502 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1503 }
1504 
1505 static uint32_t
1506 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1507 			       qdf_list_t *mem_list)
1508 {
1509 	if (is_initial_mem_debug_disabled)
1510 		return 0;
1511 
1512 	if (qdf_list_empty(mem_list))
1513 		return 0;
1514 
1515 	qdf_err("Memory leaks detected in %s domain!",
1516 		qdf_debug_domain_name(domain));
1517 	qdf_mem_domain_print(mem_list,
1518 			     qdf_err_printer,
1519 			     NULL,
1520 			     0,
1521 			     qdf_mem_meta_table_print);
1522 
1523 	return mem_list->count;
1524 }
1525 
1526 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1527 {
1528 	uint32_t leak_count = 0;
1529 	int i;
1530 
1531 	if (is_initial_mem_debug_disabled)
1532 		return;
1533 
1534 	/* detect and print leaks */
1535 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1536 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1537 
1538 	if (leak_count)
1539 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1540 				   leak_count);
1541 }
1542 
1543 /**
1544  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1545  *
1546  * Return: none
1547  */
1548 static void qdf_mem_debug_exit(void)
1549 {
1550 	int i;
1551 
1552 	if (is_initial_mem_debug_disabled)
1553 		return;
1554 
1555 	/* mem */
1556 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1557 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1558 		qdf_list_destroy(qdf_mem_list_get(i));
1559 
1560 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1561 
1562 	/* dma */
1563 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1564 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1565 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1566 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1567 }
1568 
1569 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1570 			   void *caller, uint32_t flag)
1571 {
1572 	QDF_STATUS status;
1573 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1574 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1575 	struct qdf_mem_header *header;
1576 	void *ptr;
1577 	unsigned long start, duration;
1578 
1579 	if (is_initial_mem_debug_disabled)
1580 		return __qdf_mem_malloc(size, func, line);
1581 
1582 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1583 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1584 		return NULL;
1585 	}
1586 
1587 	ptr = qdf_mem_prealloc_get(size);
1588 	if (ptr)
1589 		return ptr;
1590 
1591 	if (!flag)
1592 		flag = qdf_mem_malloc_flags();
1593 
1594 	start = qdf_mc_timer_get_system_time();
1595 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1596 	duration = qdf_mc_timer_get_system_time() - start;
1597 
1598 	if (duration > QDF_MEM_WARN_THRESHOLD)
1599 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1600 			 duration, size, func, line);
1601 
1602 	if (!header) {
1603 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1604 		return NULL;
1605 	}
1606 
1607 	qdf_mem_header_init(header, size, func, line, caller);
1608 	qdf_mem_trailer_init(header);
1609 	ptr = qdf_mem_get_ptr(header);
1610 
1611 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1612 	status = qdf_list_insert_front(mem_list, &header->node);
1613 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1614 	if (QDF_IS_STATUS_ERROR(status))
1615 		qdf_err("Failed to insert memory header; status %d", status);
1616 
1617 	qdf_mem_kmalloc_inc(ksize(header));
1618 
1619 	return ptr;
1620 }
1621 qdf_export_symbol(qdf_mem_malloc_debug);
1622 
1623 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1624 				  uint32_t line, void *caller)
1625 {
1626 	QDF_STATUS status;
1627 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1628 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1629 	struct qdf_mem_header *header;
1630 	void *ptr;
1631 	unsigned long start, duration;
1632 
1633 	if (is_initial_mem_debug_disabled)
1634 		return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1635 
1636 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1637 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1638 		return NULL;
1639 	}
1640 
1641 	ptr = qdf_mem_prealloc_get(size);
1642 	if (ptr)
1643 		return ptr;
1644 
1645 	start = qdf_mc_timer_get_system_time();
1646 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1647 	duration = qdf_mc_timer_get_system_time() - start;
1648 
1649 	if (duration > QDF_MEM_WARN_THRESHOLD)
1650 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1651 			 duration, size, func, line);
1652 
1653 	if (!header) {
1654 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1655 		return NULL;
1656 	}
1657 
1658 	qdf_mem_header_init(header, size, func, line, caller);
1659 	qdf_mem_trailer_init(header);
1660 	ptr = qdf_mem_get_ptr(header);
1661 
1662 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1663 	status = qdf_list_insert_front(mem_list, &header->node);
1664 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1665 	if (QDF_IS_STATUS_ERROR(status))
1666 		qdf_err("Failed to insert memory header; status %d", status);
1667 
1668 	qdf_mem_kmalloc_inc(ksize(header));
1669 
1670 	return ptr;
1671 }
1672 
1673 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1674 
1675 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1676 				     uint32_t line)
1677 {
1678 	void *ptr;
1679 
1680 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1681 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1682 			     line);
1683 		return NULL;
1684 	}
1685 
1686 	ptr = qdf_mem_prealloc_get(size);
1687 	if (ptr)
1688 		return ptr;
1689 
1690 	ptr = kzalloc(size, GFP_ATOMIC);
1691 	if (!ptr) {
1692 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1693 			      size, func, line);
1694 		return NULL;
1695 	}
1696 
1697 	qdf_mem_kmalloc_inc(ksize(ptr));
1698 
1699 	return ptr;
1700 }
1701 
1702 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1703 
1704 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1705 {
1706 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1707 	struct qdf_mem_header *header;
1708 	enum qdf_mem_validation_bitmap error_bitmap;
1709 
1710 	if (is_initial_mem_debug_disabled) {
1711 		__qdf_mem_free(ptr);
1712 		return;
1713 	}
1714 
1715 	/* freeing a null pointer is valid */
1716 	if (qdf_unlikely(!ptr))
1717 		return;
1718 
1719 	if (qdf_mem_prealloc_put(ptr))
1720 		return;
1721 
1722 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1723 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1724 				   ptr);
1725 
1726 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1727 
1728 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1729 	header = qdf_mem_get_header(ptr);
1730 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1731 	error_bitmap |= qdf_mem_trailer_validate(header);
1732 
1733 	if (!error_bitmap) {
1734 		header->freed = true;
1735 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1736 				     &header->node);
1737 	}
1738 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1739 
1740 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1741 				    func, line);
1742 
1743 	qdf_mem_kmalloc_dec(ksize(header));
1744 	kfree(header);
1745 }
1746 qdf_export_symbol(qdf_mem_free_debug);
1747 
1748 void qdf_mem_check_for_leaks(void)
1749 {
1750 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1751 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1752 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1753 	uint32_t leaks_count = 0;
1754 
1755 	if (is_initial_mem_debug_disabled)
1756 		return;
1757 
1758 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1759 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1760 
1761 	if (leaks_count)
1762 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1763 				   leaks_count);
1764 }
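/*
 * Usage sketch (hypothetical call sites): assuming the qdf_mem_malloc() and
 * qdf_mem_free() wrappers resolve to the *_debug variants in MEMORY_DEBUG
 * builds, every allocation is tracked in the active debug domain and
 * qdf_mem_check_for_leaks() panics if that domain still holds entries:
 *
 *   buf = qdf_mem_malloc(128);   // header/trailer added, listed per domain
 *   ...
 *   qdf_mem_free(buf);           // header/trailer validated, delisted
 *   qdf_mem_check_for_leaks();   // silent when everything has been freed
 */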
1765 
1766 /**
1767  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1768  * qdf_mem_multi_pages_alloc
1769  * @osdev: OS device handle pointer
1770  * @pages: Multi page information storage
1771  * @element_size: Each element size
1772  * @element_num: Total number of elements to be allocated
1773  * @memctxt: Memory context
1774  * @cacheable: Coherent memory or cacheable memory
1775  * @func: Caller of this allocator
1776  * @line: Line number of the caller
1777  * @caller: Return address of the caller
1778  *
1779  * This function allocates a large amount of memory over multiple pages.
1780  * Large contiguous allocations fail frequently, so instead of allocating
1781  * the memory in one shot, allocate it across multiple non-contiguous
1782  * pages and combine the pages at the point of use.
1783  *
1784  * Return: None
1785  */
1786 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1787 				     struct qdf_mem_multi_page_t *pages,
1788 				     size_t element_size, uint32_t element_num,
1789 				     qdf_dma_context_t memctxt, bool cacheable,
1790 				     const char *func, uint32_t line,
1791 				     void *caller)
1792 {
1793 	uint16_t page_idx;
1794 	struct qdf_mem_dma_page_t *dma_pages;
1795 	void **cacheable_pages = NULL;
1796 	uint16_t i;
1797 
1798 	if (!pages->page_size)
1799 		pages->page_size = qdf_page_size;
1800 
1801 	pages->num_element_per_page = pages->page_size / element_size;
1802 	if (!pages->num_element_per_page) {
1803 		qdf_print("Invalid page %d or element size %d",
1804 			  (int)pages->page_size, (int)element_size);
1805 		goto out_fail;
1806 	}
1807 
1808 	pages->num_pages = element_num / pages->num_element_per_page;
1809 	if (element_num % pages->num_element_per_page)
1810 		pages->num_pages++;
1811 
1812 	if (cacheable) {
1813 		/* Pages information storage */
1814 		pages->cacheable_pages = qdf_mem_malloc_debug(
1815 			pages->num_pages * sizeof(pages->cacheable_pages),
1816 			func, line, caller, 0);
1817 		if (!pages->cacheable_pages)
1818 			goto out_fail;
1819 
1820 		cacheable_pages = pages->cacheable_pages;
1821 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1822 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1823 				pages->page_size, func, line, caller, 0);
1824 			if (!cacheable_pages[page_idx])
1825 				goto page_alloc_fail;
1826 		}
1827 		pages->dma_pages = NULL;
1828 	} else {
1829 		pages->dma_pages = qdf_mem_malloc_debug(
1830 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1831 			func, line, caller, 0);
1832 		if (!pages->dma_pages)
1833 			goto out_fail;
1834 
1835 		dma_pages = pages->dma_pages;
1836 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1837 			dma_pages->page_v_addr_start =
1838 				qdf_mem_alloc_consistent_debug(
1839 					osdev, osdev->dev, pages->page_size,
1840 					&dma_pages->page_p_addr,
1841 					func, line, caller);
1842 			if (!dma_pages->page_v_addr_start) {
1843 				qdf_print("dmaable page alloc fail pi %d",
1844 					  page_idx);
1845 				goto page_alloc_fail;
1846 			}
1847 			dma_pages->page_v_addr_end =
1848 				dma_pages->page_v_addr_start + pages->page_size;
1849 			dma_pages++;
1850 		}
1851 		pages->cacheable_pages = NULL;
1852 	}
1853 	return;
1854 
1855 page_alloc_fail:
1856 	if (cacheable) {
1857 		for (i = 0; i < page_idx; i++)
1858 			qdf_mem_free_debug(pages->cacheable_pages[i],
1859 					   func, line);
1860 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1861 	} else {
1862 		dma_pages = pages->dma_pages;
1863 		for (i = 0; i < page_idx; i++) {
1864 			qdf_mem_free_consistent_debug(
1865 				osdev, osdev->dev,
1866 				pages->page_size, dma_pages->page_v_addr_start,
1867 				dma_pages->page_p_addr, memctxt, func, line);
1868 			dma_pages++;
1869 		}
1870 		qdf_mem_free_debug(pages->dma_pages, func, line);
1871 	}
1872 
1873 out_fail:
1874 	pages->cacheable_pages = NULL;
1875 	pages->dma_pages = NULL;
1876 	pages->num_pages = 0;
1877 }
1878 
1879 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
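/*
 * Element-access sketch for the multi-page scheme above (illustrative only;
 * the index math assumes a cacheable pool):
 *
 *   page   = i / pages->num_element_per_page;
 *   offset = i % pages->num_element_per_page;
 *   elem   = (uint8_t *)pages->cacheable_pages[page] +
 *            offset * element_size;
 *
 * Splitting the pool this way keeps each allocation at page size, which is
 * why a failure on any single page unwinds the whole set in the error path
 * above instead of leaving a partially populated array behind.
 */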
1880 
1881 /**
1882  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1883  * @osdev: OS device handle pointer
1884  * @pages: Multi page information storage
1885  * @memctxt: Memory context
1886  * @cacheable: Coherent memory or cacheable memory
1887  * @func: Caller of this allocator
1888  * @line: Line number of the caller
1889  *
1890  * This function frees memory that was allocated over multiple pages.
1891  *
1892  * Return: None
1893  */
1894 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1895 				    struct qdf_mem_multi_page_t *pages,
1896 				    qdf_dma_context_t memctxt, bool cacheable,
1897 				    const char *func, uint32_t line)
1898 {
1899 	unsigned int page_idx;
1900 	struct qdf_mem_dma_page_t *dma_pages;
1901 
1902 	if (!pages->page_size)
1903 		pages->page_size = qdf_page_size;
1904 
1905 	if (cacheable) {
1906 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1907 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1908 					   func, line);
1909 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1910 	} else {
1911 		dma_pages = pages->dma_pages;
1912 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1913 			qdf_mem_free_consistent_debug(
1914 				osdev, osdev->dev, pages->page_size,
1915 				dma_pages->page_v_addr_start,
1916 				dma_pages->page_p_addr, memctxt, func, line);
1917 			dma_pages++;
1918 		}
1919 		qdf_mem_free_debug(pages->dma_pages, func, line);
1920 	}
1921 
1922 	pages->cacheable_pages = NULL;
1923 	pages->dma_pages = NULL;
1924 	pages->num_pages = 0;
1925 }
1926 
1927 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1928 
1929 #else
1930 static void qdf_mem_debug_init(void) {}
1931 
1932 static void qdf_mem_debug_exit(void) {}
1933 
1934 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1935 {
1936 	void *ptr;
1937 
1938 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1939 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1940 			     line);
1941 		return NULL;
1942 	}
1943 
1944 	ptr = qdf_mem_prealloc_get(size);
1945 	if (ptr)
1946 		return ptr;
1947 
1948 	ptr = kzalloc(size, GFP_ATOMIC);
1949 	if (!ptr) {
1950 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1951 			      size, func, line);
1952 		return NULL;
1953 	}
1954 
1955 	qdf_mem_kmalloc_inc(ksize(ptr));
1956 
1957 	return ptr;
1958 }
1959 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1960 
1961 /**
1962  * qdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
1963  * @osdev: OS device handle pointer
1964  * @pages: Multi page information storage
1965  * @element_size: Each element size
1966  * @element_num: Total number of elements to be allocated
1967  * @memctxt: Memory context
1968  * @cacheable: Coherent memory or cacheable memory
1969  *
1970  * This function allocates a large amount of memory across multiple pages.
1971  * Large contiguous allocations fail frequently, so instead of requesting
1972  * the whole block in one shot, the memory is allocated as multiple
1973  * non-contiguous pages that are combined at the point of use.
1974  *
1975  * Return: None
1976  */
1977 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1978 			       struct qdf_mem_multi_page_t *pages,
1979 			       size_t element_size, uint32_t element_num,
1980 			       qdf_dma_context_t memctxt, bool cacheable)
1981 {
1982 	uint16_t page_idx;
1983 	struct qdf_mem_dma_page_t *dma_pages;
1984 	void **cacheable_pages = NULL;
1985 	uint16_t i;
1986 
1987 	if (!pages->page_size)
1988 		pages->page_size = qdf_page_size;
1989 
1990 	pages->num_element_per_page = pages->page_size / element_size;
1991 	if (!pages->num_element_per_page) {
1992 		qdf_print("Invalid page %d or element size %d",
1993 			  (int)pages->page_size, (int)element_size);
1994 		goto out_fail;
1995 	}
1996 
1997 	pages->num_pages = element_num / pages->num_element_per_page;
1998 	if (element_num % pages->num_element_per_page)
1999 		pages->num_pages++;
2000 
2001 	if (cacheable) {
2002 		/* Pages information storage */
2003 		pages->cacheable_pages = qdf_mem_malloc(
2004 			pages->num_pages * sizeof(pages->cacheable_pages));
2005 		if (!pages->cacheable_pages)
2006 			goto out_fail;
2007 
2008 		cacheable_pages = pages->cacheable_pages;
2009 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2010 			cacheable_pages[page_idx] =
2011 				qdf_mem_malloc(pages->page_size);
2012 			if (!cacheable_pages[page_idx])
2013 				goto page_alloc_fail;
2014 		}
2015 		pages->dma_pages = NULL;
2016 	} else {
2017 		pages->dma_pages = qdf_mem_malloc(
2018 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
2019 		if (!pages->dma_pages)
2020 			goto out_fail;
2021 
2022 		dma_pages = pages->dma_pages;
2023 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2024 			dma_pages->page_v_addr_start =
2025 				qdf_mem_alloc_consistent(osdev, osdev->dev,
2026 					 pages->page_size,
2027 					&dma_pages->page_p_addr);
2028 			if (!dma_pages->page_v_addr_start) {
2029 				qdf_print("dmaable page alloc fail pi %d",
2030 					page_idx);
2031 				goto page_alloc_fail;
2032 			}
2033 			dma_pages->page_v_addr_end =
2034 				dma_pages->page_v_addr_start + pages->page_size;
2035 			dma_pages++;
2036 		}
2037 		pages->cacheable_pages = NULL;
2038 	}
2039 	return;
2040 
2041 page_alloc_fail:
2042 	if (cacheable) {
2043 		for (i = 0; i < page_idx; i++)
2044 			qdf_mem_free(pages->cacheable_pages[i]);
2045 		qdf_mem_free(pages->cacheable_pages);
2046 	} else {
2047 		dma_pages = pages->dma_pages;
2048 		for (i = 0; i < page_idx; i++) {
2049 			qdf_mem_free_consistent(
2050 				osdev, osdev->dev, pages->page_size,
2051 				dma_pages->page_v_addr_start,
2052 				dma_pages->page_p_addr, memctxt);
2053 			dma_pages++;
2054 		}
2055 		qdf_mem_free(pages->dma_pages);
2056 	}
2057 
2058 out_fail:
2059 	pages->cacheable_pages = NULL;
2060 	pages->dma_pages = NULL;
2061 	pages->num_pages = 0;
2062 	return;
2063 }
2064 qdf_export_symbol(qdf_mem_multi_pages_alloc);
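
/*
 * Illustrative sketch only (not part of the driver): a typical multi-page
 * pool lifecycle using the helpers in this file. The element size, count,
 * context and caller below are made-up values for illustration.
 *
 *	struct qdf_mem_multi_page_t pool = { 0 };
 *	const size_t elem_size = 128;
 *	const uint32_t elem_num = 1024;
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pool, elem_size, elem_num,
 *				  memctxt, true);
 *	if (!pool.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&pool, true);
 *	qdf_mem_multi_page_link(osdev, &pool, elem_size, elem_num, true);
 *
 *	...use the linked elements...
 *
 *	qdf_mem_multi_pages_free(osdev, &pool, memctxt, true);
 */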
2065 
2066 /**
2067  * qdf_mem_multi_pages_free() - free a large amount of kernel memory
2068  * @osdev: OS device handle pointer
2069  * @pages: Multi page information storage
2070  * @memctxt: Memory context
2071  * @cacheable: Coherent memory or cacheable memory
2072  *
2073  * This function frees memory that was allocated across multiple pages.
2074  *
2075  * Return: None
2076  */
2077 void qdf_mem_multi_pages_free(qdf_device_t osdev,
2078 			      struct qdf_mem_multi_page_t *pages,
2079 			      qdf_dma_context_t memctxt, bool cacheable)
2080 {
2081 	unsigned int page_idx;
2082 	struct qdf_mem_dma_page_t *dma_pages;
2083 
2084 	if (!pages->page_size)
2085 		pages->page_size = qdf_page_size;
2086 
2087 	if (cacheable) {
2088 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2089 			qdf_mem_free(pages->cacheable_pages[page_idx]);
2090 		qdf_mem_free(pages->cacheable_pages);
2091 	} else {
2092 		dma_pages = pages->dma_pages;
2093 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2094 			qdf_mem_free_consistent(
2095 				osdev, osdev->dev, pages->page_size,
2096 				dma_pages->page_v_addr_start,
2097 				dma_pages->page_p_addr, memctxt);
2098 			dma_pages++;
2099 		}
2100 		qdf_mem_free(pages->dma_pages);
2101 	}
2102 
2103 	pages->cacheable_pages = NULL;
2104 	pages->dma_pages = NULL;
2105 	pages->num_pages = 0;
2106 	return;
2107 }
2108 qdf_export_symbol(qdf_mem_multi_pages_free);
2109 #endif
2110 
2111 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2112 			      bool cacheable)
2113 {
2114 	unsigned int page_idx;
2115 	struct qdf_mem_dma_page_t *dma_pages;
2116 
2117 	if (!pages->page_size)
2118 		pages->page_size = qdf_page_size;
2119 
2120 	if (cacheable) {
2121 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2122 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2123 				     pages->page_size);
2124 	} else {
2125 		dma_pages = pages->dma_pages;
2126 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2127 			qdf_mem_zero(dma_pages->page_v_addr_start,
2128 				     pages->page_size);
2129 			dma_pages++;
2130 		}
2131 	}
2132 }
2133 
2134 qdf_export_symbol(qdf_mem_multi_pages_zero);
2135 
2136 void __qdf_mem_free(void *ptr)
2137 {
2138 	if (!ptr)
2139 		return;
2140 
2141 	if (qdf_might_be_prealloc(ptr)) {
2142 		if (qdf_mem_prealloc_put(ptr))
2143 			return;
2144 	}
2145 
2146 	qdf_mem_kmalloc_dec(ksize(ptr));
2147 
2148 	kfree(ptr);
2149 }
2150 
2151 qdf_export_symbol(__qdf_mem_free);
2152 
2153 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2154 {
2155 	void *ptr;
2156 
2157 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2158 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2159 			     line);
2160 		return NULL;
2161 	}
2162 
2163 	ptr = qdf_mem_prealloc_get(size);
2164 	if (ptr)
2165 		return ptr;
2166 
2167 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2168 	if (!ptr)
2169 		return NULL;
2170 
2171 	qdf_mem_kmalloc_inc(ksize(ptr));
2172 
2173 	return ptr;
2174 }
2175 
2176 qdf_export_symbol(__qdf_mem_malloc);
2177 
2178 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2179 void __qdf_untracked_mem_free(void *ptr)
2180 {
2181 	if (!ptr)
2182 		return;
2183 
2184 	kfree(ptr);
2185 }
2186 
2187 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2188 {
2189 	void *ptr;
2190 
2191 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2192 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2193 			     line);
2194 		return NULL;
2195 	}
2196 
2197 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2198 	if (!ptr)
2199 		return NULL;
2200 
2201 	return ptr;
2202 }
2203 #endif
2204 
2205 void *qdf_aligned_malloc_fl(uint32_t *size,
2206 			    void **vaddr_unaligned,
2207 				qdf_dma_addr_t *paddr_unaligned,
2208 				qdf_dma_addr_t *paddr_aligned,
2209 				uint32_t align,
2210 			    const char *func, uint32_t line)
2211 {
2212 	void *vaddr_aligned;
2213 	uint32_t align_alloc_size;
2214 
2215 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2216 			line);
2217 	if (!*vaddr_unaligned) {
2218 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2219 		return NULL;
2220 	}
2221 
2222 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2223 
2224 	/* Re-allocate additional bytes to align the base address only if
2225 	 * the above allocation returns an unaligned address. The reason for
2226 	 * trying the exact size allocation above is that the OS allocates
2227 	 * blocks of power-of-2 pages and then frees the extra pages.
2228 	 * e.g., for a ring size of 1MB, the allocation below will
2229 	 * request 1MB plus 7 bytes for alignment, which will cause a
2230 	 * 2MB block allocation, and that sometimes fails due to
2231 	 * memory fragmentation.
2232 	 */
2233 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2234 		align_alloc_size = *size + align - 1;
2235 
2236 		qdf_mem_free(*vaddr_unaligned);
2237 		*vaddr_unaligned = qdf_mem_malloc_fl(
2238 				(qdf_size_t)align_alloc_size, func, line);
2239 		if (!*vaddr_unaligned) {
2240 			qdf_warn("Failed to alloc %uB @ %s:%d",
2241 				 align_alloc_size, func, line);
2242 			return NULL;
2243 		}
2244 
2245 		*paddr_unaligned = qdf_mem_virt_to_phys(
2246 				*vaddr_unaligned);
2247 		*size = align_alloc_size;
2248 	}
2249 
2250 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2251 		((unsigned long)(*paddr_unaligned), align);
2252 
2253 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2254 			((unsigned long)(*paddr_aligned) -
2255 			 (unsigned long)(*paddr_unaligned)));
2256 
2257 	return vaddr_aligned;
2258 }
2259 
2260 qdf_export_symbol(qdf_aligned_malloc_fl);
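
/*
 * Worked example of the alignment strategy above, with made-up numbers:
 * for align = 8 and an initial physical address of 0x1003, the exact-size
 * allocation is discarded, *size + 7 bytes are requested instead, and
 * qdf_align() rounds the new physical address up to the next 8-byte
 * boundary; the virtual pointer is then advanced by the same offset:
 *
 *	paddr_unaligned = 0x1003
 *	paddr_aligned   = qdf_align(0x1003, 8) = 0x1008
 *	vaddr_aligned   = vaddr_unaligned + (0x1008 - 0x1003)
 */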
2261 
2262 #ifdef DP_UMAC_HW_RESET_SUPPORT
2263 /**
2264  * qdf_tx_desc_pool_free_bufs() - Go through elements and call the registered cb
2265  * @ctxt: Context to be passed to the cb
2266  * @pages: Multi page information storage
2267  * @elem_size: Each element size
2268  * @elem_count: Total number of allocated elements
2269  * @cacheable: Coherent memory or cacheable memory
2270  * @cb: Callback to free the elements
2271  * @elem_list: elem list for delayed free
2272  *
2273  * Return: 0 on success, or an error code
2274  */
2275 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2276 			       uint32_t elem_size, uint32_t elem_count,
2277 			       uint8_t cacheable, qdf_mem_release_cb cb,
2278 			       void *elem_list)
2279 {
2280 	uint16_t i, i_int;
2281 	void *page_info;
2282 	void *elem;
2283 	uint32_t num_link = 0;
2284 
2285 	for (i = 0; i < pages->num_pages; i++) {
2286 		if (cacheable)
2287 			page_info = pages->cacheable_pages[i];
2288 		else
2289 			page_info = pages->dma_pages[i].page_v_addr_start;
2290 
2291 		if (!page_info)
2292 			return -ENOMEM;
2293 
2294 		elem = page_info;
2295 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2296 			if (i_int == (pages->num_element_per_page - 1)) {
2297 				cb(ctxt, elem, elem_list);
2298 
2299 				if ((i + 1) == pages->num_pages)
2300 					break;
2301 				if (cacheable)
2302 					elem =
2303 					(void *)(pages->cacheable_pages[i + 1]);
2304 				else
2305 					elem = (void *)(pages->
2306 					dma_pages[i + 1].page_v_addr_start);
2307 
2308 				num_link++;
2309 
2310 				break;
2311 			}
2312 
2313 			cb(ctxt, elem, elem_list);
2314 			elem = ((char *)elem + elem_size);
2315 			num_link++;
2316 
2317 			/* Last link established exit */
2318 			if (num_link == (elem_count - 1))
2319 				break;
2320 		}
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
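
/*
 * Illustrative sketch (assumptions flagged): the shape of a release
 * callback as consumed by qdf_tx_desc_pool_free_bufs() above. The
 * callback name, descriptor handling and elem_list usage below are
 * hypothetical; the function only requires the qdf_mem_release_cb
 * signature and invokes it once per element it walks.
 *
 *	static void my_desc_release_cb(void *ctxt, void *elem, void *elem_list)
 *	{
 *		... detach elem from ctxt and queue it on elem_list
 *		    for delayed freeing ...
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(soc, &pool, elem_size, elem_count,
 *				   true, my_desc_release_cb, &free_list);
 */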
2327 #endif
2328 
2329 /**
2330  * qdf_mem_multi_page_link() - Make links for multi page elements
2331  * @osdev: OS device handle pointer
2332  * @pages: Multi page information storage
2333  * @elem_size: Single element size
2334  * @elem_count: Number of elements to be linked
2335  * @cacheable: Coherent memory or cacheable memory
2336  *
2337  * This function links the elements of a multi-page allocation into a list
2338  *
2339  * Return: 0 on success
2340  */
2341 int qdf_mem_multi_page_link(qdf_device_t osdev,
2342 		struct qdf_mem_multi_page_t *pages,
2343 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2344 {
2345 	uint16_t i, i_int;
2346 	void *page_info;
2347 	void **c_elem = NULL;
2348 	uint32_t num_link = 0;
2349 
2350 	for (i = 0; i < pages->num_pages; i++) {
2351 		if (cacheable)
2352 			page_info = pages->cacheable_pages[i];
2353 		else
2354 			page_info = pages->dma_pages[i].page_v_addr_start;
2355 
2356 		if (!page_info)
2357 			return -ENOMEM;
2358 
2359 		c_elem = (void **)page_info;
2360 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2361 			if (i_int == (pages->num_element_per_page - 1)) {
2362 				if ((i + 1) == pages->num_pages)
2363 					break;
2364 				if (cacheable)
2365 					*c_elem = pages->
2366 						cacheable_pages[i + 1];
2367 				else
2368 					*c_elem = pages->
2369 						dma_pages[i + 1].
2370 							page_v_addr_start;
2371 				num_link++;
2372 				break;
2373 			} else {
2374 				*c_elem =
2375 					(void *)(((char *)c_elem) + elem_size);
2376 			}
2377 			num_link++;
2378 			c_elem = (void **)*c_elem;
2379 
2380 			/* Last link established exit */
2381 			if (num_link == (elem_count - 1))
2382 				break;
2383 		}
2384 	}
2385 
2386 	if (c_elem)
2387 		*c_elem = NULL;
2388 
2389 	return 0;
2390 }
2391 qdf_export_symbol(qdf_mem_multi_page_link);
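
/*
 * Note on the linking scheme above: the first pointer-sized word of each
 * element is overwritten with the address of the next element, forming a
 * singly linked free list that spans page boundaries; the final link is
 * set to NULL. An illustrative traversal for a cacheable pool (sketch
 * only, assuming the pool was linked as above):
 *
 *	void **elem = (void **)pages->cacheable_pages[0];
 *
 *	while (elem) {
 *		... the element payload starts at (void *)elem ...
 *		elem = (void **)*elem;
 *	}
 */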
2392 
2393 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2394 {
2395 	/* special case where dst_addr or src_addr can be NULL */
2396 	if (!num_bytes)
2397 		return;
2398 
2399 	QDF_BUG(dst_addr);
2400 	QDF_BUG(src_addr);
2401 	if (!dst_addr || !src_addr)
2402 		return;
2403 
2404 	memcpy(dst_addr, src_addr, num_bytes);
2405 }
2406 qdf_export_symbol(qdf_mem_copy);
2407 
2408 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2409 {
2410 	qdf_shared_mem_t *shared_mem;
2411 	qdf_dma_addr_t dma_addr, paddr;
2412 	int ret;
2413 
2414 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2415 	if (!shared_mem)
2416 		return NULL;
2417 
2418 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2419 				size, qdf_mem_get_dma_addr_ptr(osdev,
2420 						&shared_mem->mem_info));
2421 	if (!shared_mem->vaddr) {
2422 		qdf_err("Unable to allocate DMA memory for shared resource");
2423 		qdf_mem_free(shared_mem);
2424 		return NULL;
2425 	}
2426 
2427 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2428 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2429 
2430 	qdf_mem_zero(shared_mem->vaddr, size);
2431 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2432 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2433 
2434 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2435 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2436 				      shared_mem->vaddr, dma_addr, size);
2437 	if (ret) {
2438 		qdf_err("Unable to get DMA sgtable");
2439 		qdf_mem_free_consistent(osdev, osdev->dev,
2440 					shared_mem->mem_info.size,
2441 					shared_mem->vaddr,
2442 					dma_addr,
2443 					qdf_get_dma_mem_context(shared_mem,
2444 								memctx));
2445 		qdf_mem_free(shared_mem);
2446 		return NULL;
2447 	}
2448 
2449 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2450 
2451 	return shared_mem;
2452 }
2453 
2454 qdf_export_symbol(qdf_mem_shared_mem_alloc);
2455 
2456 /**
2457  * qdf_mem_copy_toio() - copy memory
2458  * @dst_addr: Pointer to destination memory location (to copy to)
2459  * @src_addr: Pointer to source memory location (to copy from)
2460  * @num_bytes: Number of bytes to copy.
2461  *
2462  * Return: none
2463  */
2464 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2465 {
2466 	if (0 == num_bytes) {
2467 		/* special case where dst_addr or src_addr can be NULL */
2468 		return;
2469 	}
2470 
2471 	if ((!dst_addr) || (!src_addr)) {
2472 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2473 			  "%s called with NULL parameter, source:%pK destination:%pK",
2474 			  __func__, src_addr, dst_addr);
2475 		QDF_ASSERT(0);
2476 		return;
2477 	}
2478 	memcpy_toio(dst_addr, src_addr, num_bytes);
2479 }
2480 
2481 qdf_export_symbol(qdf_mem_copy_toio);
2482 
2483 /**
2484  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
2485  * @ptr: Pointer to memory that will be set
2486  * @value: Byte set in memory
2487  * @num_bytes: Number of bytes to be set
2488  *
2489  * Return: None
2490  */
2491 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2492 {
2493 	if (!ptr) {
2494 		qdf_print("%s called with NULL parameter ptr", __func__);
2495 		return;
2496 	}
2497 	memset_io(ptr, value, num_bytes);
2498 }
2499 
2500 qdf_export_symbol(qdf_mem_set_io);
2501 
2502 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2503 {
2504 	QDF_BUG(ptr);
2505 	if (!ptr)
2506 		return;
2507 
2508 	memset(ptr, value, num_bytes);
2509 }
2510 qdf_export_symbol(qdf_mem_set);
2511 
2512 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2513 {
2514 	/* special case where dst_addr or src_addr can be NULL */
2515 	if (!num_bytes)
2516 		return;
2517 
2518 	QDF_BUG(dst_addr);
2519 	QDF_BUG(src_addr);
2520 	if (!dst_addr || !src_addr)
2521 		return;
2522 
2523 	memmove(dst_addr, src_addr, num_bytes);
2524 }
2525 qdf_export_symbol(qdf_mem_move);
2526 
2527 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2528 {
2529 	QDF_BUG(left);
2530 	QDF_BUG(right);
2531 
2532 	return memcmp(left, right, size);
2533 }
2534 qdf_export_symbol(qdf_mem_cmp);
2535 
2536 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2537 /**
2538  * qdf_mem_dma_alloc() - allocates memory for dma
2539  * @osdev: OS device handle
2540  * @dev: Pointer to device handle
2541  * @size: Size to be allocated
2542  * @phy_addr: Physical address
2543  *
2544  * Return: pointer to allocated memory, or NULL if the allocation fails
2545  */
2546 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2547 				      qdf_size_t size,
2548 				      qdf_dma_addr_t *phy_addr)
2549 {
2550 	void *vaddr;
2551 
2552 	vaddr = qdf_mem_malloc(size);
2553 	*phy_addr = ((uintptr_t) vaddr);
2554 	/* using this type conversion to suppress "cast from pointer to integer
2555 	 * of different size" warning on some platforms
2556 	 */
2557 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2558 	return vaddr;
2559 }
2560 
2561 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2562 	!defined(QCA_WIFI_QCN9000)
2563 
2564 #define QCA8074_RAM_BASE 0x50000000
2565 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2566 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2567 			qdf_dma_addr_t *phy_addr)
2568 {
2569 	void *vaddr = NULL;
2570 	int i;
2571 
2572 	*phy_addr = 0;
2573 
2574 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2575 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2576 					   qdf_mem_malloc_flags());
2577 
2578 		if (!vaddr) {
2579 			qdf_err("%s failed, size: %zu!", __func__, size);
2580 			return NULL;
2581 		}
2582 
2583 		if (*phy_addr >= QCA8074_RAM_BASE)
2584 			return vaddr;
2585 
2586 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2587 	}
2588 
2589 	return NULL;
2590 }
2591 
2592 #else
2593 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2594 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2595 {
2596 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2597 }
2598 #endif
2599 
2600 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2601 static inline void
2602 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2603 {
2604 	qdf_mem_free(vaddr);
2605 }
2606 #else
2607 
2608 static inline void
2609 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2610 {
2611 	dma_free_coherent(dev, size, vaddr, paddr);
2612 }
2613 #endif
2614 
2615 #ifdef MEMORY_DEBUG
2616 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2617 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2618 				     const char *func, uint32_t line,
2619 				     void *caller)
2620 {
2621 	QDF_STATUS status;
2622 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2623 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2624 	struct qdf_mem_header *header;
2625 	void *vaddr;
2626 
2627 	if (is_initial_mem_debug_disabled)
2628 		return __qdf_mem_alloc_consistent(osdev, dev,
2629 						  size, paddr,
2630 						  func, line);
2631 
2632 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2633 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2634 		return NULL;
2635 	}
2636 
2637 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2638 				   paddr);
2639 
2640 	if (!vaddr) {
2641 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2642 		return NULL;
2643 	}
2644 
2645 	header = qdf_mem_dma_get_header(vaddr, size);
2646 	/* For DMA buffers we only add trailers; this call initializes
2647 	 * the header structure at the tail of the buffer.
2648 	 * Prefixing the header into the DMA buffer causes SMMU faults, so
2649 	 * do not prefix a header into DMA buffers.
2650 	 */
2651 	qdf_mem_header_init(header, size, func, line, caller);
2652 
2653 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2654 	status = qdf_list_insert_front(mem_list, &header->node);
2655 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2656 	if (QDF_IS_STATUS_ERROR(status))
2657 		qdf_err("Failed to insert memory header; status %d", status);
2658 
2659 	qdf_mem_dma_inc(size);
2660 
2661 	return vaddr;
2662 }
2663 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
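
/*
 * Resulting layout of a debug DMA allocation (informational, based on the
 * trailer comment above): the caller's payload occupies the first @size
 * bytes and the debug header lives in the extra QDF_DMA_MEM_DEBUG_SIZE
 * bytes appended after it, so nothing is ever prefixed to the DMA region:
 *
 *	vaddr                            vaddr + size
 *	|<---------- size bytes --------->|<-- QDF_DMA_MEM_DEBUG_SIZE -->|
 *	         caller payload                  debug header (trailer)
 */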
2664 
2665 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2666 				   qdf_size_t size, void *vaddr,
2667 				   qdf_dma_addr_t paddr,
2668 				   qdf_dma_context_t memctx,
2669 				   const char *func, uint32_t line)
2670 {
2671 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2672 	struct qdf_mem_header *header;
2673 	enum qdf_mem_validation_bitmap error_bitmap;
2674 
2675 	if (is_initial_mem_debug_disabled) {
2676 		__qdf_mem_free_consistent(
2677 					  osdev, dev,
2678 					  size, vaddr,
2679 					  paddr, memctx);
2680 		return;
2681 	}
2682 
2683 	/* freeing a null pointer is valid */
2684 	if (qdf_unlikely(!vaddr))
2685 		return;
2686 
2687 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2688 
2689 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2690 	/* For DMA buffers we only add trailers; this call retrieves
2691 	 * the header structure from the tail of the buffer.
2692 	 * Prefixing the header into the DMA buffer causes SMMU faults, so
2693 	 * do not prefix a header into DMA buffers.
2694 	 */
2695 	header = qdf_mem_dma_get_header(vaddr, size);
2696 	error_bitmap = qdf_mem_header_validate(header, domain);
2697 	if (!error_bitmap) {
2698 		header->freed = true;
2699 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2700 				     &header->node);
2701 	}
2702 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2703 
2704 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2705 
2706 	qdf_mem_dma_dec(header->size);
2707 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2708 }
2709 qdf_export_symbol(qdf_mem_free_consistent_debug);
2710 #endif /* MEMORY_DEBUG */
2711 
2712 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2713 			       qdf_size_t size, void *vaddr,
2714 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2715 {
2716 	qdf_mem_dma_dec(size);
2717 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2718 }
2719 
2720 qdf_export_symbol(__qdf_mem_free_consistent);
2721 
2722 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2723 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2724 				 const char *func, uint32_t line)
2725 {
2726 	void *vaddr;
2727 
2728 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2729 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2730 			     size, func, line);
2731 		return NULL;
2732 	}
2733 
2734 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2735 
2736 	if (vaddr)
2737 		qdf_mem_dma_inc(size);
2738 
2739 	return vaddr;
2740 }
2741 
2742 qdf_export_symbol(__qdf_mem_alloc_consistent);
2743 
2744 void *qdf_aligned_mem_alloc_consistent_fl(
2745 	qdf_device_t osdev, uint32_t *size,
2746 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2747 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2748 	const char *func, uint32_t line)
2749 {
2750 	void *vaddr_aligned;
2751 	uint32_t align_alloc_size;
2752 
2753 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2754 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2755 	if (!*vaddr_unaligned) {
2756 		qdf_warn("Failed to alloc %uB @ %s:%d",
2757 			 *size, func, line);
2758 		return NULL;
2759 	}
2760 
2761 	/* Re-allocate additional bytes to align the base address only if
2762 	 * the above allocation returns an unaligned address. The reason for
2763 	 * trying the exact size allocation above is that the OS allocates
2764 	 * blocks of power-of-2 pages and then frees the extra pages.
2765 	 * e.g., for a ring size of 1MB, the allocation below will
2766 	 * request 1MB plus 7 bytes for alignment, which will cause a
2767 	 * 2MB block allocation, and that sometimes fails due to
2768 	 * memory fragmentation.
2769 	 */
2770 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2771 		align_alloc_size = *size + align - 1;
2772 
2773 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2774 					*vaddr_unaligned,
2775 					*paddr_unaligned, 0);
2776 
2777 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2778 				osdev, osdev->dev, align_alloc_size,
2779 				paddr_unaligned);
2780 		if (!*vaddr_unaligned) {
2781 			qdf_warn("Failed to alloc %uB @ %s:%d",
2782 				 align_alloc_size, func, line);
2783 			return NULL;
2784 		}
2785 
2786 		*size = align_alloc_size;
2787 	}
2788 
2789 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2790 			(unsigned long)(*paddr_unaligned), align);
2791 
2792 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2793 				 ((unsigned long)(*paddr_aligned) -
2794 				  (unsigned long)(*paddr_unaligned)));
2795 
2796 	return vaddr_aligned;
2797 }
2798 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2799 
2800 /**
2801  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2802  * @osdev: OS device handle
2803  * @bus_addr: dma address to give to the device
2804  * @size: Size of the memory block
2805  * @direction: direction data will be DMAed
2806  *
2807  * Assign memory to the remote device.
2808  * The cache lines are flushed to RAM or invalidated as needed.
2809  *
2810  * Return: none
2811  */
2812 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2813 					qdf_dma_addr_t bus_addr,
2814 					qdf_size_t size,
2815 					enum dma_data_direction direction)
2816 {
2817 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2818 }
2819 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2820 
2821 /**
2822  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2823  * @osdev: OS device handle
2824  * @bus_addr: dma address to give to the cpu
2825  * @size: Size of the memory block
2826  * @direction: direction data will be DMAed
2827  *
2828  * Assign memory to the CPU.
2829  *
2830  * Return: none
2831  */
2832 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2833 				     qdf_dma_addr_t bus_addr,
2834 				     qdf_size_t size,
2835 				     enum dma_data_direction direction)
2836 {
2837 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2838 }
2839 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
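
/*
 * Illustrative ownership hand-off using the two sync helpers above; the
 * buffer address, length and directions are assumptions for the example:
 *
 *	CPU fills a TX buffer, then hands it to the device:
 *		qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, buf_len,
 *						   DMA_TO_DEVICE);
 *
 *	Device finishes an RX DMA, then the CPU reads the buffer:
 *		qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len,
 *						DMA_FROM_DEVICE);
 */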
2840 
2841 void qdf_mem_init(void)
2842 {
2843 	qdf_mem_debug_init();
2844 	qdf_net_buf_debug_init();
2845 	qdf_frag_debug_init();
2846 	qdf_mem_debugfs_init();
2847 	qdf_mem_debug_debugfs_init();
2848 }
2849 qdf_export_symbol(qdf_mem_init);
2850 
2851 void qdf_mem_exit(void)
2852 {
2853 	qdf_mem_debug_debugfs_exit();
2854 	qdf_mem_debugfs_exit();
2855 	qdf_frag_debug_exit();
2856 	qdf_net_buf_debug_exit();
2857 	qdf_mem_debug_exit();
2858 }
2859 qdf_export_symbol(qdf_mem_exit);
2860 
2861 /**
2862  * qdf_ether_addr_copy() - copy an Ethernet address
2863  *
2864  * @dst_addr: A six-byte array Ethernet address destination
2865  * @src_addr: A six-byte array Ethernet address source
2866  *
2867  * Please note: dst & src must both be aligned to u16.
2868  *
2869  * Return: none
2870  */
2871 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2872 {
2873 	if ((!dst_addr) || (!src_addr)) {
2874 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2875 			  "%s called with NULL parameter, source:%pK destination:%pK",
2876 			  __func__, src_addr, dst_addr);
2877 		QDF_ASSERT(0);
2878 		return;
2879 	}
2880 	ether_addr_copy(dst_addr, src_addr);
2881 }
2882 qdf_export_symbol(qdf_ether_addr_copy);
2883 
2884 int32_t qdf_dma_mem_stats_read(void)
2885 {
2886 	return qdf_atomic_read(&qdf_mem_stat.dma);
2887 }
2888 
2889 qdf_export_symbol(qdf_dma_mem_stats_read);
2890 
2891 int32_t qdf_heap_mem_stats_read(void)
2892 {
2893 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2894 }
2895 
2896 qdf_export_symbol(qdf_heap_mem_stats_read);
2897 
2898 int32_t qdf_skb_mem_stats_read(void)
2899 {
2900 	return qdf_atomic_read(&qdf_mem_stat.skb);
2901 }
2902 
2903 qdf_export_symbol(qdf_skb_mem_stats_read);
2904 
2905 int32_t qdf_skb_total_mem_stats_read(void)
2906 {
2907 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2908 }
2909 
2910 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2911 
2912 int32_t qdf_skb_max_mem_stats_read(void)
2913 {
2914 	return qdf_mem_stat.skb_mem_max;
2915 }
2916 
2917 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2918 
2919 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2920 {
2921 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2922 }
2923 
2924 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2925 
2926 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2927 {
2928 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2929 }
2930 
2931 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2932 
2933 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2934 {
2935 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2936 }
2937 
2938 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2939 
2940 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2941 {
2942 	return qdf_mem_stat.dp_tx_skb_count_max;
2943 }
2944 
2945 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2946 
2947 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2948 {
2949 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2950 }
2951 
2952 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
2953 
2954 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
2955 {
2956 	return qdf_mem_stat.dp_rx_skb_count_max;
2957 }
2958 
2959 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
2960 
2961 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
2962 {
2963 	return qdf_mem_stat.dp_tx_skb_mem_max;
2964 }
2965 
2966 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
2967 
2968 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
2969 {
2970 	return qdf_mem_stat.dp_rx_skb_mem_max;
2971 }
2972 
2973 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
2974 
2975 int32_t qdf_mem_tx_desc_cnt_read(void)
2976 {
2977 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
2978 }
2979 
2980 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
2981 
2982 int32_t qdf_mem_tx_desc_max_read(void)
2983 {
2984 	return qdf_mem_stat.tx_descs_max;
2985 }
2986 
2987 qdf_export_symbol(qdf_mem_tx_desc_max_read);
2988 
2989 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
2990 				int32_t tx_descs_max)
2991 {
2992 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
2993 	qdf_mem_stat.tx_descs_max = tx_descs_max;
2994 }
2995 
2996 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
2997 
2998 void qdf_mem_stats_init(void)
2999 {
3000 	qdf_mem_stat.skb_mem_max = 0;
3001 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
3002 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
3003 	qdf_mem_stat.dp_tx_skb_count_max = 0;
3004 	qdf_mem_stat.dp_rx_skb_count_max = 0;
3005 	qdf_mem_stat.tx_descs_max = 0;
3006 }
3007 
3008 qdf_export_symbol(qdf_mem_stats_init);
3009 
3010 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
3011 {
3012 	void *ptr;
3013 
3014 	if (!size) {
3015 		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
3016 		return NULL;
3017 	}
3018 
3019 	ptr = vzalloc(size);
3020 
3021 	return ptr;
3022 }
3023 
3024 qdf_export_symbol(__qdf_mem_valloc);
3025 
3026 void __qdf_mem_vfree(void *ptr)
3027 {
3028 	if (qdf_unlikely(!ptr))
3029 		return;
3030 
3031 	vfree(ptr);
3032 }
3033 
3034 qdf_export_symbol(__qdf_mem_vfree);
3035 
3036 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
3037 int
3038 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
3039 			  enum qdf_iommu_attr attr, void *data)
3040 {
3041 	return __qdf_iommu_domain_get_attr(domain, attr, data);
3042 }
3043 
3044 qdf_export_symbol(qdf_iommu_domain_get_attr);
3045 #endif
3046 
3047 #ifdef ENHANCED_OS_ABSTRACTION
3048 void qdf_update_mem_map_table(qdf_device_t osdev,
3049 			      qdf_mem_info_t *mem_info,
3050 			      qdf_dma_addr_t dma_addr,
3051 			      uint32_t mem_size)
3052 {
3053 	if (!mem_info) {
3054 		qdf_nofl_err("%s: NULL mem_info", __func__);
3055 		return;
3056 	}
3057 
3058 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
3059 }
3060 
3061 qdf_export_symbol(qdf_update_mem_map_table);
3062 
3063 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
3064 					  qdf_dma_addr_t dma_addr)
3065 {
3066 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
3067 }
3068 
3069 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
3070 #endif
3071 
3072 #ifdef QCA_KMEM_CACHE_SUPPORT
3073 qdf_kmem_cache_t
3074 __qdf_kmem_cache_create(const char *cache_name,
3075 			qdf_size_t size)
3076 {
3077 	struct kmem_cache *cache;
3078 
3079 	cache = kmem_cache_create(cache_name, size,
3080 				  0, 0, NULL);
3081 
3082 	if (!cache)
3083 		return NULL;
3084 
3085 	return cache;
3086 }
3087 qdf_export_symbol(__qdf_kmem_cache_create);
3088 
3089 void
3090 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3091 {
3092 	kmem_cache_destroy(cache);
3093 }
3094 
3095 qdf_export_symbol(__qdf_kmem_cache_destroy);
3096 
3097 void*
3098 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3099 {
3100 	int flags = GFP_KERNEL;
3101 
3102 	if (in_interrupt() || irqs_disabled() || in_atomic())
3103 		flags = GFP_ATOMIC;
3104 
3105 	return kmem_cache_alloc(cache, flags);
3106 }
3107 
3108 qdf_export_symbol(__qdf_kmem_cache_alloc);
3109 
3110 void
3111 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3112 
3113 {
3114 	kmem_cache_free(cache, node);
3115 }
3116 
3117 qdf_export_symbol(__qdf_kmem_cache_free);
3118 #else
3119 qdf_kmem_cache_t
3120 __qdf_kmem_cache_create(const char *cache_name,
3121 			qdf_size_t size)
3122 {
3123 	return NULL;
3124 }
3125 
3126 void
3127 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3128 {
3129 }
3130 
3131 void *
3132 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3133 {
3134 	return NULL;
3135 }
3136 
3137 void
3138 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3139 {
3140 }
3141 #endif
3142