xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 
38 #if defined(CONFIG_CNSS)
39 #include <net/cnss.h>
40 #endif
41 
42 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
43 #include <net/cnss_prealloc.h>
44 #endif
45 
46 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
47 static bool mem_debug_disabled;
48 qdf_declare_param(mem_debug_disabled, bool);
49 qdf_export_symbol(mem_debug_disabled);
50 static bool is_initial_mem_debug_disabled;
51 #endif
52 
53 /* Preprocessor Definitions and Constants */
54 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
55 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
56 #define QDF_DEBUG_STRING_SIZE 512
57 
58 #ifdef MEMORY_DEBUG
59 #include "qdf_debug_domain.h"
60 #include <qdf_list.h>
61 
62 enum list_type {
63 	LIST_TYPE_MEM = 0,
64 	LIST_TYPE_DMA = 1,
65 	LIST_TYPE_MAX,
66 };
67 
68 /**
69  * struct major_alloc_priv - private data registered to the debugfs entries
70  *                   created to list the major allocations
71  * @type:            type of the list to be parsed
72  * @threshold:       configured by the user by overwriting the respective
73  *                   debugfs entry; used to list the functions which requested
74  *                   memory/dma allocations more than @threshold number of times.
75  */
76 struct major_alloc_priv {
77 	enum list_type type;
78 	uint32_t threshold;
79 };
80 
81 static struct major_alloc_priv mem_priv = {
82 	/* List type set to mem */
83 	LIST_TYPE_MEM,
84 	/* initial threshold to list APIs which allocates mem >= 50 times */
85 	50
86 };
87 
88 static struct major_alloc_priv dma_priv = {
89 	/* List type set to DMA */
90 	LIST_TYPE_DMA,
91 	/* initial threshold to list APIs which allocates dma >= 50 times */
92 	50
93 };
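/*
 * Illustrative usage sketch (not part of the driver): the thresholds above
 * are meant to be tuned at runtime through the "major_mem_allocs" and
 * "major_dma_allocs" debugfs files created in qdf_mem_debug_debugfs_init().
 * Assuming debugfs is mounted at /sys/kernel/debug and the qdf root
 * directory is named "wlan" (the actual name depends on the platform's
 * qdf_debugfs_get_root() setup), a session might look like:
 *
 *	echo 100 > /sys/kernel/debug/wlan/mem/major_mem_allocs
 *	cat /sys/kernel/debug/wlan/mem/major_mem_allocs
 *
 * which lists only the call sites with at least 100 outstanding allocations.
 */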
94 
95 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
96 static qdf_spinlock_t qdf_mem_list_lock;
97 
98 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
99 static qdf_spinlock_t qdf_mem_dma_list_lock;
100 
101 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
102 {
103 	return &qdf_mem_domains[domain];
104 }
105 
106 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
107 {
108 	return &qdf_mem_dma_domains[domain];
109 }
110 
111 /**
112  * struct qdf_mem_header - memory object to debug
113  * @node: node to the list
114  * @domain: the active memory domain at time of allocation
115  * @freed: flag set during free, used to detect double frees
116  *	Use uint8_t so we can detect corruption
117  * @func: name of the function the allocation was made from
118  * @line: line number of the file the allocation was made from
119  * @size: size of the allocation in bytes
120  * @caller: Caller of the function for which memory is allocated
121  * @header: a known value, used to detect out-of-bounds access
122  * @time: timestamp at which allocation was made
123  */
124 struct qdf_mem_header {
125 	qdf_list_node_t node;
126 	enum qdf_debug_domain domain;
127 	uint8_t freed;
128 	char func[QDF_MEM_FUNC_NAME_SIZE];
129 	uint32_t line;
130 	uint32_t size;
131 	void *caller;
132 	uint64_t header;
133 	uint64_t time;
134 };
135 
136 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
137 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
138 
139 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
140 {
141 	return (struct qdf_mem_header *)ptr - 1;
142 }
143 
144 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
145 							    qdf_size_t size)
146 {
147 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
148 }
149 
150 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
151 {
152 	return (uint64_t *)((void *)(header + 1) + header->size);
153 }
154 
155 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
156 {
157 	return (void *)(header + 1);
158 }
159 
160 /* number of bytes needed for the qdf memory debug information */
161 #define QDF_MEM_DEBUG_SIZE \
162 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
163 
164 /* number of bytes needed for the qdf dma memory debug information */
165 #define QDF_DMA_MEM_DEBUG_SIZE \
166 	(sizeof(struct qdf_mem_header))
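/*
 * Layout sketch of a debug-tracked allocation (illustrative, derived from
 * the accessors above): kzalloc() returns the header, callers receive the
 * pointer just past it, and a trailer pattern guards the end of the user
 * region.
 *
 *	| struct qdf_mem_header | user data (size bytes) | WLAN_MEM_TRAILER |
 *	^                        ^                         ^
 *	kzalloc() result         qdf_mem_get_ptr()         qdf_mem_get_trailer()
 *
 * DMA allocations instead place the header after the buffer
 * (qdf_mem_dma_get_header()) and omit the trailer, hence the smaller
 * QDF_DMA_MEM_DEBUG_SIZE above.
 */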
167 
168 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
169 {
170 	QDF_BUG(header);
171 	if (!header)
172 		return;
173 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
174 }
175 
176 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
177 				const char *func, uint32_t line, void *caller)
178 {
179 	QDF_BUG(header);
180 	if (!header)
181 		return;
182 
183 	header->domain = qdf_debug_domain_get();
184 	header->freed = false;
185 
186 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
187 
188 	header->line = line;
189 	header->size = size;
190 	header->caller = caller;
191 	header->header = WLAN_MEM_HEADER;
192 	header->time = qdf_get_log_timestamp();
193 }
194 
195 enum qdf_mem_validation_bitmap {
196 	QDF_MEM_BAD_HEADER = 1 << 0,
197 	QDF_MEM_BAD_TRAILER = 1 << 1,
198 	QDF_MEM_BAD_SIZE = 1 << 2,
199 	QDF_MEM_DOUBLE_FREE = 1 << 3,
200 	QDF_MEM_BAD_FREED = 1 << 4,
201 	QDF_MEM_BAD_NODE = 1 << 5,
202 	QDF_MEM_BAD_DOMAIN = 1 << 6,
203 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
204 };
205 
206 static enum qdf_mem_validation_bitmap
207 qdf_mem_trailer_validate(struct qdf_mem_header *header)
208 {
209 	enum qdf_mem_validation_bitmap error_bitmap = 0;
210 
211 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
212 		error_bitmap |= QDF_MEM_BAD_TRAILER;
213 	return error_bitmap;
214 }
215 
216 static enum qdf_mem_validation_bitmap
217 qdf_mem_header_validate(struct qdf_mem_header *header,
218 			enum qdf_debug_domain domain)
219 {
220 	enum qdf_mem_validation_bitmap error_bitmap = 0;
221 
222 	if (header->header != WLAN_MEM_HEADER)
223 		error_bitmap |= QDF_MEM_BAD_HEADER;
224 
225 	if (header->size > QDF_MEM_MAX_MALLOC)
226 		error_bitmap |= QDF_MEM_BAD_SIZE;
227 
228 	if (header->freed == true)
229 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
230 	else if (header->freed)
231 		error_bitmap |= QDF_MEM_BAD_FREED;
232 
233 	if (!qdf_list_node_in_any_list(&header->node))
234 		error_bitmap |= QDF_MEM_BAD_NODE;
235 
236 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
237 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
238 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
239 	else if (header->domain != domain)
240 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
241 
242 	return error_bitmap;
243 }
244 
245 static void
246 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
247 			    enum qdf_debug_domain current_domain,
248 			    enum qdf_mem_validation_bitmap error_bitmap,
249 			    const char *func,
250 			    uint32_t line)
251 {
252 	if (!error_bitmap)
253 		return;
254 
255 	if (error_bitmap & QDF_MEM_BAD_HEADER)
256 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
257 			header->header, WLAN_MEM_HEADER);
258 
259 	if (error_bitmap & QDF_MEM_BAD_SIZE)
260 		qdf_err("Corrupted memory size %u (expected < %d)",
261 			header->size, QDF_MEM_MAX_MALLOC);
262 
263 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
264 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
265 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
266 
267 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
268 		qdf_err("Memory has previously been freed");
269 
270 	if (error_bitmap & QDF_MEM_BAD_FREED)
271 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
272 
273 	if (error_bitmap & QDF_MEM_BAD_NODE)
274 		qdf_err("Corrupted memory header node or double free");
275 
276 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
277 		qdf_err("Corrupted memory domain 0x%x", header->domain);
278 
279 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
280 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
281 			qdf_debug_domain_name(header->domain), header->domain,
282 			qdf_debug_domain_name(current_domain), current_domain);
283 
284 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
285 }
286 #endif /* MEMORY_DEBUG */
287 
288 u_int8_t prealloc_disabled = 1;
289 qdf_declare_param(prealloc_disabled, byte);
290 qdf_export_symbol(prealloc_disabled);
291 
292 #if defined WLAN_DEBUGFS
293 
294 /* Debugfs root directory for qdf_mem */
295 static struct dentry *qdf_mem_debugfs_root;
296 
297 /**
298  * struct __qdf_mem_stat - qdf memory statistics
299  * @kmalloc:	total kmalloc allocations
300  * @dma:	total dma allocations
301  * @skb:	total skb allocations
302  */
303 static struct __qdf_mem_stat {
304 	qdf_atomic_t kmalloc;
305 	qdf_atomic_t dma;
306 	qdf_atomic_t skb;
307 } qdf_mem_stat;
308 
309 void qdf_mem_kmalloc_inc(qdf_size_t size)
310 {
311 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
312 }
313 
314 static void qdf_mem_dma_inc(qdf_size_t size)
315 {
316 	qdf_atomic_add(size, &qdf_mem_stat.dma);
317 }
318 
319 void qdf_mem_skb_inc(qdf_size_t size)
320 {
321 	qdf_atomic_add(size, &qdf_mem_stat.skb);
322 }
323 
324 void qdf_mem_kmalloc_dec(qdf_size_t size)
325 {
326 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
327 }
328 
329 static inline void qdf_mem_dma_dec(qdf_size_t size)
330 {
331 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
332 }
333 
334 void qdf_mem_skb_dec(qdf_size_t size)
335 {
336 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
337 }
338 
339 #ifdef MEMORY_DEBUG
340 static int qdf_err_printer(void *priv, const char *fmt, ...)
341 {
342 	va_list args;
343 
344 	va_start(args, fmt);
345 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
346 	va_end(args);
347 
348 	return 0;
349 }
350 
351 static int seq_printf_printer(void *priv, const char *fmt, ...)
352 {
353 	struct seq_file *file = priv;
354 	va_list args;
355 
356 	va_start(args, fmt);
357 	seq_vprintf(file, fmt, args);
358 	seq_puts(file, "\n");
359 	va_end(args);
360 
361 	return 0;
362 }
363 
364 /**
365  * struct __qdf_mem_info - memory statistics
366  * @func: the function which allocated memory
367  * @line: the line at which allocation happened
368  * @size: the size of allocation
369  * @caller: Address of the caller function
370  * @count: how many allocations of same type
371  * @time: timestamp at which allocation happened
372  */
373 struct __qdf_mem_info {
374 	char func[QDF_MEM_FUNC_NAME_SIZE];
375 	uint32_t line;
376 	uint32_t size;
377 	void *caller;
378 	uint32_t count;
379 	uint64_t time;
380 };
381 
382 /*
383  * The table depth defines the de-duplication proximity scope.
384  * A deeper table takes more time, so choose an optimal value.
385  */
386 #define QDF_MEM_STAT_TABLE_SIZE 8
387 
388 /**
389  * qdf_mem_debug_print_header() - memory debug header print logic
390  * @print: the print adapter function
391  * @print_priv: the private data to be consumed by @print
392  * @threshold: the threshold value set by user to list top allocations
393  *
394  * Return: None
395  */
396 static void qdf_mem_debug_print_header(qdf_abstract_print print,
397 				       void *print_priv,
398 				       uint32_t threshold)
399 {
400 	if (threshold)
401 		print(print_priv, "APIs that requested allocations >= %u times",
402 		      threshold);
403 	print(print_priv,
404 	      "--------------------------------------------------------------");
405 	print(print_priv,
406 	      " count    size     total    filename     caller    timestamp");
407 	print(print_priv,
408 	      "--------------------------------------------------------------");
409 }
410 
411 /**
412  * qdf_mem_meta_table_print() - memory metadata table print logic
413  * @table: the memory metadata table to print
414  * @print: the print adapter function
415  * @print_priv: the private data to be consumed by @print
416  * @threshold: the threshold value set by user to list top allocations
417  *
418  * Return: None
419  */
420 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
421 				     qdf_abstract_print print,
422 				     void *print_priv,
423 				     uint32_t threshold)
424 {
425 	int i;
426 	char debug_str[QDF_DEBUG_STRING_SIZE];
427 	size_t len = 0;
428 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
429 
430 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
431 			     "%s", debug_prefix);
432 
433 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
434 		if (!table[i].count)
435 			break;
436 
437 		print(print_priv,
438 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
439 		      table[i].count,
440 		      table[i].size,
441 		      table[i].count * table[i].size,
442 		      table[i].func,
443 		      table[i].line, table[i].caller,
444 		      table[i].time);
445 		len += qdf_scnprintf(debug_str + len,
446 				     sizeof(debug_str) - len,
447 				     " @ %s:%u %pS",
448 				     table[i].func,
449 				     table[i].line,
450 				     table[i].caller);
451 	}
452 	print(print_priv, "%s", debug_str);
453 }
454 
455 /**
456  * qdf_print_major_alloc() - memory metadata table print logic
457  * @table: the memory metadata table to print
458  * @print: the print adapter function
459  * @print_priv: the private data to be consumed by @print
460  * @threshold: the threshold value set by user to list top allocations
461  *
462  * Return: None
463  */
464 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
465 				  qdf_abstract_print print,
466 				  void *print_priv,
467 				  uint32_t threshold)
468 {
469 	int i;
470 
471 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 		if (!table[i].count)
473 			break;
474 		if (table[i].count >= threshold)
475 			print(print_priv,
476 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
477 			      table[i].count,
478 			      table[i].size,
479 			      table[i].count * table[i].size,
480 			      table[i].func,
481 			      table[i].line, table[i].caller,
482 			      table[i].time);
483 	}
484 }
485 
486 /**
487  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
488  * @table: the memory metadata table to insert into
489  * @meta: the memory metadata to insert
490  *
491  * Return: true if the table is full after inserting, false otherwise
492  */
493 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
494 				      struct qdf_mem_header *meta)
495 {
496 	int i;
497 
498 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
499 		if (!table[i].count) {
500 			qdf_str_lcopy(table[i].func, meta->func,
501 				      QDF_MEM_FUNC_NAME_SIZE);
502 			table[i].line = meta->line;
503 			table[i].size = meta->size;
504 			table[i].count = 1;
505 			table[i].caller = meta->caller;
506 			table[i].time = meta->time;
507 			break;
508 		}
509 
510 		if (qdf_str_eq(table[i].func, meta->func) &&
511 		    table[i].line == meta->line &&
512 		    table[i].size == meta->size &&
513 		    table[i].caller == meta->caller) {
514 			table[i].count++;
515 			break;
516 		}
517 	}
518 
519 	/* return true if the table is now full */
520 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
521 }
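/*
 * Example of the de-duplication above (illustrative): two live allocations
 * made from the same function, line, size and caller collapse into a single
 * table row with count == 2, while an allocation differing in any of those
 * fields occupies a new row. Once all QDF_MEM_STAT_TABLE_SIZE rows are used,
 * the caller flushes the table and starts a fresh batch.
 */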
522 
523 /**
524  * qdf_mem_domain_print() - output agnostic memory domain print logic
525  * @domain: the memory domain to print
526  * @print: the print adapter function
527  * @print_priv: the private data to be consumed by @print
528  * @threshold: the threshold value set by user to list top allocations
529  * @mem_print: pointer to function which prints the memory allocation data
530  *
531  * Return: None
532  */
533 static void qdf_mem_domain_print(qdf_list_t *domain,
534 				 qdf_abstract_print print,
535 				 void *print_priv,
536 				 uint32_t threshold,
537 				 void (*mem_print)(struct __qdf_mem_info *,
538 						   qdf_abstract_print,
539 						   void *, uint32_t))
540 {
541 	QDF_STATUS status;
542 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
543 	qdf_list_node_t *node;
544 
545 	qdf_mem_zero(table, sizeof(table));
546 	qdf_mem_debug_print_header(print, print_priv, threshold);
547 
548 	/* hold lock while inserting to avoid use-after-free of the metadata */
549 	qdf_spin_lock(&qdf_mem_list_lock);
550 	status = qdf_list_peek_front(domain, &node);
551 	while (QDF_IS_STATUS_SUCCESS(status)) {
552 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
553 		bool is_full = qdf_mem_meta_table_insert(table, meta);
554 
555 		qdf_spin_unlock(&qdf_mem_list_lock);
556 
557 		if (is_full) {
558 			(*mem_print)(table, print, print_priv, threshold);
559 			qdf_mem_zero(table, sizeof(table));
560 		}
561 
562 		qdf_spin_lock(&qdf_mem_list_lock);
563 		status = qdf_list_peek_next(domain, node, &node);
564 	}
565 	qdf_spin_unlock(&qdf_mem_list_lock);
566 
567 	(*mem_print)(table, print, print_priv, threshold);
568 }
569 
570 /**
571  * qdf_mem_seq_start() - sequential callback to start
572  * @seq: seq_file handle
573  * @pos: The start position of the sequence
574  *
575  * Return: iterator pointer, or NULL if iteration is complete
576  */
577 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
578 {
579 	enum qdf_debug_domain domain = *pos;
580 
581 	if (!qdf_debug_domain_valid(domain))
582 		return NULL;
583 
584 	/* just use the current position as our iterator */
585 	return pos;
586 }
587 
588 /**
589  * qdf_mem_seq_next() - next sequential callback
590  * @seq: seq_file handle
591  * @v: the current iterator
592  * @pos: the current position
593  *
594  * Get the next node and release previous node.
595  *
596  * Return: iterator pointer, or NULL if iteration is complete
597  */
598 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
599 {
600 	++*pos;
601 
602 	return qdf_mem_seq_start(seq, pos);
603 }
604 
605 /**
606  * qdf_mem_seq_stop() - stop sequential callback
607  * @seq: seq_file handle
608  * @v: current iterator
609  *
610  * Return: None
611  */
612 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
613 
614 /**
615  * qdf_mem_seq_show() - print sequential callback
616  * @seq: seq_file handle
617  * @v: current iterator
618  *
619  * Return: 0 - success
620  */
621 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
622 {
623 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
624 
625 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
626 		   qdf_debug_domain_name(domain_id), domain_id);
627 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
628 			     seq_printf_printer,
629 			     seq,
630 			     0,
631 			     qdf_mem_meta_table_print);
632 
633 	return 0;
634 }
635 
636 /* sequential file operation table */
637 static const struct seq_operations qdf_mem_seq_ops = {
638 	.start = qdf_mem_seq_start,
639 	.next  = qdf_mem_seq_next,
640 	.stop  = qdf_mem_seq_stop,
641 	.show  = qdf_mem_seq_show,
642 };
643 
644 
645 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
646 {
647 	return seq_open(file, &qdf_mem_seq_ops);
648 }
649 
650 /**
651  * qdf_major_alloc_show() - print sequential callback
652  * @seq: seq_file handle
653  * @v: current iterator
654  *
655  * Return: 0 - success
656  */
657 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
658 {
659 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
660 	struct major_alloc_priv *priv;
661 	qdf_list_t *list;
662 
663 	priv = (struct major_alloc_priv *)seq->private;
664 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
665 		   qdf_debug_domain_name(domain_id), domain_id);
666 
667 	switch (priv->type) {
668 	case LIST_TYPE_MEM:
669 		list = qdf_mem_list_get(domain_id);
670 		break;
671 	case LIST_TYPE_DMA:
672 		list = qdf_mem_dma_list(domain_id);
673 		break;
674 	default:
675 		list = NULL;
676 		break;
677 	}
678 
679 	if (list)
680 		qdf_mem_domain_print(list,
681 				     seq_printf_printer,
682 				     seq,
683 				     priv->threshold,
684 				     qdf_print_major_alloc);
685 
686 	return 0;
687 }
688 
689 /* sequential file operation table created to track major allocs */
690 static const struct seq_operations qdf_major_allocs_seq_ops = {
691 	.start = qdf_mem_seq_start,
692 	.next = qdf_mem_seq_next,
693 	.stop = qdf_mem_seq_stop,
694 	.show = qdf_major_alloc_show,
695 };
696 
697 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
698 {
699 	void *private = inode->i_private;
700 	struct seq_file *seq;
701 	int rc;
702 
703 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
704 	if (rc == 0) {
705 		seq = file->private_data;
706 		seq->private = private;
707 	}
708 	return rc;
709 }
710 
711 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
712 					     const char __user *user_buf,
713 					     size_t count,
714 					     loff_t *pos)
715 {
716 	char buf[32];
717 	ssize_t buf_size;
718 	uint32_t threshold;
719 	struct seq_file *seq = file->private_data;
720 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
721 
722 	buf_size = min(count, (sizeof(buf) - 1));
723 	if (buf_size <= 0)
724 		return 0;
725 	if (copy_from_user(buf, user_buf, buf_size))
726 		return -EFAULT;
727 	buf[buf_size] = '\0';
728 	if (!kstrtou32(buf, 10, &threshold))
729 		priv->threshold = threshold;
730 	return buf_size;
731 }
732 
733 /* file operation table for listing major allocs */
734 static const struct file_operations fops_qdf_major_allocs = {
735 	.owner = THIS_MODULE,
736 	.open = qdf_major_allocs_open,
737 	.read = seq_read,
738 	.llseek = seq_lseek,
739 	.release = seq_release,
740 	.write = qdf_major_alloc_set_threshold,
741 };
742 
743 /* debugfs file operation table */
744 static const struct file_operations fops_qdf_mem_debugfs = {
745 	.owner = THIS_MODULE,
746 	.open = qdf_mem_debugfs_open,
747 	.read = seq_read,
748 	.llseek = seq_lseek,
749 	.release = seq_release,
750 };
751 
752 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
753 {
754 	if (is_initial_mem_debug_disabled)
755 		return QDF_STATUS_SUCCESS;
756 
757 	if (!qdf_mem_debugfs_root)
758 		return QDF_STATUS_E_FAILURE;
759 
760 	debugfs_create_file("list",
761 			    S_IRUSR,
762 			    qdf_mem_debugfs_root,
763 			    NULL,
764 			    &fops_qdf_mem_debugfs);
765 
766 	debugfs_create_file("major_mem_allocs",
767 			    0600,
768 			    qdf_mem_debugfs_root,
769 			    &mem_priv,
770 			    &fops_qdf_major_allocs);
771 
772 	debugfs_create_file("major_dma_allocs",
773 			    0600,
774 			    qdf_mem_debugfs_root,
775 			    &dma_priv,
776 			    &fops_qdf_major_allocs);
777 
778 	return QDF_STATUS_SUCCESS;
779 }
780 
781 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
782 {
783 	return QDF_STATUS_SUCCESS;
784 }
785 
786 #else /* MEMORY_DEBUG */
787 
788 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
789 {
790 	return QDF_STATUS_E_NOSUPPORT;
791 }
792 
793 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
794 {
795 	return QDF_STATUS_E_NOSUPPORT;
796 }
797 
798 #endif /* MEMORY_DEBUG */
799 
800 
801 static void qdf_mem_debugfs_exit(void)
802 {
803 	debugfs_remove_recursive(qdf_mem_debugfs_root);
804 	qdf_mem_debugfs_root = NULL;
805 }
806 
807 static QDF_STATUS qdf_mem_debugfs_init(void)
808 {
809 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
810 
811 	if (!qdf_debugfs_root)
812 		return QDF_STATUS_E_FAILURE;
813 
814 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
815 
816 	if (!qdf_mem_debugfs_root)
817 		return QDF_STATUS_E_FAILURE;
818 
819 
820 	debugfs_create_atomic_t("kmalloc",
821 				S_IRUSR,
822 				qdf_mem_debugfs_root,
823 				&qdf_mem_stat.kmalloc);
824 
825 	debugfs_create_atomic_t("dma",
826 				S_IRUSR,
827 				qdf_mem_debugfs_root,
828 				&qdf_mem_stat.dma);
829 
830 	debugfs_create_atomic_t("skb",
831 				S_IRUSR,
832 				qdf_mem_debugfs_root,
833 				&qdf_mem_stat.skb);
834 
835 	return QDF_STATUS_SUCCESS;
836 }
837 
838 #else /* WLAN_DEBUGFS */
839 
840 static inline void qdf_mem_dma_inc(qdf_size_t size) {}
841 static inline void qdf_mem_dma_dec(qdf_size_t size) {}
842 
843 static QDF_STATUS qdf_mem_debugfs_init(void)
844 {
845 	return QDF_STATUS_E_NOSUPPORT;
846 }
847 static void qdf_mem_debugfs_exit(void) {}
848 
849 
850 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
851 {
852 	return QDF_STATUS_E_NOSUPPORT;
853 }
854 
855 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
856 {
857 	return QDF_STATUS_E_NOSUPPORT;
858 }
859 
860 #endif /* WLAN_DEBUGFS */
861 
862 /**
863  * __qdf_mempool_init() - Create and initialize memory pool
864  *
865  * @osdev: platform device object
866  * @pool_addr: address of the pool created
867  * @elem_cnt: no. of elements in pool
868  * @elem_size: size of each pool element in bytes
869  * @flags: flags
870  *
871  * Return: 0 on success (pool handle returned via @pool_addr),
872  *         negative error value on failure
872  */
873 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
874 		       int elem_cnt, size_t elem_size, u_int32_t flags)
875 {
876 	__qdf_mempool_ctxt_t *new_pool = NULL;
877 	u_int32_t align = L1_CACHE_BYTES;
878 	unsigned long aligned_pool_mem;
879 	int pool_id;
880 	int i;
881 
882 	if (prealloc_disabled) {
883 		/* TBD: We can maintain a list of pools in qdf_device_t
884 		 * to help debugging
885 		 * when pre-allocation is not enabled
886 		 */
887 		new_pool = (__qdf_mempool_ctxt_t *)
888 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
889 		if (!new_pool)
890 			return -ENOMEM;
891 
892 		memset(new_pool, 0, sizeof(*new_pool));
893 		/* TBD: define flags for zeroing buffers etc */
894 		new_pool->flags = flags;
895 		new_pool->elem_size = elem_size;
896 		new_pool->max_elem = elem_cnt;
897 		*pool_addr = new_pool;
898 		return 0;
899 	}
900 
901 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
902 		if (!osdev->mem_pool[pool_id])
903 			break;
904 	}
905 
906 	if (pool_id == MAX_MEM_POOLS)
907 		return -ENOMEM;
908 
909 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
910 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
911 	if (!new_pool)
912 		return -ENOMEM;
913 
914 	memset(new_pool, 0, sizeof(*new_pool));
915 	/* TBD: define flags for zeroing buffers etc */
916 	new_pool->flags = flags;
917 	new_pool->pool_id = pool_id;
918 
919 	/* Round up the element size to cacheline */
920 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
921 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
922 				((align)?(align - 1):0);
923 
924 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
925 	if (!new_pool->pool_mem) {
926 			/* TBD: Check if we need get_free_pages above */
927 		kfree(new_pool);
928 		osdev->mem_pool[pool_id] = NULL;
929 		return -ENOMEM;
930 	}
931 
932 	spin_lock_init(&new_pool->lock);
933 
934 	/* Initialize free list */
935 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
936 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
937 	STAILQ_INIT(&new_pool->free_list);
938 
939 	for (i = 0; i < elem_cnt; i++)
940 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
941 			(mempool_elem_t *)(aligned_pool_mem +
942 			(new_pool->elem_size * i)), mempool_entry);
943 
944 
945 	new_pool->free_cnt = elem_cnt;
946 	*pool_addr = new_pool;
947 	return 0;
948 }
949 qdf_export_symbol(__qdf_mempool_init);
950 
951 /**
952  * __qdf_mempool_destroy() - Destroy memory pool
953  * @osdev: platform device object
954  * @pool: Handle to memory pool
955  *
956  * Returns: none
957  */
958 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
959 {
960 	int pool_id = 0;
961 
962 	if (!pool)
963 		return;
964 
965 	if (prealloc_disabled) {
966 		kfree(pool);
967 		return;
968 	}
969 
970 	pool_id = pool->pool_id;
971 
972 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
973 	kfree(pool->pool_mem);
974 	kfree(pool);
975 	osdev->mem_pool[pool_id] = NULL;
976 }
977 qdf_export_symbol(__qdf_mempool_destroy);
978 
979 /**
980  * __qdf_mempool_alloc() - Allocate an element from the memory pool
981  *
982  * @osdev: platform device object
983  * @pool: Handle to memory pool
984  *
985  * Return: Pointer to the allocated element or NULL if the pool is empty
986  */
987 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
988 {
989 	void *buf = NULL;
990 
991 	if (!pool)
992 		return NULL;
993 
994 	if (prealloc_disabled)
995 		return  qdf_mem_malloc(pool->elem_size);
996 
997 	spin_lock_bh(&pool->lock);
998 
999 	buf = STAILQ_FIRST(&pool->free_list);
1000 	if (buf) {
1001 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1002 		pool->free_cnt--;
1003 	}
1004 
1005 	/* TBD: Update free count if debug is enabled */
1006 	spin_unlock_bh(&pool->lock);
1007 
1008 	return buf;
1009 }
1010 qdf_export_symbol(__qdf_mempool_alloc);
1011 
1012 /**
1013  * __qdf_mempool_free() - Free a memory pool element
1014  * @osdev: Platform device object
1015  * @pool: Handle to memory pool
1016  * @buf: Element to be freed
1017  *
1018  * Returns: none
1019  */
1020 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1021 {
1022 	if (!pool)
1023 		return;
1024 
1025 
1026 	if (prealloc_disabled)
1027 		return qdf_mem_free(buf);
1028 
1029 	spin_lock_bh(&pool->lock);
1030 	pool->free_cnt++;
1031 
1032 	STAILQ_INSERT_TAIL
1033 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1034 	spin_unlock_bh(&pool->lock);
1035 }
1036 qdf_export_symbol(__qdf_mempool_free);
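/*
 * Minimal usage sketch for the pool API above (illustrative only; the osdev
 * handle, element size and count are hypothetical and error handling is
 * abbreviated):
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, 128, 0))
 *		return;				// pool creation failed
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		// ... use the 128-byte element ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	__qdf_mempool_destroy(osdev, pool);
 */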
1037 
1038 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
1039 /**
1040  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1041  * @size: the number of bytes to allocate
1042  *
1043  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1044  * a chunk of pre-allocated memory. If size is less than or equal to
1045  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1046  *
1047  * Return: NULL on failure, non-NULL on success
1048  */
1049 static void *qdf_mem_prealloc_get(size_t size)
1050 {
1051 	void *ptr;
1052 
1053 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1054 		return NULL;
1055 
1056 	ptr = wcnss_prealloc_get(size);
1057 	if (!ptr)
1058 		return NULL;
1059 
1060 	memset(ptr, 0, size);
1061 
1062 	return ptr;
1063 }
1064 
1065 static inline bool qdf_mem_prealloc_put(void *ptr)
1066 {
1067 	return wcnss_prealloc_put(ptr);
1068 }
1069 #else
1070 static inline void *qdf_mem_prealloc_get(size_t size)
1071 {
1072 	return NULL;
1073 }
1074 
1075 static inline bool qdf_mem_prealloc_put(void *ptr)
1076 {
1077 	return false;
1078 }
1079 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
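/*
 * Example of the prealloc fallback behaviour (illustrative; the actual
 * WCNSS_PRE_ALLOC_GET_THRESHOLD value comes from cnss_prealloc.h): a request
 * larger than the threshold first tries the platform's pre-allocated pool
 * and only falls back to the kernel allocator when the pool cannot satisfy
 * it, while smaller requests always go straight to the kernel allocator.
 */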
1080 
1081 static int qdf_mem_malloc_flags(void)
1082 {
1083 	if (in_interrupt() || irqs_disabled() || in_atomic())
1084 		return GFP_ATOMIC;
1085 
1086 	return GFP_KERNEL;
1087 }
1088 
1089 /* External Function implementation */
1090 #ifdef MEMORY_DEBUG
1091 /**
1092  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1093  *
1094  * Return: value of mem_debug_disabled qdf module argument
1095  */
1096 bool qdf_mem_debug_config_get(void)
1097 {
1098 	return mem_debug_disabled;
1099 }
1100 
1101 /**
1102  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1103  *
1104  * Return: none
1105  */
1106 static void qdf_mem_debug_init(void)
1107 {
1108 	int i;
1109 
1110 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1111 
1112 	if (is_initial_mem_debug_disabled)
1113 		return;
1114 
1115 	/* Initializing the list with a maximum size of 60000 */
1116 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1117 		qdf_list_create(&qdf_mem_domains[i], 60000);
1118 	qdf_spinlock_create(&qdf_mem_list_lock);
1119 
1120 	/* dma */
1121 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1122 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1123 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1124 }
1125 
1126 static uint32_t
1127 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1128 			       qdf_list_t *mem_list)
1129 {
1130 	if (is_initial_mem_debug_disabled)
1131 		return 0;
1132 
1133 	if (qdf_list_empty(mem_list))
1134 		return 0;
1135 
1136 	qdf_err("Memory leaks detected in %s domain!",
1137 		qdf_debug_domain_name(domain));
1138 	qdf_mem_domain_print(mem_list,
1139 			     qdf_err_printer,
1140 			     NULL,
1141 			     0,
1142 			     qdf_mem_meta_table_print);
1143 
1144 	return mem_list->count;
1145 }
1146 
1147 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1148 {
1149 	uint32_t leak_count = 0;
1150 	int i;
1151 
1152 	if (is_initial_mem_debug_disabled)
1153 		return;
1154 
1155 	/* detect and print leaks */
1156 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1157 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1158 
1159 	if (leak_count)
1160 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1161 				   leak_count);
1162 }
1163 
1164 /**
1165  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1166  *
1167  * Return: none
1168  */
1169 static void qdf_mem_debug_exit(void)
1170 {
1171 	int i;
1172 
1173 	if (is_initial_mem_debug_disabled)
1174 		return;
1175 
1176 	/* mem */
1177 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1178 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1179 		qdf_list_destroy(qdf_mem_list_get(i));
1180 
1181 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1182 
1183 	/* dma */
1184 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1185 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1186 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1187 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1188 }
1189 
1190 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1191 			   void *caller, uint32_t flag)
1192 {
1193 	QDF_STATUS status;
1194 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1195 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1196 	struct qdf_mem_header *header;
1197 	void *ptr;
1198 	unsigned long start, duration;
1199 
1200 	if (is_initial_mem_debug_disabled)
1201 		return __qdf_mem_malloc(size, func, line);
1202 
1203 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1204 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1205 		return NULL;
1206 	}
1207 
1208 	ptr = qdf_mem_prealloc_get(size);
1209 	if (ptr)
1210 		return ptr;
1211 
1212 	if (!flag)
1213 		flag = qdf_mem_malloc_flags();
1214 
1215 	start = qdf_mc_timer_get_system_time();
1216 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1217 	duration = qdf_mc_timer_get_system_time() - start;
1218 
1219 	if (duration > QDF_MEM_WARN_THRESHOLD)
1220 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1221 			 duration, size, func, line);
1222 
1223 	if (!header) {
1224 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1225 		return NULL;
1226 	}
1227 
1228 	qdf_mem_header_init(header, size, func, line, caller);
1229 	qdf_mem_trailer_init(header);
1230 	ptr = qdf_mem_get_ptr(header);
1231 
1232 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1233 	status = qdf_list_insert_front(mem_list, &header->node);
1234 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1235 	if (QDF_IS_STATUS_ERROR(status))
1236 		qdf_err("Failed to insert memory header; status %d", status);
1237 
1238 	qdf_mem_kmalloc_inc(ksize(header));
1239 
1240 	return ptr;
1241 }
1242 qdf_export_symbol(qdf_mem_malloc_debug);
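/*
 * Note on usage (sketch, based on the parameters above rather than the exact
 * wrapper macro in qdf_mem.h): in MEMORY_DEBUG builds the public allocation
 * entry points are expected to funnel into this function with the call-site
 * details, roughly:
 *
 *	buf = qdf_mem_malloc_debug(len, __func__, __LINE__, QDF_RET_IP, 0);
 *
 * so that every live allocation is recorded in the per-domain list with the
 * function name, line number and return address of its caller.
 */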
1243 
1244 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1245 {
1246 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1247 	struct qdf_mem_header *header;
1248 	enum qdf_mem_validation_bitmap error_bitmap;
1249 
1250 	if (is_initial_mem_debug_disabled) {
1251 		__qdf_mem_free(ptr);
1252 		return;
1253 	}
1254 
1255 	/* freeing a null pointer is valid */
1256 	if (qdf_unlikely(!ptr))
1257 		return;
1258 
1259 	if (qdf_mem_prealloc_put(ptr))
1260 		return;
1261 
1262 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1263 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1264 				   ptr);
1265 
1266 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1267 
1268 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1269 	header = qdf_mem_get_header(ptr);
1270 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1271 	error_bitmap |= qdf_mem_trailer_validate(header);
1272 
1273 	if (!error_bitmap) {
1274 		header->freed = true;
1275 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1276 				     &header->node);
1277 	}
1278 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1279 
1280 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1281 				    func, line);
1282 
1283 	qdf_mem_kmalloc_dec(ksize(header));
1284 	kfree(header);
1285 }
1286 qdf_export_symbol(qdf_mem_free_debug);
1287 
1288 void qdf_mem_check_for_leaks(void)
1289 {
1290 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1291 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1292 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1293 	uint32_t leaks_count = 0;
1294 
1295 	if (is_initial_mem_debug_disabled)
1296 		return;
1297 
1298 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1299 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1300 
1301 	if (leaks_count)
1302 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1303 				   leaks_count);
1304 }
1305 
1306 /**
1307  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1308  * qdf_mem_multi_pages_alloc
1309  * @osdev: OS device handle pointer
1310  * @pages: Multi page information storage
1311  * @element_size: Each element size
1312  * @element_num: Total number of elements to be allocated
1313  * @memctxt: Memory context
1314  * @cacheable: Coherent memory or cacheable memory
1315  * @func: Caller of this allocator
1316  * @line: Line number of the caller
1317  * @caller: Return address of the caller
1318  *
1319  * This function allocates a large amount of memory over multiple pages.
1320  * Large contiguous allocations fail frequently, so instead of allocating
1321  * the whole buffer in one shot, memory is allocated as multiple
1322  * non-contiguous pages which are combined at the point of actual usage.
1323  *
1324  * Return: None
1325  */
1326 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1327 				     struct qdf_mem_multi_page_t *pages,
1328 				     size_t element_size, uint16_t element_num,
1329 				     qdf_dma_context_t memctxt, bool cacheable,
1330 				     const char *func, uint32_t line,
1331 				     void *caller)
1332 {
1333 	uint16_t page_idx;
1334 	struct qdf_mem_dma_page_t *dma_pages;
1335 	void **cacheable_pages = NULL;
1336 	uint16_t i;
1337 
1338 	pages->num_element_per_page = PAGE_SIZE / element_size;
1339 	if (!pages->num_element_per_page) {
1340 		qdf_print("Invalid page %d or element size %d",
1341 			  (int)PAGE_SIZE, (int)element_size);
1342 		goto out_fail;
1343 	}
1344 
1345 	pages->num_pages = element_num / pages->num_element_per_page;
1346 	if (element_num % pages->num_element_per_page)
1347 		pages->num_pages++;
1348 
1349 	if (cacheable) {
1350 		/* Pages information storage */
1351 		pages->cacheable_pages = qdf_mem_malloc_debug(
1352 			pages->num_pages * sizeof(pages->cacheable_pages),
1353 			func, line, caller, 0);
1354 		if (!pages->cacheable_pages)
1355 			goto out_fail;
1356 
1357 		cacheable_pages = pages->cacheable_pages;
1358 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1359 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1360 					PAGE_SIZE, func, line, caller, 0);
1361 			if (!cacheable_pages[page_idx])
1362 				goto page_alloc_fail;
1363 		}
1364 		pages->dma_pages = NULL;
1365 	} else {
1366 		pages->dma_pages = qdf_mem_malloc_debug(
1367 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1368 			func, line, caller, 0);
1369 		if (!pages->dma_pages)
1370 			goto out_fail;
1371 
1372 		dma_pages = pages->dma_pages;
1373 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1374 			dma_pages->page_v_addr_start =
1375 				qdf_mem_alloc_consistent_debug(
1376 					osdev, osdev->dev, PAGE_SIZE,
1377 					&dma_pages->page_p_addr,
1378 					func, line, caller);
1379 			if (!dma_pages->page_v_addr_start) {
1380 				qdf_print("dmaable page alloc fail pi %d",
1381 					  page_idx);
1382 				goto page_alloc_fail;
1383 			}
1384 			dma_pages->page_v_addr_end =
1385 				dma_pages->page_v_addr_start + PAGE_SIZE;
1386 			dma_pages++;
1387 		}
1388 		pages->cacheable_pages = NULL;
1389 	}
1390 	return;
1391 
1392 page_alloc_fail:
1393 	if (cacheable) {
1394 		for (i = 0; i < page_idx; i++)
1395 			qdf_mem_free_debug(pages->cacheable_pages[i],
1396 					   func, line);
1397 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1398 	} else {
1399 		dma_pages = pages->dma_pages;
1400 		for (i = 0; i < page_idx; i++) {
1401 			qdf_mem_free_consistent_debug(
1402 				osdev, osdev->dev,
1403 				PAGE_SIZE, dma_pages->page_v_addr_start,
1404 				dma_pages->page_p_addr, memctxt, func, line);
1405 			dma_pages++;
1406 		}
1407 		qdf_mem_free_debug(pages->dma_pages, func, line);
1408 	}
1409 
1410 out_fail:
1411 	pages->cacheable_pages = NULL;
1412 	pages->dma_pages = NULL;
1413 	pages->num_pages = 0;
1414 }
1415 
1416 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1417 
1418 /**
1419  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1420  * @osdev: OS device handle pointer
1421  * @pages: Multi page information storage
1422  * @memctxt: Memory context
1423  * @cacheable: Coherent memory or cacheable memory
1424  * @func: Caller of this allocator
1425  * @line: Line number of the caller
1426  *
1427  * This function will free large size of memory over multiple pages.
1428  *
1429  * Return: None
1430  */
1431 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1432 				    struct qdf_mem_multi_page_t *pages,
1433 				    qdf_dma_context_t memctxt, bool cacheable,
1434 				    const char *func, uint32_t line)
1435 {
1436 	unsigned int page_idx;
1437 	struct qdf_mem_dma_page_t *dma_pages;
1438 
1439 	if (cacheable) {
1440 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1441 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1442 					   func, line);
1443 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1444 	} else {
1445 		dma_pages = pages->dma_pages;
1446 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1447 			qdf_mem_free_consistent_debug(
1448 				osdev, osdev->dev, PAGE_SIZE,
1449 				dma_pages->page_v_addr_start,
1450 				dma_pages->page_p_addr, memctxt, func, line);
1451 			dma_pages++;
1452 		}
1453 		qdf_mem_free_debug(pages->dma_pages, func, line);
1454 	}
1455 
1456 	pages->cacheable_pages = NULL;
1457 	pages->dma_pages = NULL;
1458 	pages->num_pages = 0;
1459 }
1460 
1461 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1462 
1463 #else
1464 static void qdf_mem_debug_init(void) {}
1465 
1466 static void qdf_mem_debug_exit(void) {}
1467 
1468 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1469 {
1470 	void *ptr;
1471 
1472 	ptr = qdf_mem_prealloc_get(size);
1473 	if (ptr)
1474 		return ptr;
1475 
1476 	ptr = kzalloc(size, GFP_ATOMIC);
1477 	if (!ptr) {
1478 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1479 			      size, func, line);
1480 		return NULL;
1481 	}
1482 
1483 	qdf_mem_kmalloc_inc(ksize(ptr));
1484 
1485 	return ptr;
1486 }
1487 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1488 
1489 /**
1490  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1491  * @osdev: OS device handle pointer
1492  * @pages: Multi page information storage
1493  * @element_size: Each element size
1494  * @element_num: Total number of elements to be allocated
1495  * @memctxt: Memory context
1496  * @cacheable: Coherent memory or cacheable memory
1497  *
1498  * This function allocates a large amount of memory over multiple pages.
1499  * Large contiguous allocations fail frequently, so instead of allocating
1500  * the whole buffer in one shot, memory is allocated as multiple
1501  * non-contiguous pages which are combined at the point of actual usage.
1502  *
1503  * Return: None
1504  */
1505 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1506 			       struct qdf_mem_multi_page_t *pages,
1507 			       size_t element_size, uint16_t element_num,
1508 			       qdf_dma_context_t memctxt, bool cacheable)
1509 {
1510 	uint16_t page_idx;
1511 	struct qdf_mem_dma_page_t *dma_pages;
1512 	void **cacheable_pages = NULL;
1513 	uint16_t i;
1514 
1515 	pages->num_element_per_page = PAGE_SIZE / element_size;
1516 	if (!pages->num_element_per_page) {
1517 		qdf_print("Invalid page %d or element size %d",
1518 			  (int)PAGE_SIZE, (int)element_size);
1519 		goto out_fail;
1520 	}
1521 
1522 	pages->num_pages = element_num / pages->num_element_per_page;
1523 	if (element_num % pages->num_element_per_page)
1524 		pages->num_pages++;
1525 
1526 	if (cacheable) {
1527 		/* Pages information storage */
1528 		pages->cacheable_pages = qdf_mem_malloc(
1529 			pages->num_pages * sizeof(pages->cacheable_pages));
1530 		if (!pages->cacheable_pages)
1531 			goto out_fail;
1532 
1533 		cacheable_pages = pages->cacheable_pages;
1534 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1535 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1536 			if (!cacheable_pages[page_idx])
1537 				goto page_alloc_fail;
1538 		}
1539 		pages->dma_pages = NULL;
1540 	} else {
1541 		pages->dma_pages = qdf_mem_malloc(
1542 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1543 		if (!pages->dma_pages)
1544 			goto out_fail;
1545 
1546 		dma_pages = pages->dma_pages;
1547 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1548 			dma_pages->page_v_addr_start =
1549 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1550 					 PAGE_SIZE,
1551 					&dma_pages->page_p_addr);
1552 			if (!dma_pages->page_v_addr_start) {
1553 				qdf_print("dmaable page alloc fail pi %d",
1554 					page_idx);
1555 				goto page_alloc_fail;
1556 			}
1557 			dma_pages->page_v_addr_end =
1558 				dma_pages->page_v_addr_start + PAGE_SIZE;
1559 			dma_pages++;
1560 		}
1561 		pages->cacheable_pages = NULL;
1562 	}
1563 	return;
1564 
1565 page_alloc_fail:
1566 	if (cacheable) {
1567 		for (i = 0; i < page_idx; i++)
1568 			qdf_mem_free(pages->cacheable_pages[i]);
1569 		qdf_mem_free(pages->cacheable_pages);
1570 	} else {
1571 		dma_pages = pages->dma_pages;
1572 		for (i = 0; i < page_idx; i++) {
1573 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1574 				dma_pages->page_v_addr_start,
1575 				dma_pages->page_p_addr, memctxt);
1576 			dma_pages++;
1577 		}
1578 		qdf_mem_free(pages->dma_pages);
1579 	}
1580 
1581 out_fail:
1582 	pages->cacheable_pages = NULL;
1583 	pages->dma_pages = NULL;
1584 	pages->num_pages = 0;
1585 	return;
1586 }
1587 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1588 
1589 /**
1590  * qdf_mem_multi_pages_free() - free large size of kernel memory
1591  * @osdev: OS device handle pointer
1592  * @pages: Multi page information storage
1593  * @memctxt: Memory context
1594  * @cacheable: Coherent memory or cacheable memory
1595  *
1596  * This function will free large size of memory over multiple pages.
1597  *
1598  * Return: None
1599  */
1600 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1601 			      struct qdf_mem_multi_page_t *pages,
1602 			      qdf_dma_context_t memctxt, bool cacheable)
1603 {
1604 	unsigned int page_idx;
1605 	struct qdf_mem_dma_page_t *dma_pages;
1606 
1607 	if (cacheable) {
1608 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1609 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1610 		qdf_mem_free(pages->cacheable_pages);
1611 	} else {
1612 		dma_pages = pages->dma_pages;
1613 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1614 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1615 				dma_pages->page_v_addr_start,
1616 				dma_pages->page_p_addr, memctxt);
1617 			dma_pages++;
1618 		}
1619 		qdf_mem_free(pages->dma_pages);
1620 	}
1621 
1622 	pages->cacheable_pages = NULL;
1623 	pages->dma_pages = NULL;
1624 	pages->num_pages = 0;
1625 	return;
1626 }
1627 qdf_export_symbol(qdf_mem_multi_pages_free);
1628 #endif
1629 
1630 void __qdf_mem_free(void *ptr)
1631 {
1632 	if (!ptr)
1633 		return;
1634 
1635 	if (qdf_mem_prealloc_put(ptr))
1636 		return;
1637 
1638 	qdf_mem_kmalloc_dec(ksize(ptr));
1639 
1640 	kfree(ptr);
1641 }
1642 
1643 qdf_export_symbol(__qdf_mem_free);
1644 
1645 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
1646 {
1647 	void *ptr;
1648 
1649 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1650 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1651 			     line);
1652 		return NULL;
1653 	}
1654 
1655 	ptr = qdf_mem_prealloc_get(size);
1656 	if (ptr)
1657 		return ptr;
1658 
1659 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1660 	if (!ptr)
1661 		return NULL;
1662 
1663 	qdf_mem_kmalloc_inc(ksize(ptr));
1664 
1665 	return ptr;
1666 }
1667 
1668 qdf_export_symbol(__qdf_mem_malloc);
1669 
1670 void *qdf_aligned_malloc_fl(uint32_t *size,
1671 			    void **vaddr_unaligned,
1672 				qdf_dma_addr_t *paddr_unaligned,
1673 				qdf_dma_addr_t *paddr_aligned,
1674 				uint32_t align,
1675 			    const char *func, uint32_t line)
1676 {
1677 	void *vaddr_aligned;
1678 	uint32_t align_alloc_size;
1679 
1680 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
1681 			line);
1682 	if (!*vaddr_unaligned) {
1683 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
1684 		return NULL;
1685 	}
1686 
1687 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1688 
1689 	/* Re-allocate additional bytes to align base address only if
1690 	 * the above allocation returns an unaligned address. The reason for
1691 	 * trying an exact-size allocation above is that the OS tries to
1692 	 * allocate blocks of power-of-2 pages and then frees the extra pages.
1693 	 * e.g., for a ring size of 1MB, the allocation below will
1694 	 * request 1MB plus 7 bytes for alignment, which will cause a
1695 	 * 2MB block allocation, and that sometimes fails due to
1696 	 * memory fragmentation.
1697 	 */
1698 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1699 		align_alloc_size = *size + align - 1;
1700 
1701 		qdf_mem_free(*vaddr_unaligned);
1702 		*vaddr_unaligned = qdf_mem_malloc_fl(
1703 				(qdf_size_t)align_alloc_size, func, line);
1704 		if (!*vaddr_unaligned) {
1705 			qdf_warn("Failed to alloc %uB @ %s:%d",
1706 				 align_alloc_size, func, line);
1707 			return NULL;
1708 		}
1709 
1710 		*paddr_unaligned = qdf_mem_virt_to_phys(
1711 				*vaddr_unaligned);
1712 		*size = align_alloc_size;
1713 	}
1714 
1715 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
1716 		((unsigned long)(*paddr_unaligned), align);
1717 
1718 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1719 			((unsigned long)(*paddr_aligned) -
1720 			 (unsigned long)(*paddr_unaligned)));
1721 
1722 	return vaddr_aligned;
1723 }
1724 
1725 qdf_export_symbol(qdf_aligned_malloc_fl);
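/*
 * Worked example for the alignment logic above (illustrative numbers): with
 * *size = 4096 and align = 8, if the first allocation happens to land at a
 * physical address whose low bits are non-zero, the buffer is re-allocated
 * with align_alloc_size = 4096 + 7 = 4103 bytes. qdf_align() then rounds the
 * unaligned physical address up to the next 8-byte boundary, and the same
 * byte offset is applied to the virtual address, so both returned addresses
 * stay in step.
 */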
1726 
1727 /**
1728  * qdf_mem_multi_page_link() - Make links for multi page elements
1729  * @osdev: OS device handle pointer
1730  * @pages: Multi page information storage
1731  * @elem_size: Single element size
1732  * @elem_count: Number of elements to be linked
1733  * @cacheable: Coherent memory or cacheable memory
1734  *
1735  * This function links the elements allocated across multiple pages
1736  *
1737  * Return: 0 on success
1738  */
1739 int qdf_mem_multi_page_link(qdf_device_t osdev,
1740 		struct qdf_mem_multi_page_t *pages,
1741 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1742 {
1743 	uint16_t i, i_int;
1744 	void *page_info;
1745 	void **c_elem = NULL;
1746 	uint32_t num_link = 0;
1747 
1748 	for (i = 0; i < pages->num_pages; i++) {
1749 		if (cacheable)
1750 			page_info = pages->cacheable_pages[i];
1751 		else
1752 			page_info = pages->dma_pages[i].page_v_addr_start;
1753 
1754 		if (!page_info)
1755 			return -ENOMEM;
1756 
1757 		c_elem = (void **)page_info;
1758 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1759 			if (i_int == (pages->num_element_per_page - 1)) {
1760 				if (cacheable)
1761 					*c_elem = pages->
1762 						cacheable_pages[i + 1];
1763 				else
1764 					*c_elem = pages->
1765 						dma_pages[i + 1].
1766 							page_v_addr_start;
1767 				num_link++;
1768 				break;
1769 			} else {
1770 				*c_elem =
1771 					(void *)(((char *)c_elem) + elem_size);
1772 			}
1773 			num_link++;
1774 			c_elem = (void **)*c_elem;
1775 
1776 			/* Last link established exit */
1777 			if (num_link == (elem_count - 1))
1778 				break;
1779 		}
1780 	}
1781 
1782 	if (c_elem)
1783 		*c_elem = NULL;
1784 
1785 	return 0;
1786 }
1787 qdf_export_symbol(qdf_mem_multi_page_link);
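/*
 * Minimal usage sketch for the multi-page helpers (illustrative; element
 * size/count and the memctxt value are hypothetical):
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, 0, true);
 *	if (!pages.num_pages)
 *		return;			// allocation failed
 *
 *	// chain the 1024 cacheable elements into a linked free list
 *	if (qdf_mem_multi_page_link(osdev, &pages, 64, 1024, true))
 *		goto free_pages;
 *	// ... consume elements by walking the linked list ...
 *
 * free_pages:
 *	qdf_mem_multi_pages_free(osdev, &pages, 0, true);
 */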
1788 
1789 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1790 {
1791 	/* special case where dst_addr or src_addr can be NULL */
1792 	if (!num_bytes)
1793 		return;
1794 
1795 	QDF_BUG(dst_addr);
1796 	QDF_BUG(src_addr);
1797 	if (!dst_addr || !src_addr)
1798 		return;
1799 
1800 	memcpy(dst_addr, src_addr, num_bytes);
1801 }
1802 qdf_export_symbol(qdf_mem_copy);
1803 
1804 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
1805 {
1806 	qdf_shared_mem_t *shared_mem;
1807 	qdf_dma_addr_t dma_addr, paddr;
1808 	int ret;
1809 
1810 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
1811 	if (!shared_mem)
1812 		return NULL;
1813 
1814 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
1815 				size, qdf_mem_get_dma_addr_ptr(osdev,
1816 						&shared_mem->mem_info));
1817 	if (!shared_mem->vaddr) {
1818 		qdf_err("Unable to allocate DMA memory for shared resource");
1819 		qdf_mem_free(shared_mem);
1820 		return NULL;
1821 	}
1822 
1823 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
1824 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
1825 
1826 	qdf_mem_zero(shared_mem->vaddr, size);
1827 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
1828 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1829 
1830 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
1831 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
1832 				      shared_mem->vaddr, dma_addr, size);
1833 	if (ret) {
1834 		qdf_err("Unable to get DMA sgtable");
1835 		qdf_mem_free_consistent(osdev, osdev->dev,
1836 					shared_mem->mem_info.size,
1837 					shared_mem->vaddr,
1838 					dma_addr,
1839 					qdf_get_dma_mem_context(shared_mem,
1840 								memctx));
1841 		qdf_mem_free(shared_mem);
1842 		return NULL;
1843 	}
1844 
1845 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
1846 
1847 	return shared_mem;
1848 }
1849 
1850 qdf_export_symbol(qdf_mem_shared_mem_alloc);
1851 
1852 /**
1853  * qdf_mem_copy_toio() - copy memory
1854  * @dst_addr: Pointer to destination memory location (to copy to)
1855  * @src_addr: Pointer to source memory location (to copy from)
1856  * @num_bytes: Number of bytes to copy.
1857  *
1858  * Return: none
1859  */
1860 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1861 {
1862 	if (0 == num_bytes) {
1863 		/* special case where dst_addr or src_addr can be NULL */
1864 		return;
1865 	}
1866 
1867 	if ((!dst_addr) || (!src_addr)) {
1868 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1869 			  "%s called with NULL parameter, source:%pK destination:%pK",
1870 			  __func__, src_addr, dst_addr);
1871 		QDF_ASSERT(0);
1872 		return;
1873 	}
1874 	memcpy_toio(dst_addr, src_addr, num_bytes);
1875 }
1876 
1877 qdf_export_symbol(qdf_mem_copy_toio);
1878 
1879 /**
1880  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1881  * @ptr: Pointer to memory that will be set
1882  * @value: Byte set in memory
1883  * @num_bytes: Number of bytes to be set
1884  *
1885  * Return: None
1886  */
1887 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1888 {
1889 	if (!ptr) {
1890 		qdf_print("%s called with NULL parameter ptr", __func__);
1891 		return;
1892 	}
1893 	memset_io(ptr, value, num_bytes);
1894 }
1895 
1896 qdf_export_symbol(qdf_mem_set_io);
1897 
1898 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1899 {
1900 	QDF_BUG(ptr);
1901 	if (!ptr)
1902 		return;
1903 
1904 	memset(ptr, value, num_bytes);
1905 }
1906 qdf_export_symbol(qdf_mem_set);
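/*
 * Note (editor's addition): unlike memset(), qdf_mem_set() and
 * qdf_mem_set_io() take the byte count before the value, so zeroing a
 * buffer looks like:
 *
 *	uint8_t buf[64];
 *
 *	qdf_mem_set(buf, sizeof(buf), 0);
 *
 * rather than memset(buf, 0, sizeof(buf)).
 */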
1907 
1908 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1909 {
1910 	/* special case where dst_addr or src_addr can be NULL */
1911 	if (!num_bytes)
1912 		return;
1913 
1914 	QDF_BUG(dst_addr);
1915 	QDF_BUG(src_addr);
1916 	if (!dst_addr || !src_addr)
1917 		return;
1918 
1919 	memmove(dst_addr, src_addr, num_bytes);
1920 }
1921 qdf_export_symbol(qdf_mem_move);
1922 
1923 int qdf_mem_cmp(const void *left, const void *right, size_t size)
1924 {
1925 	QDF_BUG(left);
1926 	QDF_BUG(right);
1927 
1928 	return memcmp(left, right, size);
1929 }
1930 qdf_export_symbol(qdf_mem_cmp);
1931 
1932 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1933 /**
1934  * qdf_mem_dma_alloc() - allocates memory for dma
1935  * qdf_mem_dma_alloc() - allocate memory for DMA
1936  * @dev: Pointer to device handle
1937  * @size: Size to be allocated
1938  * @phy_addr: Physical address
1939  *
1940  * Return: pointer of allocated memory or null if memory alloc fails
1941  * Return: pointer to the allocated memory, or NULL if the allocation fails
1942 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1943 				      qdf_size_t size,
1944 				      qdf_dma_addr_t *phy_addr)
1945 {
1946 	void *vaddr;
1947 
1948 	vaddr = qdf_mem_malloc(size);
1949 	*phy_addr = ((uintptr_t) vaddr);
1950 	/* the (uintptr_t) cast suppresses a "cast from pointer to integer
1951 	 * of different size" warning on some platforms
1952 	 */
1953 	*phy_addr = ((uintptr_t) vaddr);
1954 	return vaddr;
1955 }
1956 
1957 #elif defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
1958 	!defined(QCA_WIFI_QCN9000)
1959 
1960 #define QCA8074_RAM_BASE 0x50000000
1961 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1962 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1963 			qdf_dma_addr_t *phy_addr)
1964 {
1965 	void *vaddr = NULL;
1966 	int i;
1967 
1968 	*phy_addr = 0;
1969 
1970 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1971 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1972 					   qdf_mem_malloc_flags());
1973 
1974 		if (!vaddr) {
1975 			qdf_err("%s failed, size: %zu!", __func__, size);
1976 			return NULL;
1977 		}
1978 
1979 		if (*phy_addr >= QCA8074_RAM_BASE)
1980 			return vaddr;
1981 
1982 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1983 	}
1984 
1985 	return NULL;
1986 }
1987 
1988 #else
1989 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1990 				      qdf_size_t size, qdf_dma_addr_t *paddr)
1991 {
1992 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
1993 }
1994 #endif
1995 
1996 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1997 static inline void
1998 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
1999 {
2000 	qdf_mem_free(vaddr);
2001 }
2002 #else
2003 
2004 static inline void
2005 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2006 {
2007 	dma_free_coherent(dev, size, vaddr, paddr);
2008 }
2009 #endif
2010 
2011 #ifdef MEMORY_DEBUG
2012 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2013 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2014 				     const char *func, uint32_t line,
2015 				     void *caller)
2016 {
2017 	QDF_STATUS status;
2018 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2019 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2020 	struct qdf_mem_header *header;
2021 	void *vaddr;
2022 
2023 	if (is_initial_mem_debug_disabled)
2024 		return __qdf_mem_alloc_consistent(osdev, dev,
2025 						  size, paddr,
2026 						  func, line);
2027 
2028 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2029 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2030 		return NULL;
2031 	}
2032 
2033 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2034 				   paddr);
2035 
2036 	if (!vaddr) {
2037 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2038 		return NULL;
2039 	}
2040 
2041 	header = qdf_mem_dma_get_header(vaddr, size);
2042 	/* For DMA buffers we only add trailers; this call initializes
2043 	 * the header structure at the tail of the buffer.
2044 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2045 	 * do not place the header at the start of DMA buffers.
2046 	 */
2047 	qdf_mem_header_init(header, size, func, line, caller);
2048 
2049 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2050 	status = qdf_list_insert_front(mem_list, &header->node);
2051 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2052 	if (QDF_IS_STATUS_ERROR(status))
2053 		qdf_err("Failed to insert memory header; status %d", status);
2054 
2055 	qdf_mem_dma_inc(size);
2056 
2057 	return vaddr;
2058 }
2059 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
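/*
 * With MEMORY_DEBUG enabled, the debug header for DMA allocations lives at
 * the end of the buffer, so the device-visible region still starts at the
 * returned vaddr (a leading header would shift the DMA address and can
 * trigger SMMU faults). The resulting layout is roughly:
 *
 *	vaddr                               vaddr + size
 *	|<------------- size ------------->|<- QDF_DMA_MEM_DEBUG_SIZE ->|
 *	|   caller/device visible buffer   |       debug header         |
 *
 * qdf_mem_dma_get_header(vaddr, size) returns a pointer to this trailer.
 */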
2060 
2061 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2062 				   qdf_size_t size, void *vaddr,
2063 				   qdf_dma_addr_t paddr,
2064 				   qdf_dma_context_t memctx,
2065 				   const char *func, uint32_t line)
2066 {
2067 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2068 	struct qdf_mem_header *header;
2069 	enum qdf_mem_validation_bitmap error_bitmap;
2070 
2071 	if (is_initial_mem_debug_disabled) {
2072 		__qdf_mem_free_consistent(
2073 					  osdev, dev,
2074 					  size, vaddr,
2075 					  paddr, memctx);
2076 		return;
2077 	}
2078 
2079 	/* freeing a null pointer is valid */
2080 	if (qdf_unlikely(!vaddr))
2081 		return;
2082 
2083 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2084 
2085 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2086 	/* For DMA buffers we only add trailers; this call retrieves
2087 	 * the header structure from the tail of the buffer.
2088 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2089 	 * do not place the header at the start of DMA buffers.
2090 	 */
2091 	header = qdf_mem_dma_get_header(vaddr, size);
2092 	error_bitmap = qdf_mem_header_validate(header, domain);
2093 	if (!error_bitmap) {
2094 		header->freed = true;
2095 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2096 				     &header->node);
2097 	}
2098 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2099 
2100 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2101 
2102 	qdf_mem_dma_dec(header->size);
2103 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2104 }
2105 qdf_export_symbol(qdf_mem_free_consistent_debug);
2106 #endif /* MEMORY_DEBUG */
2107 
2108 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2109 			       qdf_size_t size, void *vaddr,
2110 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2111 {
2112 	qdf_mem_dma_dec(size);
2113 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2114 }
2115 
2116 qdf_export_symbol(__qdf_mem_free_consistent);
2117 
2118 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2119 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2120 				 const char *func, uint32_t line)
2121 {
2122 	void *vaddr;
2123 
2124 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2125 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2126 			     size, func, line);
2127 		return NULL;
2128 	}
2129 
2130 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2131 
2132 	if (vaddr)
2133 		qdf_mem_dma_inc(size);
2134 
2135 	return vaddr;
2136 }
2137 
2138 qdf_export_symbol(__qdf_mem_alloc_consistent);
2139 
2140 void *qdf_aligned_mem_alloc_consistent_fl(
2141 	qdf_device_t osdev, uint32_t *size,
2142 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2143 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2144 	const char *func, uint32_t line)
2145 {
2146 	void *vaddr_aligned;
2147 	uint32_t align_alloc_size;
2148 
2149 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2150 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2151 	if (!*vaddr_unaligned) {
2152 		qdf_warn("Failed to alloc %uB @ %s:%d",
2153 			 *size, func, line);
2154 		return NULL;
2155 	}
2156 
2157 	/* Re-allocate with additional bytes to align the base address only
2158 	 * if the allocation above returned an unaligned address. The
2159 	 * reason for trying an exact-size allocation first is that the OS
2160 	 * allocates blocks of power-of-2 pages and then frees the extra
2161 	 * pages. E.g., for a ring size of 1 MB, the allocation below would
2162 	 * request 1 MB plus 7 bytes for alignment, which results in a 2 MB
2163 	 * block allocation that sometimes fails due to memory
2164 	 * fragmentation.
2165 	 */
2166 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2167 		align_alloc_size = *size + align - 1;
2168 
2169 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2170 					*vaddr_unaligned,
2171 					*paddr_unaligned, 0);
2172 
2173 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2174 				osdev, osdev->dev, align_alloc_size,
2175 				paddr_unaligned);
2176 		if (!*vaddr_unaligned) {
2177 			qdf_warn("Failed to alloc %uB @ %s:%d",
2178 				 align_alloc_size, func, line);
2179 			return NULL;
2180 		}
2181 
2182 		*size = align_alloc_size;
2183 	}
2184 
2185 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2186 			(unsigned long)(*paddr_unaligned), align);
2187 
2188 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2189 				 ((unsigned long)(*paddr_aligned) -
2190 				  (unsigned long)(*paddr_unaligned)));
2191 
2192 	return vaddr_aligned;
2193 }
2194 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
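/*
 * Example (editor's illustrative sketch, assuming the
 * qdf_aligned_mem_alloc_consistent() wrapper in qdf_mem.h that supplies
 * the func/line arguments): allocating a ring whose base must be 8-byte
 * aligned. The ring_* names are hypothetical; the unaligned address/size
 * pair returned through the pointer arguments must be used for the
 * eventual qdf_mem_free_consistent() call.
 *
 *	uint32_t ring_size = 1024 * 1024;
 *	void *ring_vaddr_unaligned;
 *	qdf_dma_addr_t ring_paddr_unaligned, ring_paddr_aligned;
 *	void *ring_base;
 *
 *	ring_base = qdf_aligned_mem_alloc_consistent(osdev, &ring_size,
 *						     &ring_vaddr_unaligned,
 *						     &ring_paddr_unaligned,
 *						     &ring_paddr_aligned, 8);
 *	if (!ring_base)
 *		return QDF_STATUS_E_NOMEM;
 */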
2195 
2196 /**
2197  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2198  * @osdev: OS device handle
2199  * @bus_addr: DMA address to give to the device
2200  * @size: Size of the memory block
2201  * @direction: direction in which the data will be DMAed
2202  *
2203  * Assign memory to the remote device.
2204  * The cache lines are flushed to RAM or invalidated as needed.
2205  *
2206  * Return: none
2207  */
2208 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2209 					qdf_dma_addr_t bus_addr,
2210 					qdf_size_t size,
2211 					enum dma_data_direction direction)
2212 {
2213 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2214 }
2215 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2216 
2217 /**
2218  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2219  * @osdev: OS device handle
2220  * @bus_addr: DMA address to give to the CPU
2221  * @size: Size of the memory block
2222  * @direction: direction in which the data will be DMAed
2223  *
2224  * Assign memory to the CPU.
2225  *
2226  * Return: none
2227  */
2228 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2229 				     qdf_dma_addr_t bus_addr,
2230 				     qdf_size_t size,
2231 				     enum dma_data_direction direction)
2232 {
2233 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2234 }
2235 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
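/*
 * Example (editor's illustrative sketch): the two sync helpers bracket
 * ownership transfers of a streaming DMA buffer. The rx_paddr/rx_len names
 * are hypothetical and only show the intended calling order for a buffer
 * the device writes into.
 *
 *	// hand the buffer to the device before posting it for DMA
 *	qdf_mem_dma_sync_single_for_device(osdev, rx_paddr, rx_len,
 *					   DMA_FROM_DEVICE);
 *
 *	// ... device DMA completes ...
 *
 *	// reclaim the buffer before the CPU reads what the device wrote
 *	qdf_mem_dma_sync_single_for_cpu(osdev, rx_paddr, rx_len,
 *					DMA_FROM_DEVICE);
 */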
2236 
2237 void qdf_mem_init(void)
2238 {
2239 	qdf_mem_debug_init();
2240 	qdf_net_buf_debug_init();
2241 	qdf_mem_debugfs_init();
2242 	qdf_mem_debug_debugfs_init();
2243 }
2244 qdf_export_symbol(qdf_mem_init);
2245 
2246 void qdf_mem_exit(void)
2247 {
2248 	qdf_mem_debug_debugfs_exit();
2249 	qdf_mem_debugfs_exit();
2250 	qdf_net_buf_debug_exit();
2251 	qdf_mem_debug_exit();
2252 }
2253 qdf_export_symbol(qdf_mem_exit);
2254 
2255 /**
2256  * qdf_ether_addr_copy() - copy an Ethernet address
2257  *
2258  * @dst_addr: A six-byte array Ethernet address destination
2259  * @dst_addr: Destination six-byte Ethernet address array
2260  * @src_addr: Source six-byte Ethernet address array
2261  * Please note: dst & src must both be aligned to u16.
2262  *
2263  * Return: none
2264  */
2265 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2266 {
2267 	if ((!dst_addr) || (!src_addr)) {
2268 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2269 			  "%s called with NULL parameter, source:%pK destination:%pK",
2270 			  __func__, src_addr, dst_addr);
2271 		QDF_ASSERT(0);
2272 		return;
2273 	}
2274 	ether_addr_copy(dst_addr, src_addr);
2275 }
2276 qdf_export_symbol(qdf_ether_addr_copy);
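/*
 * Example (editor's illustrative sketch): both arguments must be at least
 * u16 aligned, since ether_addr_copy() may copy in 16-bit words. The
 * bssid/peer_mac names are hypothetical.
 *
 *	uint8_t bssid[QDF_MAC_ADDR_SIZE] __aligned(2);
 *
 *	qdf_ether_addr_copy(bssid, peer_mac);
 */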
2277 
2278