xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 
37 #ifdef CONFIG_MCL
38 #include <host_diag_core_event.h>
39 #else
40 #define host_log_low_resource_failure(code) do {} while (0)
41 #endif
42 
43 #if defined(CONFIG_CNSS)
44 #include <net/cnss.h>
45 #endif
46 
47 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
48 #include <net/cnss_prealloc.h>
49 #endif
50 
51 #ifdef MEMORY_DEBUG
52 #include "qdf_debug_domain.h"
53 #include <qdf_list.h>
54 
55 /* Preprocessor Definitions and Constants */
56 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
57 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
58 #define QDF_DEBUG_STRING_SIZE 512
59 
60 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
61 static qdf_spinlock_t qdf_mem_list_lock;
62 
63 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
64 static qdf_spinlock_t qdf_mem_dma_list_lock;
65 
66 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
67 {
68 	return &qdf_mem_domains[domain];
69 }
70 
71 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
72 {
73 	return &qdf_mem_dma_domains[domain];
74 }
75 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};
100 
/* canary values used to detect out-of-bounds writes around an allocation */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
103 
104 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
105 {
106 	return (struct qdf_mem_header *)ptr - 1;
107 }
108 
109 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
110 							    qdf_size_t size)
111 {
112 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
113 }
114 
115 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
116 {
117 	return (uint64_t *)((void *)(header + 1) + header->size);
118 }
119 
120 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
121 {
122 	return (void *)(header + 1);
123 }
124 
125 /* number of bytes needed for the qdf memory debug information */
126 #define QDF_MEM_DEBUG_SIZE \
127 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
128 
129 /* number of bytes needed for the qdf dma memory debug information */
130 #define QDF_DMA_MEM_DEBUG_SIZE \
131 	(sizeof(struct qdf_mem_header))
132 
133 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
134 {
135 	QDF_BUG(header);
136 	if (!header)
137 		return;
138 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
139 }
140 
141 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
142 				const char *file, uint32_t line, void *caller)
143 {
144 	QDF_BUG(header);
145 	if (!header)
146 		return;
147 
148 	header->domain = qdf_debug_domain_get();
149 	header->freed = false;
150 
151 	/* copy the file name, rather than pointing to it */
152 	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
153 
154 	header->line = line;
155 	header->size = size;
156 	header->caller = caller;
157 	header->header = WLAN_MEM_HEADER;
158 	header->time = qdf_get_log_timestamp();
159 }
160 
/* bitmap of distinct corruption signatures found on a tracked allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};
171 
172 /**
173  * qdf_mem_validate_list_node() - validate that the node is in a list
174  * @qdf_node: node to check for being in a list
175  *
176  * Return: true if the node validly linked in an anchored doubly linked list
177  */
178 static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
179 {
180 	struct list_head *node = qdf_node;
181 
182 	/*
183 	 * if the node is an empty list, it is not tied to an anchor node
184 	 * and must have been removed with list_del_init
185 	 */
186 	if (list_empty(node))
187 		return false;
188 
189 	if (!node->prev || !node->next)
190 		return false;
191 
192 	if (node->prev->next != node || node->next->prev != node)
193 		return false;
194 
195 	return true;
196 }
197 
198 static enum qdf_mem_validation_bitmap
199 qdf_mem_trailer_validate(struct qdf_mem_header *header)
200 {
201 	enum qdf_mem_validation_bitmap error_bitmap = 0;
202 
203 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
204 		error_bitmap |= QDF_MEM_BAD_TRAILER;
205 	return error_bitmap;
206 }
207 
/* run all header sanity checks; returns a bitmap of detected problems */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is uint8_t: exactly `true` means double free, any other
	 * non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	/* an out-of-range domain is corruption; a valid but different
	 * domain means the allocation outlived its domain
	 */
	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
236 
/* log every error present in @error_bitmap, then panic; no-op when clean */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	/* any detected corruption is fatal by design */
	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", file, line);
}
278 #endif /* MEMORY_DEBUG */
279 
/* module param: when non-zero, mempools fall back to dynamic allocation */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);
283 
284 #if defined WLAN_DEBUGFS
285 
286 /* Debugfs root directory for qdf_mem */
287 static struct dentry *qdf_mem_debugfs_root;
288 
/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total outstanding kmalloc bytes
 * @dma:	total outstanding dma bytes
 * @skb:	total outstanding skb bytes
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;
300 
/* account @size bytes of kmalloc memory in the debugfs counters */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* account @size bytes of dma memory in the debugfs counters */
static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

/* account @size bytes of skb memory in the debugfs counters */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* release @size bytes from the kmalloc counter on free */
static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* release @size bytes from the dma counter on free */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

/* release @size bytes from the skb counter on free */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}
330 
331 #ifdef MEMORY_DEBUG
/* qdf_abstract_print adapter that routes output to the QDF error trace */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}
342 
/* qdf_abstract_print adapter writing to a seq_file, one line per call */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
355 
/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type (zero marks an unused entry)
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};
373 
374 /*
375  * The table depth defines the de-duplication proximity scope.
376  * A deeper table takes more time, so choose any optimum value.
377  */
378 #define QDF_MEM_STAT_TABLE_SIZE 8
379 
380 /**
381  * qdf_mem_domain_print_header() - memory domain header print logic
382  * @print: the print adapter function
383  * @print_priv: the private data to be consumed by @print
384  *
385  * Return: None
386  */
387 static void qdf_mem_domain_print_header(qdf_abstract_print print,
388 					void *print_priv)
389 {
390 	print(print_priv,
391 	      "--------------------------------------------------------------");
392 	print(print_priv,
393 	      " count    size     total    filename     caller    timestamp");
394 	print(print_priv,
395 	      "--------------------------------------------------------------");
396 }
397 
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per de-duplicated allocation site, and accumulates all
 * sites into a single "WLAN_BUG_RCA" summary line emitted at the end.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* a zero count marks the end of the populated entries */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].file,
		      table[i].line, table[i].caller,
		      table[i].time);
		/* append this site to the summary; qdf_scnprintf truncates
		 * safely once debug_str fills up
		 */
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].file,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}
439 
/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * De-duplicates on (file, line, size, caller): a matching entry has its
 * count bumped; otherwise @meta claims the first unused slot.
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* count == 0 marks a free slot; claim it for @meta */
		if (!table[i].count) {
			qdf_str_lcopy(table[i].file, meta->file,
				      QDF_MEM_FILE_NAME_SIZE);
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			table[i].time = meta->time;
			break;
		}

		if (qdf_str_eq(table[i].file, meta->file) &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}
476 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Walks the domain list, de-duplicating entries into a fixed-size table;
 * whenever the table fills, it is flushed via @print and reset. The list
 * lock is dropped around printing (which may sleep), then reacquired to
 * continue iterating from the last node.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		/* the header is the list node (node is its first member) */
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever remains in the partially-filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}
517 
518 /**
519  * qdf_mem_seq_start() - sequential callback to start
520  * @seq: seq_file handle
521  * @pos: The start position of the sequence
522  *
523  * Return: iterator pointer, or NULL if iteration is complete
524  */
525 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
526 {
527 	enum qdf_debug_domain domain = *pos;
528 
529 	if (!qdf_debug_domain_valid(domain))
530 		return NULL;
531 
532 	/* just use the current position as our iterator */
533 	return pos;
534 }
535 
536 /**
537  * qdf_mem_seq_next() - next sequential callback
538  * @seq: seq_file handle
539  * @v: the current iterator
540  * @pos: the current position
541  *
542  * Get the next node and release previous node.
543  *
544  * Return: iterator pointer, or NULL if iteration is complete
545  */
546 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
547 {
548 	++*pos;
549 
550 	return qdf_mem_seq_start(seq, pos);
551 }
552 
/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release; the iterator is just the seq_file position.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
561 
562 /**
563  * qdf_mem_seq_show() - print sequential callback
564  * @seq: seq_file handle
565  * @v: current iterator
566  *
567  * Return: 0 - success
568  */
569 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
570 {
571 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
572 
573 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
574 		   qdf_debug_domain_name(domain_id), domain_id);
575 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
576 			     seq_printf_printer, seq);
577 
578 	return 0;
579 }
580 
/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


/* bind the seq_file iterator above to the debugfs "list" file */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
603 
/* expose the per-domain allocation tables via the debugfs "list" file */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

/* nothing to tear down; removing the root dir cleans up "list" too */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}
622 
623 #else /* MEMORY_DEBUG */
624 
/* MEMORY_DEBUG disabled: no allocation tables to expose via debugfs */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
634 
635 #endif /* MEMORY_DEBUG */
636 
637 
/* remove the whole "mem" debugfs tree and drop the cached root dentry */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}
643 
/* create the "mem" debugfs dir plus kmalloc/dma/skb counter files */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
674 
675 #else /* WLAN_DEBUGFS */
676 
/* WLAN_DEBUGFS disabled: stat accounting and debugfs hooks compile away */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
699 
700 #endif /* WLAN_DEBUGFS */
701 
702 /**
703  * __qdf_mempool_init() - Create and initialize memory pool
704  *
705  * @osdev: platform device object
706  * @pool_addr: address of the pool created
707  * @elem_cnt: no. of elements in pool
708  * @elem_size: size of each pool element in bytes
709  * @flags: flags
710  *
711  * return: Handle to memory pool or NULL if allocation failed
712  */
713 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
714 		       int elem_cnt, size_t elem_size, u_int32_t flags)
715 {
716 	__qdf_mempool_ctxt_t *new_pool = NULL;
717 	u_int32_t align = L1_CACHE_BYTES;
718 	unsigned long aligned_pool_mem;
719 	int pool_id;
720 	int i;
721 
722 	if (prealloc_disabled) {
723 		/* TBD: We can maintain a list of pools in qdf_device_t
724 		 * to help debugging
725 		 * when pre-allocation is not enabled
726 		 */
727 		new_pool = (__qdf_mempool_ctxt_t *)
728 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
729 		if (new_pool == NULL)
730 			return QDF_STATUS_E_NOMEM;
731 
732 		memset(new_pool, 0, sizeof(*new_pool));
733 		/* TBD: define flags for zeroing buffers etc */
734 		new_pool->flags = flags;
735 		new_pool->elem_size = elem_size;
736 		new_pool->max_elem = elem_cnt;
737 		*pool_addr = new_pool;
738 		return 0;
739 	}
740 
741 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
742 		if (osdev->mem_pool[pool_id] == NULL)
743 			break;
744 	}
745 
746 	if (pool_id == MAX_MEM_POOLS)
747 		return -ENOMEM;
748 
749 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
750 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
751 	if (new_pool == NULL)
752 		return -ENOMEM;
753 
754 	memset(new_pool, 0, sizeof(*new_pool));
755 	/* TBD: define flags for zeroing buffers etc */
756 	new_pool->flags = flags;
757 	new_pool->pool_id = pool_id;
758 
759 	/* Round up the element size to cacheline */
760 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
761 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
762 				((align)?(align - 1):0);
763 
764 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
765 	if (new_pool->pool_mem == NULL) {
766 			/* TBD: Check if we need get_free_pages above */
767 		kfree(new_pool);
768 		osdev->mem_pool[pool_id] = NULL;
769 		return -ENOMEM;
770 	}
771 
772 	spin_lock_init(&new_pool->lock);
773 
774 	/* Initialize free list */
775 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
776 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
777 	STAILQ_INIT(&new_pool->free_list);
778 
779 	for (i = 0; i < elem_cnt; i++)
780 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
781 			(mempool_elem_t *)(aligned_pool_mem +
782 			(new_pool->elem_size * i)), mempool_entry);
783 
784 
785 	new_pool->free_cnt = elem_cnt;
786 	*pool_addr = new_pool;
787 	return 0;
788 }
789 qdf_export_symbol(__qdf_mempool_init);
790 
791 /**
792  * __qdf_mempool_destroy() - Destroy memory pool
793  * @osdev: platform device object
794  * @Handle: to memory pool
795  *
796  * Returns: none
797  */
798 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
799 {
800 	int pool_id = 0;
801 
802 	if (!pool)
803 		return;
804 
805 	if (prealloc_disabled) {
806 		kfree(pool);
807 		return;
808 	}
809 
810 	pool_id = pool->pool_id;
811 
812 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
813 	kfree(pool->pool_mem);
814 	kfree(pool);
815 	osdev->mem_pool[pool_id] = NULL;
816 }
817 qdf_export_symbol(__qdf_mempool_destroy);
818 
819 /**
820  * __qdf_mempool_alloc() - Allocate an element memory pool
821  *
822  * @osdev: platform device object
823  * @Handle: to memory pool
824  *
825  * Return: Pointer to the allocated element or NULL if the pool is empty
826  */
827 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
828 {
829 	void *buf = NULL;
830 
831 	if (!pool)
832 		return NULL;
833 
834 	if (prealloc_disabled)
835 		return  qdf_mem_malloc(pool->elem_size);
836 
837 	spin_lock_bh(&pool->lock);
838 
839 	buf = STAILQ_FIRST(&pool->free_list);
840 	if (buf != NULL) {
841 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
842 		pool->free_cnt--;
843 	}
844 
845 	/* TBD: Update free count if debug is enabled */
846 	spin_unlock_bh(&pool->lock);
847 
848 	return buf;
849 }
850 qdf_export_symbol(__qdf_mempool_alloc);
851 
852 /**
853  * __qdf_mempool_free() - Free a memory pool element
854  * @osdev: Platform device object
855  * @pool: Handle to memory pool
856  * @buf: Element to be freed
857  *
858  * Returns: none
859  */
860 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
861 {
862 	if (!pool)
863 		return;
864 
865 
866 	if (prealloc_disabled)
867 		return qdf_mem_free(buf);
868 
869 	spin_lock_bh(&pool->lock);
870 	pool->free_cnt++;
871 
872 	STAILQ_INSERT_TAIL
873 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
874 	spin_unlock_bh(&pool->lock);
875 }
876 qdf_export_symbol(__qdf_mempool_free);
877 
/**
 * qdf_mem_alloc_outline() - allocation QDF memory
 * @osdev: platform device object (unused; kept for API compatibility)
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
qdf_export_symbol(qdf_mem_alloc_outline);
897 
/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
qdf_export_symbol(qdf_mem_free_outline);
913 
/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
qdf_export_symbol(qdf_mem_zero_outline);
930 
931 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* prealloc chunks are recycled; scrub any previous contents */
	memset(ptr, 0, size);

	return ptr;
}

/* hand @ptr back to the prealloc pool; true if the pool claimed it */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
962 #else
/* no prealloc support: always fall through to regular allocation */
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
972 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
973 
974 static int qdf_mem_malloc_flags(void)
975 {
976 	if (in_interrupt() || irqs_disabled() || in_atomic())
977 		return GFP_ATOMIC;
978 
979 	return GFP_KERNEL;
980 }
981 
982 /* External Function implementation */
983 #ifdef MEMORY_DEBUG
984 
/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initializing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma lists use max size 0 — presumably unbounded; confirm against
	 * qdf_list_create semantics
	 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);
}
1004 
/* report any remaining allocations in @mem_list; returns the leak count */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}
1018 
/* scan every debug domain in @domains; panic if any leaks were found */
static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		panic("%u fatal memory leaks detected!", leak_count);
}
1031 
/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Panics on any outstanding allocation, then tears down the tracking
 * lists and their locks for both regular and DMA memory.
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}
1054 
/**
 * qdf_mem_malloc_debug() - tracked allocation with header/trailer canaries
 * @size: bytes requested by the caller
 * @file: allocating file name (copied into the header)
 * @line: allocating line number
 * @caller: caller return address for leak reports
 * @flag: GFP flags; 0 means derive from the current context
 *
 * Return: pointer to the user area, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* prealloc'd chunks bypass header/trailer tracking entirely */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* time the allocation so slow (sleeping) mallocs get flagged */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	/* NOTE(review): on insert failure the allocation is still returned
	 * but untracked; a later free will see QDF_MEM_BAD_NODE
	 */
	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);
1105 
/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free()
 * @ptr: pointer previously returned by qdf_mem_malloc_debug();
 *	 NULL is accepted and ignored
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the debug header and trailer guarding @ptr, unlinks the
 * allocation from its tracking list, and frees the underlying buffer.
 * Corruption, double-free, or domain mismatch is reported through
 * qdf_mem_header_assert_valid().
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* buffers from the pre-allocated pool were never tracked */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* an address this low cannot have a debug header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	/* only untrack the node when the guards validated cleanly */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);
1141 
1142 void qdf_mem_check_for_leaks(void)
1143 {
1144 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1145 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1146 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1147 	uint32_t leaks_count = 0;
1148 
1149 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1150 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1151 
1152 	if (leaks_count)
1153 		panic("%u fatal memory leaks detected!", leaks_count);
1154 }
1155 
1156 #else
/* MEMORY_DEBUG disabled: leak-tracking setup/teardown compile to no-ops */
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}
1160 
1161 /**
1162  * qdf_mem_malloc() - allocation QDF memory
1163  * @size: Number of bytes of memory to allocate.
1164  *
1165  * This function will dynamicallly allocate the specified number of bytes of
1166  * memory.
1167  *
1168  * Return:
1169  * Upon successful allocate, returns a non-NULL pointer to the allocated
1170  * memory.  If this function is unable to allocate the amount of memory
1171  * specified (for any reason) it returns NULL.
1172  */
1173 void *qdf_mem_malloc(size_t size)
1174 {
1175 	void *ptr;
1176 
1177 	ptr = qdf_mem_prealloc_get(size);
1178 	if (ptr)
1179 		return ptr;
1180 
1181 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1182 	if (!ptr)
1183 		return NULL;
1184 
1185 	qdf_mem_kmalloc_inc(ksize(ptr));
1186 
1187 	return ptr;
1188 }
1189 qdf_export_symbol(qdf_mem_malloc);
1190 
1191 /**
1192  * qdf_mem_malloc_atomic() - allocation QDF memory atomically
1193  * @size: Number of bytes of memory to allocate.
1194  *
1195  * This function will dynamicallly allocate the specified number of bytes of
1196  * memory.
1197  *
1198  * Return:
1199  * Upon successful allocate, returns a non-NULL pointer to the allocated
1200  * memory.  If this function is unable to allocate the amount of memory
1201  * specified (for any reason) it returns NULL.
1202  */
1203 void *qdf_mem_malloc_atomic(size_t size)
1204 {
1205 	void *ptr;
1206 
1207 	ptr = qdf_mem_prealloc_get(size);
1208 	if (ptr)
1209 		return ptr;
1210 
1211 	ptr = kzalloc(size, GFP_ATOMIC);
1212 	if (!ptr)
1213 		return NULL;
1214 
1215 	qdf_mem_kmalloc_inc(ksize(ptr));
1216 
1217 	return ptr;
1218 }
1219 
1220 qdf_export_symbol(qdf_mem_malloc_atomic);
1221 
1222 /**
1223  * qdf_mem_free() - free QDF memory
1224  * @ptr: Pointer to the starting address of the memory to be free'd.
1225  *
1226  * This function will free the memory pointed to by 'ptr'.
1227  *
1228  * Return: None
1229  */
1230 void qdf_mem_free(void *ptr)
1231 {
1232 	if (ptr == NULL)
1233 		return;
1234 
1235 	if (qdf_mem_prealloc_put(ptr))
1236 		return;
1237 
1238 	qdf_mem_kmalloc_dec(ksize(ptr));
1239 
1240 	kfree(ptr);
1241 }
1242 qdf_export_symbol(qdf_mem_free);
1243 #endif
1244 
1245 /**
1246  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1247  * @osdev: OS device handle pointer
1248  * @pages: Multi page information storage
1249  * @element_size: Each element size
1250  * @element_num: Total number of elements should be allocated
1251  * @memctxt: Memory context
1252  * @cacheable: Coherent memory or cacheable memory
1253  *
1254  * This function will allocate large size of memory over multiple pages.
1255  * Large size of contiguous memory allocation will fail frequently, then
1256  * instead of allocate large memory by one shot, allocate through multiple, non
1257  * contiguous memory and combine pages when actual usage
1258  *
1259  * Return: None
1260  */
1261 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1262 			       struct qdf_mem_multi_page_t *pages,
1263 			       size_t element_size, uint16_t element_num,
1264 			       qdf_dma_context_t memctxt, bool cacheable)
1265 {
1266 	uint16_t page_idx;
1267 	struct qdf_mem_dma_page_t *dma_pages;
1268 	void **cacheable_pages = NULL;
1269 	uint16_t i;
1270 
1271 	pages->num_element_per_page = PAGE_SIZE / element_size;
1272 	if (!pages->num_element_per_page) {
1273 		qdf_print("Invalid page %d or element size %d",
1274 			  (int)PAGE_SIZE, (int)element_size);
1275 		goto out_fail;
1276 	}
1277 
1278 	pages->num_pages = element_num / pages->num_element_per_page;
1279 	if (element_num % pages->num_element_per_page)
1280 		pages->num_pages++;
1281 
1282 	if (cacheable) {
1283 		/* Pages information storage */
1284 		pages->cacheable_pages = qdf_mem_malloc(
1285 			pages->num_pages * sizeof(pages->cacheable_pages));
1286 		if (!pages->cacheable_pages) {
1287 			qdf_print("Cacheable page storage alloc fail");
1288 			goto out_fail;
1289 		}
1290 
1291 		cacheable_pages = pages->cacheable_pages;
1292 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1293 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1294 			if (!cacheable_pages[page_idx]) {
1295 				qdf_print("cacheable page alloc fail, pi %d",
1296 					  page_idx);
1297 				goto page_alloc_fail;
1298 			}
1299 		}
1300 		pages->dma_pages = NULL;
1301 	} else {
1302 		pages->dma_pages = qdf_mem_malloc(
1303 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1304 		if (!pages->dma_pages) {
1305 			qdf_print("dmaable page storage alloc fail");
1306 			goto out_fail;
1307 		}
1308 
1309 		dma_pages = pages->dma_pages;
1310 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1311 			dma_pages->page_v_addr_start =
1312 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1313 					 PAGE_SIZE,
1314 					&dma_pages->page_p_addr);
1315 			if (!dma_pages->page_v_addr_start) {
1316 				qdf_print("dmaable page alloc fail pi %d",
1317 					page_idx);
1318 				goto page_alloc_fail;
1319 			}
1320 			dma_pages->page_v_addr_end =
1321 				dma_pages->page_v_addr_start + PAGE_SIZE;
1322 			dma_pages++;
1323 		}
1324 		pages->cacheable_pages = NULL;
1325 	}
1326 	return;
1327 
1328 page_alloc_fail:
1329 	if (cacheable) {
1330 		for (i = 0; i < page_idx; i++)
1331 			qdf_mem_free(pages->cacheable_pages[i]);
1332 		qdf_mem_free(pages->cacheable_pages);
1333 	} else {
1334 		dma_pages = pages->dma_pages;
1335 		for (i = 0; i < page_idx; i++) {
1336 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1337 				dma_pages->page_v_addr_start,
1338 				dma_pages->page_p_addr, memctxt);
1339 			dma_pages++;
1340 		}
1341 		qdf_mem_free(pages->dma_pages);
1342 	}
1343 
1344 out_fail:
1345 	pages->cacheable_pages = NULL;
1346 	pages->dma_pages = NULL;
1347 	pages->num_pages = 0;
1348 	return;
1349 }
1350 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1351 
1352 /**
1353  * qdf_mem_multi_pages_free() - free large size of kernel memory
1354  * @osdev: OS device handle pointer
1355  * @pages: Multi page information storage
1356  * @memctxt: Memory context
1357  * @cacheable: Coherent memory or cacheable memory
1358  *
1359  * This function will free large size of memory over multiple pages.
1360  *
1361  * Return: None
1362  */
1363 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1364 			      struct qdf_mem_multi_page_t *pages,
1365 			      qdf_dma_context_t memctxt, bool cacheable)
1366 {
1367 	unsigned int page_idx;
1368 	struct qdf_mem_dma_page_t *dma_pages;
1369 
1370 	if (cacheable) {
1371 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1372 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1373 		qdf_mem_free(pages->cacheable_pages);
1374 	} else {
1375 		dma_pages = pages->dma_pages;
1376 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1377 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1378 				dma_pages->page_v_addr_start,
1379 				dma_pages->page_p_addr, memctxt);
1380 			dma_pages++;
1381 		}
1382 		qdf_mem_free(pages->dma_pages);
1383 	}
1384 
1385 	pages->cacheable_pages = NULL;
1386 	pages->dma_pages = NULL;
1387 	pages->num_pages = 0;
1388 	return;
1389 }
1390 qdf_export_symbol(qdf_mem_multi_pages_free);
1391 
1392 /**
1393  * qdf_mem_multi_page_link() - Make links for multi page elements
1394  * @osdev: OS device handle pointer
1395  * @pages: Multi page information storage
1396  * @elem_size: Single element size
1397  * @elem_count: elements count should be linked
1398  * @cacheable: Coherent memory or cacheable memory
1399  *
1400  * This function will make links for multi page allocated structure
1401  *
1402  * Return: 0 success
1403  */
1404 int qdf_mem_multi_page_link(qdf_device_t osdev,
1405 		struct qdf_mem_multi_page_t *pages,
1406 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1407 {
1408 	uint16_t i, i_int;
1409 	void *page_info;
1410 	void **c_elem = NULL;
1411 	uint32_t num_link = 0;
1412 
1413 	for (i = 0; i < pages->num_pages; i++) {
1414 		if (cacheable)
1415 			page_info = pages->cacheable_pages[i];
1416 		else
1417 			page_info = pages->dma_pages[i].page_v_addr_start;
1418 
1419 		if (!page_info)
1420 			return -ENOMEM;
1421 
1422 		c_elem = (void **)page_info;
1423 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1424 			if (i_int == (pages->num_element_per_page - 1)) {
1425 				if (cacheable)
1426 					*c_elem = pages->
1427 						cacheable_pages[i + 1];
1428 				else
1429 					*c_elem = pages->
1430 						dma_pages[i + 1].
1431 							page_v_addr_start;
1432 				num_link++;
1433 				break;
1434 			} else {
1435 				*c_elem =
1436 					(void *)(((char *)c_elem) + elem_size);
1437 			}
1438 			num_link++;
1439 			c_elem = (void **)*c_elem;
1440 
1441 			/* Last link established exit */
1442 			if (num_link == (elem_count - 1))
1443 				break;
1444 		}
1445 	}
1446 
1447 	if (c_elem)
1448 		*c_elem = NULL;
1449 
1450 	return 0;
1451 }
1452 qdf_export_symbol(qdf_mem_multi_page_link);
1453 
1454 /**
1455  * qdf_mem_copy() - copy memory
1456  * @dst_addr: Pointer to destination memory location (to copy to)
1457  * @src_addr: Pointer to source memory location (to copy from)
1458  * @num_bytes: Number of bytes to copy.
1459  *
1460  * Copy host memory from one location to another, similar to memcpy in
1461  * standard C.  Note this function does not specifically handle overlapping
1462  * source and destination memory locations.  Calling this function with
1463  * overlapping source and destination memory locations will result in
1464  * unpredictable results.  Use qdf_mem_move() if the memory locations
1465  * for the source and destination are overlapping (or could be overlapping!)
1466  *
1467  * Return: none
1468  */
1469 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1470 {
1471 	if (0 == num_bytes) {
1472 		/* special case where dst_addr or src_addr can be NULL */
1473 		return;
1474 	}
1475 
1476 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1477 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1478 			  "%s called with NULL parameter, source:%pK destination:%pK",
1479 			  __func__, src_addr, dst_addr);
1480 		QDF_ASSERT(0);
1481 		return;
1482 	}
1483 	memcpy(dst_addr, src_addr, num_bytes);
1484 }
1485 qdf_export_symbol(qdf_mem_copy);
1486 
1487 /**
1488  * qdf_mem_zero() - zero out memory
1489  * @ptr: pointer to memory that will be set to zero
1490  * @num_bytes: number of bytes zero
1491  *
1492  * This function sets the memory location to all zeros, essentially clearing
1493  * the memory.
1494  *
1495  * Return: None
1496  */
1497 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1498 {
1499 	if (0 == num_bytes) {
1500 		/* special case where ptr can be NULL */
1501 		return;
1502 	}
1503 
1504 	if (ptr == NULL) {
1505 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1506 			  "%s called with NULL parameter ptr", __func__);
1507 		return;
1508 	}
1509 	memset(ptr, 0, num_bytes);
1510 }
1511 qdf_export_symbol(qdf_mem_zero);
1512 
1513 /**
1514  * qdf_mem_copy_toio() - copy memory
1515  * @dst_addr: Pointer to destination memory location (to copy to)
1516  * @src_addr: Pointer to source memory location (to copy from)
1517  * @num_bytes: Number of bytes to copy.
1518  *
1519  * Return: none
1520  */
1521 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1522 {
1523 	if (0 == num_bytes) {
1524 		/* special case where dst_addr or src_addr can be NULL */
1525 		return;
1526 	}
1527 
1528 	if ((!dst_addr) || (!src_addr)) {
1529 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1530 			  "%s called with NULL parameter, source:%pK destination:%pK",
1531 			  __func__, src_addr, dst_addr);
1532 		QDF_ASSERT(0);
1533 		return;
1534 	}
1535 	memcpy_toio(dst_addr, src_addr, num_bytes);
1536 }
1537 
1538 qdf_export_symbol(qdf_mem_copy_toio);
1539 
1540 /**
1541  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1542  * @ptr: Pointer to memory that will be set
1543  * @value: Byte set in memory
1544  * @num_bytes: Number of bytes to be set
1545  *
1546  * Return: None
1547  */
1548 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1549 {
1550 	if (!ptr) {
1551 		qdf_print("%s called with NULL parameter ptr", __func__);
1552 		return;
1553 	}
1554 	memset_io(ptr, value, num_bytes);
1555 }
1556 
1557 qdf_export_symbol(qdf_mem_set_io);
1558 
1559 /**
1560  * qdf_mem_set() - set (fill) memory with a specified byte value.
1561  * @ptr: Pointer to memory that will be set
1562  * @num_bytes: Number of bytes to be set
1563  * @value: Byte set in memory
1564  *
1565  * Return: None
1566  */
1567 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1568 {
1569 	if (ptr == NULL) {
1570 		qdf_print("%s called with NULL parameter ptr", __func__);
1571 		return;
1572 	}
1573 	memset(ptr, value, num_bytes);
1574 }
1575 qdf_export_symbol(qdf_mem_set);
1576 
1577 /**
1578  * qdf_mem_move() - move memory
1579  * @dst_addr: pointer to destination memory location (to move to)
1580  * @src_addr: pointer to source memory location (to move from)
1581  * @num_bytes: number of bytes to move.
1582  *
1583  * Move host memory from one location to another, similar to memmove in
1584  * standard C.  Note this function *does* handle overlapping
1585  * source and destination memory locations.
1586 
1587  * Return: None
1588  */
1589 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1590 {
1591 	if (0 == num_bytes) {
1592 		/* special case where dst_addr or src_addr can be NULL */
1593 		return;
1594 	}
1595 
1596 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1597 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1598 			  "%s called with NULL parameter, source:%pK destination:%pK",
1599 			  __func__, src_addr, dst_addr);
1600 		QDF_ASSERT(0);
1601 		return;
1602 	}
1603 	memmove(dst_addr, src_addr, num_bytes);
1604 }
1605 qdf_export_symbol(qdf_mem_move);
1606 
1607 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1608 /**
1609  * qdf_mem_dma_alloc() - allocates memory for dma
1610  * @osdev: OS device handle
1611  * @dev: Pointer to device handle
1612  * @size: Size to be allocated
1613  * @phy_addr: Physical address
1614  *
1615  * Return: pointer of allocated memory or null if memory alloc fails
1616  */
1617 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1618 				      qdf_size_t size,
1619 				      qdf_dma_addr_t *phy_addr)
1620 {
1621 	void *vaddr;
1622 
1623 	vaddr = qdf_mem_malloc(size);
1624 	*phy_addr = ((uintptr_t) vaddr);
1625 	/* using this type conversion to suppress "cast from pointer to integer
1626 	 * of different size" warning on some platforms
1627 	 */
1628 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1629 	return vaddr;
1630 }
1631 
1632 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1633 #define QCA8074_RAM_BASE 0x50000000
1634 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1635 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1636 			qdf_dma_addr_t *phy_addr)
1637 {
1638 	void *vaddr = NULL;
1639 	int i;
1640 
1641 	*phy_addr = 0;
1642 
1643 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1644 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1645 					   qdf_mem_malloc_flags());
1646 
1647 		if (!vaddr) {
1648 			qdf_err("%s failed , size: %zu!", __func__, size);
1649 			return NULL;
1650 		}
1651 
1652 		if (*phy_addr >= QCA8074_RAM_BASE)
1653 			return vaddr;
1654 
1655 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1656 	}
1657 
1658 	return NULL;
1659 }
1660 
1661 #else
/* Default case: delegate straight to the kernel's coherent DMA allocator */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
1667 #endif
1668 
1669 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* Simulation/SDIO/USB hosts: the buffer came from qdf_mem_malloc(), so
 * release it the same way; @dev, @size and @paddr are unused here
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
1675 #else
1676 
/* Default case: return the buffer to the kernel's coherent DMA allocator */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
1682 #endif
1683 
1684 #ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Requested buffer size in bytes
 * @paddr: Out-parameter for the physical (DMA) address
 * @file: File name of the call site
 * @line: Line number of the call site
 * @caller: Address of the calling function
 *
 * Allocates size + QDF_DMA_MEM_DEBUG_SIZE bytes so a debug header can be
 * appended after the buffer, and records the allocation in the current
 * debug domain's DMA tracking list for leak detection.
 *
 * Return: virtual address of the buffer, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* reject zero-size and oversized (> QDF_MEM_MAX_MALLOC) requests */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1728 
/**
 * qdf_mem_free_consistent_debug() - free tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size that was originally requested (excluding the debug header)
 * @vaddr: Virtual address returned by qdf_mem_alloc_consistent_debug();
 *	   NULL is accepted and ignored
 * @paddr: Physical (DMA) address of the buffer
 * @memctx: Memory context
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the trailing debug header, unlinks the allocation from the
 * DMA tracking list, and releases the buffer.
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	/* only untrack the node when the header validated cleanly */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);
1764 
1765 #else
1766 
1767 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1768 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1769 {
1770 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1771 
1772 	if (vaddr)
1773 		qdf_mem_dma_inc(size);
1774 
1775 	return vaddr;
1776 }
1777 qdf_export_symbol(qdf_mem_alloc_consistent);
1778 
/**
 * qdf_mem_free_consistent() - free DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the buffer being freed
 * @vaddr: Virtual address returned by qdf_mem_alloc_consistent()
 * @paddr: Physical (DMA) address of the buffer
 * @memctx: Memory context
 */
void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);
1787 
1788 #endif /* MEMORY_DEBUG */
1789 
1790 /**
1791  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1792  * @osdev: OS device handle
1793  * @bus_addr: dma address to give to the device
1794  * @size: Size of the memory block
1795  * @direction: direction data will be DMAed
1796  *
1797  * Assign memory to the remote device.
1798  * The cache lines are flushed to ram or invalidated as needed.
1799  *
1800  * Return: none
1801  */
1802 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1803 					qdf_dma_addr_t bus_addr,
1804 					qdf_size_t size,
1805 					enum dma_data_direction direction)
1806 {
1807 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1808 }
1809 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1810 
1811 /**
1812  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1813  * @osdev: OS device handle
1814  * @bus_addr: dma address to give to the cpu
1815  * @size: Size of the memory block
1816  * @direction: direction data will be DMAed
1817  *
1818  * Assign memory to the CPU.
1819  *
1820  * Return: none
1821  */
1822 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1823 				     qdf_dma_addr_t bus_addr,
1824 				     qdf_size_t size,
1825 				     enum dma_data_direction direction)
1826 {
1827 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1828 }
1829 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1830 
/* Bring up memory debug tracking and debugfs entries; each call is a
 * no-op when the corresponding debug feature is compiled out
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);
1839 
/* Tear down memory debug facilities in the reverse order of qdf_mem_init() */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);
1848 
1849 /**
1850  * qdf_ether_addr_copy() - copy an Ethernet address
1851  *
1852  * @dst_addr: A six-byte array Ethernet address destination
1853  * @src_addr: A six-byte array Ethernet address source
1854  *
1855  * Please note: dst & src must both be aligned to u16.
1856  *
1857  * Return: none
1858  */
1859 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1860 {
1861 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1862 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1863 			  "%s called with NULL parameter, source:%pK destination:%pK",
1864 			  __func__, src_addr, dst_addr);
1865 		QDF_ASSERT(0);
1866 		return;
1867 	}
1868 	ether_addr_copy(dst_addr, src_addr);
1869 }
1870 qdf_export_symbol(qdf_ether_addr_copy);
1871 
1872