xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 
37 #ifdef CONFIG_MCL
38 #include <host_diag_core_event.h>
39 #else
40 #define host_log_low_resource_failure(code) do {} while (0)
41 #endif
42 
43 #if defined(CONFIG_CNSS)
44 #include <net/cnss.h>
45 #endif
46 
47 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
48 #include <net/cnss_prealloc.h>
49 #endif
50 
51 #ifdef MEMORY_DEBUG
52 #include "qdf_debug_domain.h"
53 #include <qdf_list.h>
54 
/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */
#define QDF_DEBUG_STRING_SIZE 512

/* per-domain lists of outstanding (not yet freed) regular allocations */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

/* per-domain lists of outstanding DMA allocations */
static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;

/* qdf_mem_list_get() - get the allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

/* qdf_mem_dma_list() - get the DMA allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}
75 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};
100 
/* sentinel patterns written before and after each tracked allocation */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

/* qdf_mem_get_header() - the debug header sits immediately before @ptr */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

/*
 * qdf_mem_dma_get_header() - for DMA memory the header is stored after the
 * user buffer (at @ptr + @size) instead of before it
 */
static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
}

/*
 * qdf_mem_get_trailer() - trailer sentinel lives right after the user area
 * (note: arithmetic on void * is a GCC extension)
 */
static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

/* qdf_mem_get_ptr() - user-visible pointer for a given debug header */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))
132 
/* qdf_mem_trailer_init() - write the trailer sentinel for @header */
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

/*
 * qdf_mem_header_init() - populate the debug header for a new allocation
 * @header: the header to initialize
 * @size: size of the user area in bytes
 * @file: file name of the allocating call site
 * @line: line number of the allocating call site
 * @caller: caller address, for leak reports
 */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *file, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;

	/* copy the file name, rather than pointing to it */
	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}
160 
/* bitmap of distinct error conditions detectable on a tracked allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,	/* header sentinel overwritten */
	QDF_MEM_BAD_TRAILER = 1 << 1,	/* trailer sentinel overwritten */
	QDF_MEM_BAD_SIZE = 1 << 2,	/* recorded size is implausible */
	QDF_MEM_DOUBLE_FREE = 1 << 3,	/* freed flag already set */
	QDF_MEM_BAD_FREED = 1 << 4,	/* freed flag is neither 0 nor 1 */
	QDF_MEM_BAD_NODE = 1 << 5,	/* tracking list node is corrupt */
	QDF_MEM_BAD_DOMAIN = 1 << 6,	/* recorded domain id out of range */
	QDF_MEM_WRONG_DOMAIN = 1 << 7,	/* freed from a different domain */
};
171 
172 /**
173  * qdf_mem_validate_list_node() - validate that the node is in a list
174  * @qdf_node: node to check for being in a list
175  *
176  * Return: true if the node validly linked in an anchored doubly linked list
177  */
178 static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
179 {
180 	struct list_head *node = qdf_node;
181 
182 	/*
183 	 * if the node is an empty list, it is not tied to an anchor node
184 	 * and must have been removed with list_del_init
185 	 */
186 	if (list_empty(node))
187 		return false;
188 
189 	if (!node->prev || !node->next)
190 		return false;
191 
192 	if (node->prev->next != node || node->next->prev != node)
193 		return false;
194 
195 	return true;
196 }
197 
198 static enum qdf_mem_validation_bitmap
199 qdf_mem_trailer_validate(struct qdf_mem_header *header)
200 {
201 	enum qdf_mem_validation_bitmap error_bitmap = 0;
202 
203 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
204 		error_bitmap |= QDF_MEM_BAD_TRAILER;
205 	return error_bitmap;
206 }
207 
/*
 * qdf_mem_header_validate() - check a debug header for corruption
 * @header: the header to validate
 * @domain: the debug domain the caller expects the allocation to belong to
 *
 * Return: bitmap of all detected QDF_MEM_* error conditions (0 if clean)
 */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/*
	 * freed is a uint8_t: exactly `true` means double free, while any
	 * other non-zero value can only come from memory corruption
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
236 
/*
 * qdf_mem_header_assert_valid() - log every detected error, then panic
 * @header: the (possibly corrupted) header being reported
 * @current_domain: the active debug domain, for mismatch reporting
 * @error_bitmap: conditions detected by the validate helpers
 * @file: file name of the call site being reported
 * @line: line number of the call site being reported
 *
 * No-op when @error_bitmap is 0; otherwise logs each condition and never
 * returns (panics).
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; found %s(%d), expected %s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	panic("A fatal memory error was detected @ %s:%d",
	      file, line);
}
279 #endif /* MEMORY_DEBUG */
280 
/* module param: non-zero (the default) disables pre-allocated mempools */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);
284 
285 #if defined WLAN_DEBUGFS
286 
287 /* Debugfs root directory for qdf_mem */
288 static struct dentry *qdf_mem_debugfs_root;
289 
/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total kmalloc allocations
 * @dma:	total dma allocations
 * @skb:	total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

/*
 * The helpers below maintain the running byte totals exposed through the
 * debugfs "kmalloc"/"dma"/"skb" files created in qdf_mem_debugfs_init().
 */

static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}
331 
332 #ifdef MEMORY_DEBUG
/* qdf_abstract_print adapter which routes to the QDF error trace log */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* qdf_abstract_print adapter which writes to a debugfs seq_file (@priv) */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	/* each print call produces one line in the seq_file output */
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
356 
/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 *
 * Entries are aggregated by the (file, line, size, caller) tuple; a zero
 * @count marks an unused slot.
 */
struct __qdf_mem_info {
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8
380 
381 /**
382  * qdf_mem_domain_print_header() - memory domain header print logic
383  * @print: the print adapter function
384  * @print_priv: the private data to be consumed by @print
385  *
386  * Return: None
387  */
388 static void qdf_mem_domain_print_header(qdf_abstract_print print,
389 					void *print_priv)
390 {
391 	print(print_priv,
392 	      "--------------------------------------------------------------");
393 	print(print_priv,
394 	      " count    size     total    filename     caller    timestamp");
395 	print(print_priv,
396 	      "--------------------------------------------------------------");
397 }
398 
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per aggregated call site, and additionally builds a
 * single "WLAN_BUG_RCA" summary string of all call sites which is emitted
 * through @print at the end.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	/* entries are packed from index 0; the first empty slot ends them */
	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].file,
		      table[i].line, table[i].caller,
		      table[i].time);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].file,
				     table[i].line,
				     table[i].caller);
	}
	/* NOTE(review): the prefix line is emitted even for an empty table */
	print(print_priv, "%s", debug_str);
}
440 
441 /**
442  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
443  * @table: the memory metadata table to insert into
444  * @meta: the memory metadata to insert
445  *
446  * Return: true if the table is full after inserting, false otherwise
447  */
448 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
449 				      struct qdf_mem_header *meta)
450 {
451 	int i;
452 
453 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
454 		if (!table[i].count) {
455 			qdf_str_lcopy(table[i].file, meta->file,
456 				      QDF_MEM_FILE_NAME_SIZE);
457 			table[i].line = meta->line;
458 			table[i].size = meta->size;
459 			table[i].count = 1;
460 			table[i].caller = meta->caller;
461 			table[i].time = meta->time;
462 			break;
463 		}
464 
465 		if (qdf_str_eq(table[i].file, meta->file) &&
466 		    table[i].line == meta->line &&
467 		    table[i].size == meta->size &&
468 		    table[i].caller == meta->caller) {
469 			table[i].count++;
470 			break;
471 		}
472 	}
473 
474 	/* return true if the table is now full */
475 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
476 }
477 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Walks the domain's allocation list, de-duplicating entries into a small
 * table which is flushed through @print whenever it fills.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock while printing, which may be slow */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		/*
		 * NOTE(review): @node is re-used after the unlock window
		 * above; this assumes it cannot be freed concurrently —
		 * confirm against the callers' locking expectations
		 */
		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever (partial) table contents remain */
	qdf_mem_meta_table_print(table, print, print_priv);
}
518 
/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* the sequence position doubles as the debug domain id */
	enum qdf_debug_domain domain = *pos;

	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release; the iterator is just the position pointer.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator (points at the current domain id)
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};
589 
590 
/* open handler for the "list" debugfs file; hooks up the seq iterator */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
604 
/* create the "list" debugfs entry exposing per-domain live allocations */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

/* nothing to do here; the whole tree is removed by qdf_mem_debugfs_exit() */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}
623 
624 #else /* MEMORY_DEBUG */
625 
/* without MEMORY_DEBUG there is no allocation list to expose via debugfs */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
635 
636 #endif /* MEMORY_DEBUG */
637 
638 
/* remove the entire qdf_mem debugfs tree (safe even if root is NULL) */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}
644 
/* build the qdf_mem debugfs directory and its atomic counter files */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	/*
	 * NOTE(review): newer kernels return ERR_PTR() rather than NULL from
	 * debugfs_create_dir() on failure — confirm against the target kernel
	 */
	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
675 
676 #else /* WLAN_DEBUGFS */
677 
/* WLAN_DEBUGFS disabled: stat accounting and debugfs hooks become no-ops */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
700 
701 #endif /* WLAN_DEBUGFS */
702 
703 /**
704  * __qdf_mempool_init() - Create and initialize memory pool
705  *
706  * @osdev: platform device object
707  * @pool_addr: address of the pool created
708  * @elem_cnt: no. of elements in pool
709  * @elem_size: size of each pool element in bytes
710  * @flags: flags
711  *
712  * return: Handle to memory pool or NULL if allocation failed
713  */
714 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
715 		       int elem_cnt, size_t elem_size, u_int32_t flags)
716 {
717 	__qdf_mempool_ctxt_t *new_pool = NULL;
718 	u_int32_t align = L1_CACHE_BYTES;
719 	unsigned long aligned_pool_mem;
720 	int pool_id;
721 	int i;
722 
723 	if (prealloc_disabled) {
724 		/* TBD: We can maintain a list of pools in qdf_device_t
725 		 * to help debugging
726 		 * when pre-allocation is not enabled
727 		 */
728 		new_pool = (__qdf_mempool_ctxt_t *)
729 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
730 		if (new_pool == NULL)
731 			return QDF_STATUS_E_NOMEM;
732 
733 		memset(new_pool, 0, sizeof(*new_pool));
734 		/* TBD: define flags for zeroing buffers etc */
735 		new_pool->flags = flags;
736 		new_pool->elem_size = elem_size;
737 		new_pool->max_elem = elem_cnt;
738 		*pool_addr = new_pool;
739 		return 0;
740 	}
741 
742 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
743 		if (osdev->mem_pool[pool_id] == NULL)
744 			break;
745 	}
746 
747 	if (pool_id == MAX_MEM_POOLS)
748 		return -ENOMEM;
749 
750 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
751 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
752 	if (new_pool == NULL)
753 		return -ENOMEM;
754 
755 	memset(new_pool, 0, sizeof(*new_pool));
756 	/* TBD: define flags for zeroing buffers etc */
757 	new_pool->flags = flags;
758 	new_pool->pool_id = pool_id;
759 
760 	/* Round up the element size to cacheline */
761 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
762 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
763 				((align)?(align - 1):0);
764 
765 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
766 	if (new_pool->pool_mem == NULL) {
767 			/* TBD: Check if we need get_free_pages above */
768 		kfree(new_pool);
769 		osdev->mem_pool[pool_id] = NULL;
770 		return -ENOMEM;
771 	}
772 
773 	spin_lock_init(&new_pool->lock);
774 
775 	/* Initialize free list */
776 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
777 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
778 	STAILQ_INIT(&new_pool->free_list);
779 
780 	for (i = 0; i < elem_cnt; i++)
781 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
782 			(mempool_elem_t *)(aligned_pool_mem +
783 			(new_pool->elem_size * i)), mempool_entry);
784 
785 
786 	new_pool->free_cnt = elem_cnt;
787 	*pool_addr = new_pool;
788 	return 0;
789 }
790 qdf_export_symbol(__qdf_mempool_init);
791 
792 /**
793  * __qdf_mempool_destroy() - Destroy memory pool
794  * @osdev: platform device object
795  * @Handle: to memory pool
796  *
797  * Returns: none
798  */
799 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
800 {
801 	int pool_id = 0;
802 
803 	if (!pool)
804 		return;
805 
806 	if (prealloc_disabled) {
807 		kfree(pool);
808 		return;
809 	}
810 
811 	pool_id = pool->pool_id;
812 
813 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
814 	kfree(pool->pool_mem);
815 	kfree(pool);
816 	osdev->mem_pool[pool_id] = NULL;
817 }
818 qdf_export_symbol(__qdf_mempool_destroy);
819 
820 /**
821  * __qdf_mempool_alloc() - Allocate an element memory pool
822  *
823  * @osdev: platform device object
824  * @Handle: to memory pool
825  *
826  * Return: Pointer to the allocated element or NULL if the pool is empty
827  */
828 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
829 {
830 	void *buf = NULL;
831 
832 	if (!pool)
833 		return NULL;
834 
835 	if (prealloc_disabled)
836 		return  qdf_mem_malloc(pool->elem_size);
837 
838 	spin_lock_bh(&pool->lock);
839 
840 	buf = STAILQ_FIRST(&pool->free_list);
841 	if (buf != NULL) {
842 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
843 		pool->free_cnt--;
844 	}
845 
846 	/* TBD: Update free count if debug is enabled */
847 	spin_unlock_bh(&pool->lock);
848 
849 	return buf;
850 }
851 qdf_export_symbol(__qdf_mempool_alloc);
852 
853 /**
854  * __qdf_mempool_free() - Free a memory pool element
855  * @osdev: Platform device object
856  * @pool: Handle to memory pool
857  * @buf: Element to be freed
858  *
859  * Returns: none
860  */
861 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
862 {
863 	if (!pool)
864 		return;
865 
866 
867 	if (prealloc_disabled)
868 		return qdf_mem_free(buf);
869 
870 	spin_lock_bh(&pool->lock);
871 	pool->free_cnt++;
872 
873 	STAILQ_INSERT_TAIL
874 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
875 	spin_unlock_bh(&pool->lock);
876 }
877 qdf_export_symbol(__qdf_mempool_free);
878 
/**
 * qdf_mem_alloc_outline() - allocation QDF memory
 * @osdev: platform device object
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
qdf_export_symbol(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether the memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
qdf_export_symbol(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
qdf_export_symbol(qdf_mem_zero_outline);
931 
932 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* prealloc'd chunks are recycled; zero them like a fresh kzalloc */
	memset(ptr, 0, size);

	return ptr;
}

/* return @ptr to the prealloc pool; false if it did not come from there */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
974 
975 static int qdf_mem_malloc_flags(void)
976 {
977 	if (in_interrupt() || irqs_disabled() || in_atomic())
978 		return GFP_ATOMIC;
979 
980 	return GFP_KERNEL;
981 }
982 
983 /* External Function implementation */
984 #ifdef MEMORY_DEBUG
985 
/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Creates the per-domain tracking lists/locks for regular and DMA
 * allocations, then initializes skb tracking.
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initializing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma (max size 0; presumably unbounded — see qdf_list_create) */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);

	/* skb */
	qdf_net_buf_debug_init();
}
1008 
/*
 * qdf_mem_domain_check_for_leaks() - report outstanding allocations
 * @domain: id of the domain, for the log message
 * @mem_list: that domain's allocation tracking list
 *
 * Return: number of leaked allocations (0 if the list is empty)
 */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}
1022 
/* report leaks in every domain of @domains; panic if any were found */
static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		panic("%u fatal memory leaks detected!", leak_count);
}
1035 
/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Checks every domain for leaked allocations (panicking if any are found),
 * then tears down the tracking lists and their locks.
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* skb */
	qdf_net_buf_debug_exit();

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}
1061 
/*
 * qdf_mem_malloc_debug() - tracked allocation with header/trailer sentinels
 * @size: number of user bytes to allocate (0 < size <= QDF_MEM_MAX_MALLOC)
 * @file: file name of the call site
 * @line: line number of the call site
 * @caller: caller address, recorded in the debug header
 * @flag: kmalloc GFP flags; 0 selects them from the current context
 *
 * Return: pointer to the (zeroed) user area, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* pre-allocated chunks are not tracked by the debug machinery */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* time the allocation to flag unexpectedly slow (sleeping) mallocs */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	/* the allocation is still returned even if tracking insert fails */
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);
1112 
/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: pointer returned by qdf_mem_malloc_debug(); NULL is a no-op
 * @file: file name of the call site
 * @line: line number of the call site
 *
 * Validates the debug header and trailing guard bytes of @ptr, unlinks the
 * allocation from its domain's tracking list, and releases the memory.
 * Validation failures are reported via qdf_mem_header_assert_valid().
 *
 * Return: None
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* pool memory goes back to the pre-allocated pool instead */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* an address this low cannot have a debug header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	/* only unlink when validation passed, to avoid corrupting the list */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* reports (and presumably aborts) when error_bitmap is non-zero */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);
1148 
1149 void qdf_mem_check_for_leaks(void)
1150 {
1151 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1152 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1153 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1154 	uint32_t leaks_count = 0;
1155 
1156 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1157 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1158 
1159 	if (leaks_count)
1160 		panic("%u fatal memory leaks detected!", leaks_count);
1161 }
1162 
1163 #else
/* MEMORY_DEBUG disabled: allocation tracking init/exit become no-ops */
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}
1167 
1168 /**
1169  * qdf_mem_malloc() - allocation QDF memory
1170  * @size: Number of bytes of memory to allocate.
1171  *
1172  * This function will dynamicallly allocate the specified number of bytes of
1173  * memory.
1174  *
1175  * Return:
1176  * Upon successful allocate, returns a non-NULL pointer to the allocated
1177  * memory.  If this function is unable to allocate the amount of memory
1178  * specified (for any reason) it returns NULL.
1179  */
1180 void *qdf_mem_malloc(size_t size)
1181 {
1182 	void *ptr;
1183 
1184 	ptr = qdf_mem_prealloc_get(size);
1185 	if (ptr)
1186 		return ptr;
1187 
1188 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1189 	if (!ptr)
1190 		return NULL;
1191 
1192 	qdf_mem_kmalloc_inc(ksize(ptr));
1193 
1194 	return ptr;
1195 }
1196 qdf_export_symbol(qdf_mem_malloc);
1197 
1198 /**
1199  * qdf_mem_malloc_atomic() - allocation QDF memory atomically
1200  * @size: Number of bytes of memory to allocate.
1201  *
1202  * This function will dynamicallly allocate the specified number of bytes of
1203  * memory.
1204  *
1205  * Return:
1206  * Upon successful allocate, returns a non-NULL pointer to the allocated
1207  * memory.  If this function is unable to allocate the amount of memory
1208  * specified (for any reason) it returns NULL.
1209  */
1210 void *qdf_mem_malloc_atomic(size_t size)
1211 {
1212 	void *ptr;
1213 
1214 	ptr = qdf_mem_prealloc_get(size);
1215 	if (ptr)
1216 		return ptr;
1217 
1218 	ptr = kzalloc(size, GFP_ATOMIC);
1219 	if (!ptr)
1220 		return NULL;
1221 
1222 	qdf_mem_kmalloc_inc(ksize(ptr));
1223 
1224 	return ptr;
1225 }
1226 
1227 qdf_export_symbol(qdf_mem_malloc_atomic);
1228 
1229 /**
1230  * qdf_mem_free() - free QDF memory
1231  * @ptr: Pointer to the starting address of the memory to be free'd.
1232  *
1233  * This function will free the memory pointed to by 'ptr'.
1234  *
1235  * Return: None
1236  */
1237 void qdf_mem_free(void *ptr)
1238 {
1239 	if (ptr == NULL)
1240 		return;
1241 
1242 	if (qdf_mem_prealloc_put(ptr))
1243 		return;
1244 
1245 	qdf_mem_kmalloc_dec(ksize(ptr));
1246 
1247 	kfree(ptr);
1248 }
1249 qdf_export_symbol(qdf_mem_free);
1250 #endif
1251 
1252 /**
1253  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1254  * @osdev: OS device handle pointer
1255  * @pages: Multi page information storage
1256  * @element_size: Each element size
1257  * @element_num: Total number of elements should be allocated
1258  * @memctxt: Memory context
1259  * @cacheable: Coherent memory or cacheable memory
1260  *
1261  * This function will allocate large size of memory over multiple pages.
1262  * Large size of contiguous memory allocation will fail frequently, then
1263  * instead of allocate large memory by one shot, allocate through multiple, non
1264  * contiguous memory and combine pages when actual usage
1265  *
1266  * Return: None
1267  */
1268 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1269 			       struct qdf_mem_multi_page_t *pages,
1270 			       size_t element_size, uint16_t element_num,
1271 			       qdf_dma_context_t memctxt, bool cacheable)
1272 {
1273 	uint16_t page_idx;
1274 	struct qdf_mem_dma_page_t *dma_pages;
1275 	void **cacheable_pages = NULL;
1276 	uint16_t i;
1277 
1278 	pages->num_element_per_page = PAGE_SIZE / element_size;
1279 	if (!pages->num_element_per_page) {
1280 		qdf_print("Invalid page %d or element size %d",
1281 			  (int)PAGE_SIZE, (int)element_size);
1282 		goto out_fail;
1283 	}
1284 
1285 	pages->num_pages = element_num / pages->num_element_per_page;
1286 	if (element_num % pages->num_element_per_page)
1287 		pages->num_pages++;
1288 
1289 	if (cacheable) {
1290 		/* Pages information storage */
1291 		pages->cacheable_pages = qdf_mem_malloc(
1292 			pages->num_pages * sizeof(pages->cacheable_pages));
1293 		if (!pages->cacheable_pages) {
1294 			qdf_print("Cacheable page storage alloc fail");
1295 			goto out_fail;
1296 		}
1297 
1298 		cacheable_pages = pages->cacheable_pages;
1299 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1300 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1301 			if (!cacheable_pages[page_idx]) {
1302 				qdf_print("cacheable page alloc fail, pi %d",
1303 					  page_idx);
1304 				goto page_alloc_fail;
1305 			}
1306 		}
1307 		pages->dma_pages = NULL;
1308 	} else {
1309 		pages->dma_pages = qdf_mem_malloc(
1310 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1311 		if (!pages->dma_pages) {
1312 			qdf_print("dmaable page storage alloc fail");
1313 			goto out_fail;
1314 		}
1315 
1316 		dma_pages = pages->dma_pages;
1317 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1318 			dma_pages->page_v_addr_start =
1319 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1320 					 PAGE_SIZE,
1321 					&dma_pages->page_p_addr);
1322 			if (!dma_pages->page_v_addr_start) {
1323 				qdf_print("dmaable page alloc fail pi %d",
1324 					page_idx);
1325 				goto page_alloc_fail;
1326 			}
1327 			dma_pages->page_v_addr_end =
1328 				dma_pages->page_v_addr_start + PAGE_SIZE;
1329 			dma_pages++;
1330 		}
1331 		pages->cacheable_pages = NULL;
1332 	}
1333 	return;
1334 
1335 page_alloc_fail:
1336 	if (cacheable) {
1337 		for (i = 0; i < page_idx; i++)
1338 			qdf_mem_free(pages->cacheable_pages[i]);
1339 		qdf_mem_free(pages->cacheable_pages);
1340 	} else {
1341 		dma_pages = pages->dma_pages;
1342 		for (i = 0; i < page_idx; i++) {
1343 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1344 				dma_pages->page_v_addr_start,
1345 				dma_pages->page_p_addr, memctxt);
1346 			dma_pages++;
1347 		}
1348 		qdf_mem_free(pages->dma_pages);
1349 	}
1350 
1351 out_fail:
1352 	pages->cacheable_pages = NULL;
1353 	pages->dma_pages = NULL;
1354 	pages->num_pages = 0;
1355 	return;
1356 }
1357 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1358 
1359 /**
1360  * qdf_mem_multi_pages_free() - free large size of kernel memory
1361  * @osdev: OS device handle pointer
1362  * @pages: Multi page information storage
1363  * @memctxt: Memory context
1364  * @cacheable: Coherent memory or cacheable memory
1365  *
1366  * This function will free large size of memory over multiple pages.
1367  *
1368  * Return: None
1369  */
1370 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1371 			      struct qdf_mem_multi_page_t *pages,
1372 			      qdf_dma_context_t memctxt, bool cacheable)
1373 {
1374 	unsigned int page_idx;
1375 	struct qdf_mem_dma_page_t *dma_pages;
1376 
1377 	if (cacheable) {
1378 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1379 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1380 		qdf_mem_free(pages->cacheable_pages);
1381 	} else {
1382 		dma_pages = pages->dma_pages;
1383 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1384 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1385 				dma_pages->page_v_addr_start,
1386 				dma_pages->page_p_addr, memctxt);
1387 			dma_pages++;
1388 		}
1389 		qdf_mem_free(pages->dma_pages);
1390 	}
1391 
1392 	pages->cacheable_pages = NULL;
1393 	pages->dma_pages = NULL;
1394 	pages->num_pages = 0;
1395 	return;
1396 }
1397 qdf_export_symbol(qdf_mem_multi_pages_free);
1398 
1399 /**
1400  * qdf_mem_multi_page_link() - Make links for multi page elements
1401  * @osdev: OS device handle pointer
1402  * @pages: Multi page information storage
1403  * @elem_size: Single element size
1404  * @elem_count: elements count should be linked
1405  * @cacheable: Coherent memory or cacheable memory
1406  *
1407  * This function will make links for multi page allocated structure
1408  *
1409  * Return: 0 success
1410  */
1411 int qdf_mem_multi_page_link(qdf_device_t osdev,
1412 		struct qdf_mem_multi_page_t *pages,
1413 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1414 {
1415 	uint16_t i, i_int;
1416 	void *page_info;
1417 	void **c_elem = NULL;
1418 	uint32_t num_link = 0;
1419 
1420 	for (i = 0; i < pages->num_pages; i++) {
1421 		if (cacheable)
1422 			page_info = pages->cacheable_pages[i];
1423 		else
1424 			page_info = pages->dma_pages[i].page_v_addr_start;
1425 
1426 		if (!page_info)
1427 			return -ENOMEM;
1428 
1429 		c_elem = (void **)page_info;
1430 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1431 			if (i_int == (pages->num_element_per_page - 1)) {
1432 				if (cacheable)
1433 					*c_elem = pages->
1434 						cacheable_pages[i + 1];
1435 				else
1436 					*c_elem = pages->
1437 						dma_pages[i + 1].
1438 							page_v_addr_start;
1439 				num_link++;
1440 				break;
1441 			} else {
1442 				*c_elem =
1443 					(void *)(((char *)c_elem) + elem_size);
1444 			}
1445 			num_link++;
1446 			c_elem = (void **)*c_elem;
1447 
1448 			/* Last link established exit */
1449 			if (num_link == (elem_count - 1))
1450 				break;
1451 		}
1452 	}
1453 
1454 	if (c_elem)
1455 		*c_elem = NULL;
1456 
1457 	return 0;
1458 }
1459 qdf_export_symbol(qdf_mem_multi_page_link);
1460 
1461 /**
1462  * qdf_mem_copy() - copy memory
1463  * @dst_addr: Pointer to destination memory location (to copy to)
1464  * @src_addr: Pointer to source memory location (to copy from)
1465  * @num_bytes: Number of bytes to copy.
1466  *
1467  * Copy host memory from one location to another, similar to memcpy in
1468  * standard C.  Note this function does not specifically handle overlapping
1469  * source and destination memory locations.  Calling this function with
1470  * overlapping source and destination memory locations will result in
1471  * unpredictable results.  Use qdf_mem_move() if the memory locations
1472  * for the source and destination are overlapping (or could be overlapping!)
1473  *
1474  * Return: none
1475  */
1476 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1477 {
1478 	if (0 == num_bytes) {
1479 		/* special case where dst_addr or src_addr can be NULL */
1480 		return;
1481 	}
1482 
1483 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1484 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1485 			  "%s called with NULL parameter, source:%pK destination:%pK",
1486 			  __func__, src_addr, dst_addr);
1487 		QDF_ASSERT(0);
1488 		return;
1489 	}
1490 	memcpy(dst_addr, src_addr, num_bytes);
1491 }
1492 qdf_export_symbol(qdf_mem_copy);
1493 
1494 /**
1495  * qdf_mem_zero() - zero out memory
1496  * @ptr: pointer to memory that will be set to zero
1497  * @num_bytes: number of bytes zero
1498  *
1499  * This function sets the memory location to all zeros, essentially clearing
1500  * the memory.
1501  *
1502  * Return: None
1503  */
1504 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1505 {
1506 	if (0 == num_bytes) {
1507 		/* special case where ptr can be NULL */
1508 		return;
1509 	}
1510 
1511 	if (ptr == NULL) {
1512 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1513 			  "%s called with NULL parameter ptr", __func__);
1514 		return;
1515 	}
1516 	memset(ptr, 0, num_bytes);
1517 }
1518 qdf_export_symbol(qdf_mem_zero);
1519 
1520 /**
1521  * qdf_mem_set() - set (fill) memory with a specified byte value.
1522  * @ptr: Pointer to memory that will be set
1523  * @num_bytes: Number of bytes to be set
1524  * @value: Byte set in memory
1525  *
1526  * Return: None
1527  */
1528 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1529 {
1530 	if (ptr == NULL) {
1531 		qdf_print("%s called with NULL parameter ptr", __func__);
1532 		return;
1533 	}
1534 	memset(ptr, value, num_bytes);
1535 }
1536 qdf_export_symbol(qdf_mem_set);
1537 
1538 /**
1539  * qdf_mem_move() - move memory
1540  * @dst_addr: pointer to destination memory location (to move to)
1541  * @src_addr: pointer to source memory location (to move from)
1542  * @num_bytes: number of bytes to move.
1543  *
1544  * Move host memory from one location to another, similar to memmove in
1545  * standard C.  Note this function *does* handle overlapping
1546  * source and destination memory locations.
1547 
1548  * Return: None
1549  */
1550 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1551 {
1552 	if (0 == num_bytes) {
1553 		/* special case where dst_addr or src_addr can be NULL */
1554 		return;
1555 	}
1556 
1557 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1558 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1559 			  "%s called with NULL parameter, source:%pK destination:%pK",
1560 			  __func__, src_addr, dst_addr);
1561 		QDF_ASSERT(0);
1562 		return;
1563 	}
1564 	memmove(dst_addr, src_addr, num_bytes);
1565 }
1566 qdf_export_symbol(qdf_mem_move);
1567 
1568 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1569 /**
1570  * qdf_mem_dma_alloc() - allocates memory for dma
1571  * @osdev: OS device handle
1572  * @dev: Pointer to device handle
1573  * @size: Size to be allocated
1574  * @phy_addr: Physical address
1575  *
1576  * Return: pointer of allocated memory or null if memory alloc fails
1577  */
1578 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1579 				      qdf_size_t size,
1580 				      qdf_dma_addr_t *phy_addr)
1581 {
1582 	void *vaddr;
1583 
1584 	vaddr = qdf_mem_malloc(size);
1585 	*phy_addr = ((uintptr_t) vaddr);
1586 	/* using this type conversion to suppress "cast from pointer to integer
1587 	 * of different size" warning on some platforms
1588 	 */
1589 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1590 	return vaddr;
1591 }
1592 
1593 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1594 #define QCA8074_RAM_BASE 0x50000000
1595 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1596 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1597 			qdf_dma_addr_t *phy_addr)
1598 {
1599 	void *vaddr = NULL;
1600 	int i;
1601 
1602 	*phy_addr = 0;
1603 
1604 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1605 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1606 					   qdf_mem_malloc_flags());
1607 
1608 		if (!vaddr) {
1609 			qdf_print("%s failed , size: %zu!\n", __func__, size);
1610 			return NULL;
1611 		}
1612 
1613 		if (*phy_addr >= QCA8074_RAM_BASE)
1614 			return vaddr;
1615 
1616 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1617 	}
1618 
1619 	return NULL;
1620 }
1621 
1622 #else
/* Default path: allocate a coherent DMA buffer via the kernel DMA API */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
1628 #endif
1629 
1630 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* A_SIMOS/SDIO/USB: "DMA" buffers came from the heap; free them there */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
1636 #else
1637 
/* Default path: return a coherent DMA buffer to the kernel DMA API */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
1643 #endif
1644 
1645 #ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - debug version of DMA-coherent alloc
 * @osdev: OS device handle
 * @dev: device handle used for the DMA mapping
 * @size: requested number of bytes
 * @paddr: out parameter; receives the physical (bus) address
 * @file: file name of the call site
 * @line: line number of the call site
 * @caller: address of the calling function
 *
 * Allocates a DMA buffer with extra room for a trailing debug header and
 * records the allocation in the current domain's DMA tracking list.
 *
 * Return: virtual address of the buffer, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* reject zero-size and oversized (> QDF_MEM_MAX_MALLOC) requests */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	/* register the allocation in the domain's DMA tracking list */
	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1689 
/**
 * qdf_mem_free_consistent_debug() - debug version of DMA-coherent free
 * @osdev: OS device handle
 * @dev: device handle used for the DMA mapping
 * @size: size of the buffer as originally requested
 * @vaddr: virtual address of the buffer; NULL is a no-op
 * @paddr: physical (bus) address of the buffer
 * @memctx: memory context (unused here)
 * @file: file name of the call site
 * @line: line number of the call site
 *
 * Validates the trailing debug header, unlinks the allocation from the
 * DMA tracking list, and releases the buffer (including the debug area).
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	/* only unlink when validation passed, to avoid corrupting the list */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	/* reports (and presumably aborts) when error_bitmap is non-zero */
	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);
1725 
1726 #else
1727 
1728 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1729 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1730 {
1731 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1732 
1733 	if (vaddr)
1734 		qdf_mem_dma_inc(size);
1735 
1736 	return vaddr;
1737 }
1738 qdf_export_symbol(qdf_mem_alloc_consistent);
1739 
/**
 * qdf_mem_free_consistent() - free a DMA-coherent buffer
 * @osdev: OS device handle
 * @dev: device handle used for the DMA mapping
 * @size: size of the buffer in bytes
 * @vaddr: virtual address of the buffer
 * @paddr: physical (bus) address of the buffer
 * @memctx: memory context (unused here)
 *
 * Return: None
 */
void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);
1748 
1749 #endif /* MEMORY_DEBUG */
1750 
1751 /**
1752  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1753  * @osdev: OS device handle
1754  * @bus_addr: dma address to give to the device
1755  * @size: Size of the memory block
1756  * @direction: direction data will be DMAed
1757  *
1758  * Assign memory to the remote device.
1759  * The cache lines are flushed to ram or invalidated as needed.
1760  *
1761  * Return: none
1762  */
1763 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1764 					qdf_dma_addr_t bus_addr,
1765 					qdf_size_t size,
1766 					enum dma_data_direction direction)
1767 {
1768 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1769 }
1770 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1771 
1772 /**
1773  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1774  * @osdev: OS device handle
1775  * @bus_addr: dma address to give to the cpu
1776  * @size: Size of the memory block
1777  * @direction: direction data will be DMAed
1778  *
1779  * Assign memory to the CPU.
1780  *
1781  * Return: none
1782  */
1783 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1784 				     qdf_dma_addr_t bus_addr,
1785 				     qdf_size_t size,
1786 				     enum dma_data_direction direction)
1787 {
1788 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1789 }
1790 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1791 
/**
 * qdf_mem_init() - initialize the qdf memory module
 *
 * Sets up allocation tracking (a no-op unless MEMORY_DEBUG) and the
 * memory debugfs entries.
 *
 * Return: None
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);
1799 
/**
 * qdf_mem_exit() - tear down the qdf memory module
 *
 * Removes the debugfs entries and shuts down allocation tracking, in the
 * reverse order of qdf_mem_init().
 *
 * Return: None
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);
1807 
1808 /**
1809  * qdf_ether_addr_copy() - copy an Ethernet address
1810  *
1811  * @dst_addr: A six-byte array Ethernet address destination
1812  * @src_addr: A six-byte array Ethernet address source
1813  *
1814  * Please note: dst & src must both be aligned to u16.
1815  *
1816  * Return: none
1817  */
1818 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1819 {
1820 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1821 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1822 			  "%s called with NULL parameter, source:%pK destination:%pK",
1823 			  __func__, src_addr, dst_addr);
1824 		QDF_ASSERT(0);
1825 		return;
1826 	}
1827 	ether_addr_copy(dst_addr, src_addr);
1828 }
1829 qdf_export_symbol(qdf_ether_addr_copy);
1830 
1831