xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: qdf_mem
30  * This file provides OS dependent memory management APIs
31  */
32 
33 #include "qdf_debugfs.h"
34 #include "qdf_mem.h"
35 #include "qdf_nbuf.h"
36 #include "qdf_lock.h"
37 #include "qdf_mc_timer.h"
38 #include "qdf_module.h"
39 #include <qdf_trace.h>
40 #include "qdf_atomic.h"
41 #include "qdf_str.h"
42 #include <linux/debugfs.h>
43 #include <linux/seq_file.h>
44 #include <linux/string.h>
45 
46 #ifdef CONFIG_MCL
47 #include <host_diag_core_event.h>
48 #else
49 #define host_log_low_resource_failure(code) do {} while (0)
50 #endif
51 
52 #if defined(CONFIG_CNSS)
53 #include <net/cnss.h>
54 #endif
55 
56 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
57 #include <net/cnss_prealloc.h>
58 #endif
59 
60 #ifdef MEMORY_DEBUG
61 #include "qdf_debug_domain.h"
62 #include <qdf_list.h>
63 
64 /* Preprocessor Definitions and Constants */
65 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
66 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
67 #define QDF_DEBUG_STRING_SIZE 512
68 #define QDF_MEM_FILE_NAME_SIZE 48
69 
70 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
71 static qdf_spinlock_t qdf_mem_list_lock;
72 
73 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
74 static qdf_spinlock_t qdf_mem_dma_list_lock;
75 
76 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
77 {
78 	return &qdf_mem_domains[domain];
79 }
80 
81 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
82 {
83 	return &qdf_mem_dma_domains[domain];
84 }
85 
/**
 * struct qdf_mem_header - metadata tracked alongside each debug allocation
 * @node: list node linking this allocation into its domain's tracking list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value (WLAN_MEM_HEADER), used to detect out-of-bounds
 *	access and header corruption
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
};
108 
109 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
110 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
111 
112 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
113 {
114 	return (struct qdf_mem_header *)ptr - 1;
115 }
116 
117 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
118 							    qdf_size_t size)
119 {
120 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
121 }
122 
123 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
124 {
125 	return (uint64_t *)((void *)(header + 1) + header->size);
126 }
127 
128 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
129 {
130 	return (void *)(header + 1);
131 }
132 
133 /* number of bytes needed for the qdf memory debug information */
134 #define QDF_MEM_DEBUG_SIZE \
135 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
136 
137 /* number of bytes needed for the qdf dma memory debug information */
138 #define QDF_DMA_MEM_DEBUG_SIZE \
139 	(sizeof(struct qdf_mem_header))
140 
141 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
142 {
143 	QDF_BUG(header);
144 	if (!header)
145 		return;
146 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
147 }
148 
/**
 * qdf_mem_header_init() - populate a tracked allocation's debug header
 * @header: the header to initialize
 * @size: size of the user allocation in bytes
 * @file: name of the file the allocation was made from
 * @line: line number of the allocation in @file
 * @caller: caller address recorded for leak reports
 */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *file, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	/* record the domain active right now; validated again at free time */
	header->domain = qdf_debug_domain_get();
	header->freed = false;

	/* copy the file name, rather than pointing to it */
	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);

	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
}
167 
/**
 * enum qdf_mem_validation_bitmap - error bits set while validating an
 *	allocation's debug metadata; multiple bits may be set at once
 * @QDF_MEM_BAD_HEADER: header canary did not match WLAN_MEM_HEADER
 * @QDF_MEM_BAD_TRAILER: trailer canary did not match WLAN_MEM_TRAILER
 * @QDF_MEM_BAD_SIZE: recorded size exceeds QDF_MEM_MAX_MALLOC
 * @QDF_MEM_DOUBLE_FREE: the freed flag was already set to true
 * @QDF_MEM_BAD_FREED: the freed flag held a value other than true/false
 * @QDF_MEM_BAD_NODE: the tracking list node is not validly linked
 * @QDF_MEM_BAD_DOMAIN: recorded domain id is out of the valid range
 * @QDF_MEM_WRONG_DOMAIN: recorded domain differs from the active domain
 */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};
178 
179 /**
180  * qdf_mem_validate_list_node() - validate that the node is in a list
181  * @qdf_node: node to check for being in a list
182  *
183  * Return: true if the node validly linked in an anchored doubly linked list
184  */
185 static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
186 {
187 	struct list_head *node = qdf_node;
188 
189 	/*
190 	 * if the node is an empty list, it is not tied to an anchor node
191 	 * and must have been removed with list_del_init
192 	 */
193 	if (list_empty(node))
194 		return false;
195 
196 	if (!node->prev || !node->next)
197 		return false;
198 
199 	if (node->prev->next != node || node->next->prev != node)
200 		return false;
201 
202 	return true;
203 }
204 
205 static enum qdf_mem_validation_bitmap
206 qdf_mem_trailer_validate(struct qdf_mem_header *header)
207 {
208 	enum qdf_mem_validation_bitmap error_bitmap = 0;
209 
210 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
211 		error_bitmap |= QDF_MEM_BAD_TRAILER;
212 	return error_bitmap;
213 }
214 
/**
 * qdf_mem_header_validate() - check a debug header for corruption
 * @header: the header to validate
 * @domain: the debug domain the allocation is expected to belong to
 *
 * Return: bitmap of detected errors; 0 if the header is intact
 */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is a uint8_t: exactly 'true' means a double free, any other
	 * non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	/* an out-of-range domain id is corruption; a valid-but-different
	 * domain id is a cross-domain free
	 */
	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
243 
/**
 * qdf_mem_header_assert_valid() - panic if any validation errors were found
 * @header: the header that was validated
 * @current_domain: the debug domain active at the time of the check
 * @error_bitmap: errors reported by qdf_mem_header_validate() and/or
 *	qdf_mem_trailer_validate()
 * @file: name of the file the triggering operation was made from
 * @line: line number of the triggering operation in @file
 *
 * Logs one message per error bit set, then panics. Returns silently when
 * @error_bitmap is 0.
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; found %s(%d), expected %s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	/* any error at this point is unrecoverable memory corruption */
	panic("A fatal memory error was detected @ %s:%d",
	      file, line);
}
286 #endif /* MEMORY_DEBUG */
287 
288 u_int8_t prealloc_disabled = 1;
289 qdf_declare_param(prealloc_disabled, byte);
290 qdf_export_symbol(prealloc_disabled);
291 
292 #if defined WLAN_DEBUGFS
293 
294 /* Debugfs root directory for qdf_mem */
295 static struct dentry *qdf_mem_debugfs_root;
296 
297 /**
298  * struct __qdf_mem_stat - qdf memory statistics
299  * @kmalloc:	total kmalloc allocations
300  * @dma:	total dma allocations
301  * @skb:	total skb allocations
302  */
303 static struct __qdf_mem_stat {
304 	qdf_atomic_t kmalloc;
305 	qdf_atomic_t dma;
306 	qdf_atomic_t skb;
307 } qdf_mem_stat;
308 
309 static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
310 {
311 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
312 }
313 
314 static inline void qdf_mem_dma_inc(qdf_size_t size)
315 {
316 	qdf_atomic_add(size, &qdf_mem_stat.dma);
317 }
318 
319 void qdf_mem_skb_inc(qdf_size_t size)
320 {
321 	qdf_atomic_add(size, &qdf_mem_stat.skb);
322 }
323 
324 static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
325 {
326 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
327 }
328 
329 static inline void qdf_mem_dma_dec(qdf_size_t size)
330 {
331 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
332 }
333 
334 void qdf_mem_skb_dec(qdf_size_t size)
335 {
336 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
337 }
338 
339 #ifdef MEMORY_DEBUG
/**
 * qdf_err_printer() - qdf_abstract_print adapter for the QDF error trace
 * @priv: unused private context (required by the adapter signature)
 * @fmt: printf-style format string
 *
 * Return: 0 (always)
 */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}
350 
351 static int seq_printf_printer(void *priv, const char *fmt, ...)
352 {
353 	struct seq_file *file = priv;
354 	va_list args;
355 
356 	va_start(args, fmt);
357 	seq_vprintf(file, fmt, args);
358 	seq_puts(file, "\n");
359 	va_end(args);
360 
361 	return 0;
362 }
363 
364 /**
365  * struct __qdf_mem_info - memory statistics
366  * @file: the file which allocated memory
367  * @line: the line at which allocation happened
368  * @size: the size of allocation
369  * @caller: Address of the caller function
370  * @count: how many allocations of same type
371  *
372  */
373 struct __qdf_mem_info {
374 	char file[QDF_MEM_FILE_NAME_SIZE];
375 	uint32_t line;
376 	uint32_t size;
377 	void *caller;
378 	uint32_t count;
379 };
380 
381 /*
382  * The table depth defines the de-duplication proximity scope.
383  * A deeper table takes more time, so choose any optimum value.
384  */
385 #define QDF_MEM_STAT_TABLE_SIZE 8
386 
387 /**
388  * qdf_mem_domain_print_header() - memory domain header print logic
389  * @print: the print adapter function
390  * @print_priv: the private data to be consumed by @print
391  *
392  * Return: None
393  */
394 static void qdf_mem_domain_print_header(qdf_abstract_print print,
395 					void *print_priv)
396 {
397 	print(print_priv,
398 	      "--------------------------------------------------------------");
399 	print(print_priv, " count    size     total    filename     caller");
400 	print(print_priv,
401 	      "--------------------------------------------------------------");
402 }
403 
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per aggregated call site, and also accumulates a single
 * "WLAN_BUG_RCA" summary line covering all table entries.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	/* len tracks the end of the accumulated summary string */
	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* a zero count marks the end of the populated entries */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].file,
		      table[i].line, table[i].caller);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].file,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}
444 
445 /**
446  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
447  * @table: the memory metadata table to insert into
448  * @meta: the memory metadata to insert
449  *
450  * Return: true if the table is full after inserting, false otherwise
451  */
452 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
453 				      struct qdf_mem_header *meta)
454 {
455 	int i;
456 
457 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
458 		if (!table[i].count) {
459 			qdf_str_lcopy(table[i].file, meta->file,
460 				      QDF_MEM_FILE_NAME_SIZE);
461 			table[i].line = meta->line;
462 			table[i].size = meta->size;
463 			table[i].count = 1;
464 			table[i].caller = meta->caller;
465 			break;
466 		}
467 
468 		if (qdf_str_eq(table[i].file, meta->file) &&
469 		    table[i].line == meta->line &&
470 		    table[i].size == meta->size &&
471 		    table[i].caller == meta->caller) {
472 			table[i].count++;
473 			break;
474 		}
475 	}
476 
477 	/* return true if the table is now full */
478 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
479 }
480 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Walks the domain's tracking list, aggregating allocations from the same
 * call site into a small table, and flushes the table to @print whenever
 * it fills up.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* release the lock across table printing */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		/*
		 * NOTE(review): iteration resumes from 'node' after the lock
		 * was dropped above; presumably a concurrent free of this
		 * node cannot happen here - verify against callers
		 */
		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush any remaining, partially filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}
521 
522 /**
523  * qdf_mem_seq_start() - sequential callback to start
524  * @seq: seq_file handle
525  * @pos: The start position of the sequence
526  *
527  * Return: iterator pointer, or NULL if iteration is complete
528  */
529 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
530 {
531 	enum qdf_debug_domain domain = *pos;
532 
533 	if (!qdf_debug_domain_valid(domain))
534 		return NULL;
535 
536 	/* just use the current position as our iterator */
537 	return pos;
538 }
539 
540 /**
541  * qdf_mem_seq_next() - next sequential callback
542  * @seq: seq_file handle
543  * @v: the current iterator
544  * @pos: the current position
545  *
546  * Get the next node and release previous node.
547  *
548  * Return: iterator pointer, or NULL if iteration is complete
549  */
550 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
551 {
552 	++*pos;
553 
554 	return qdf_mem_seq_start(seq, pos);
555 }
556 
557 /**
558  * qdf_mem_seq_stop() - stop sequential callback
559  * @seq: seq_file handle
560  * @v: current iterator
561  *
562  * Return: None
563  */
564 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
565 
566 /**
567  * qdf_mem_seq_show() - print sequential callback
568  * @seq: seq_file handle
569  * @v: current iterator
570  *
571  * Return: 0 - success
572  */
573 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
574 {
575 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
576 
577 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
578 		   qdf_debug_domain_name(domain_id), domain_id);
579 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
580 			     seq_printf_printer, seq);
581 
582 	return 0;
583 }
584 
585 /* sequential file operation table */
586 static const struct seq_operations qdf_mem_seq_ops = {
587 	.start = qdf_mem_seq_start,
588 	.next  = qdf_mem_seq_next,
589 	.stop  = qdf_mem_seq_stop,
590 	.show  = qdf_mem_seq_show,
591 };
592 
593 
594 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
595 {
596 	return seq_open(file, &qdf_mem_seq_ops);
597 }
598 
599 /* debugfs file operation table */
600 static const struct file_operations fops_qdf_mem_debugfs = {
601 	.owner = THIS_MODULE,
602 	.open = qdf_mem_debugfs_open,
603 	.read = seq_read,
604 	.llseek = seq_lseek,
605 	.release = seq_release,
606 };
607 
608 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
609 {
610 	if (!qdf_mem_debugfs_root)
611 		return QDF_STATUS_E_FAILURE;
612 
613 	debugfs_create_file("list",
614 			    S_IRUSR,
615 			    qdf_mem_debugfs_root,
616 			    NULL,
617 			    &fops_qdf_mem_debugfs);
618 
619 	return QDF_STATUS_SUCCESS;
620 }
621 
622 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
623 {
624 	return QDF_STATUS_SUCCESS;
625 }
626 
627 #else /* MEMORY_DEBUG */
628 
629 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
630 {
631 	return QDF_STATUS_E_NOSUPPORT;
632 }
633 
634 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
635 {
636 	return QDF_STATUS_E_NOSUPPORT;
637 }
638 
639 #endif /* MEMORY_DEBUG */
640 
641 
642 static void qdf_mem_debugfs_exit(void)
643 {
644 	debugfs_remove_recursive(qdf_mem_debugfs_root);
645 	qdf_mem_debugfs_root = NULL;
646 }
647 
648 static QDF_STATUS qdf_mem_debugfs_init(void)
649 {
650 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
651 
652 	if (!qdf_debugfs_root)
653 		return QDF_STATUS_E_FAILURE;
654 
655 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
656 
657 	if (!qdf_mem_debugfs_root)
658 		return QDF_STATUS_E_FAILURE;
659 
660 
661 	debugfs_create_atomic_t("kmalloc",
662 				S_IRUSR,
663 				qdf_mem_debugfs_root,
664 				&qdf_mem_stat.kmalloc);
665 
666 	debugfs_create_atomic_t("dma",
667 				S_IRUSR,
668 				qdf_mem_debugfs_root,
669 				&qdf_mem_stat.dma);
670 
671 	debugfs_create_atomic_t("skb",
672 				S_IRUSR,
673 				qdf_mem_debugfs_root,
674 				&qdf_mem_stat.skb);
675 
676 	return QDF_STATUS_SUCCESS;
677 }
678 
679 #else /* WLAN_DEBUGFS */
680 
681 static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
682 static inline void qdf_mem_dma_inc(qdf_size_t size) {}
683 static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
684 static inline void qdf_mem_dma_dec(qdf_size_t size) {}
685 
686 
687 static QDF_STATUS qdf_mem_debugfs_init(void)
688 {
689 	return QDF_STATUS_E_NOSUPPORT;
690 }
691 static void qdf_mem_debugfs_exit(void) {}
692 
693 
694 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
695 {
696 	return QDF_STATUS_E_NOSUPPORT;
697 }
698 
699 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
700 {
701 	return QDF_STATUS_E_NOSUPPORT;
702 }
703 
704 #endif /* WLAN_DEBUGFS */
705 
706 /**
707  * __qdf_mempool_init() - Create and initialize memory pool
708  *
709  * @osdev: platform device object
710  * @pool_addr: address of the pool created
711  * @elem_cnt: no. of elements in pool
712  * @elem_size: size of each pool element in bytes
713  * @flags: flags
714  *
715  * return: Handle to memory pool or NULL if allocation failed
716  */
717 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
718 		       int elem_cnt, size_t elem_size, u_int32_t flags)
719 {
720 	__qdf_mempool_ctxt_t *new_pool = NULL;
721 	u_int32_t align = L1_CACHE_BYTES;
722 	unsigned long aligned_pool_mem;
723 	int pool_id;
724 	int i;
725 
726 	if (prealloc_disabled) {
727 		/* TBD: We can maintain a list of pools in qdf_device_t
728 		 * to help debugging
729 		 * when pre-allocation is not enabled
730 		 */
731 		new_pool = (__qdf_mempool_ctxt_t *)
732 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
733 		if (new_pool == NULL)
734 			return QDF_STATUS_E_NOMEM;
735 
736 		memset(new_pool, 0, sizeof(*new_pool));
737 		/* TBD: define flags for zeroing buffers etc */
738 		new_pool->flags = flags;
739 		new_pool->elem_size = elem_size;
740 		new_pool->max_elem = elem_cnt;
741 		*pool_addr = new_pool;
742 		return 0;
743 	}
744 
745 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
746 		if (osdev->mem_pool[pool_id] == NULL)
747 			break;
748 	}
749 
750 	if (pool_id == MAX_MEM_POOLS)
751 		return -ENOMEM;
752 
753 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
754 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
755 	if (new_pool == NULL)
756 		return -ENOMEM;
757 
758 	memset(new_pool, 0, sizeof(*new_pool));
759 	/* TBD: define flags for zeroing buffers etc */
760 	new_pool->flags = flags;
761 	new_pool->pool_id = pool_id;
762 
763 	/* Round up the element size to cacheline */
764 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
765 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
766 				((align)?(align - 1):0);
767 
768 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
769 	if (new_pool->pool_mem == NULL) {
770 			/* TBD: Check if we need get_free_pages above */
771 		kfree(new_pool);
772 		osdev->mem_pool[pool_id] = NULL;
773 		return -ENOMEM;
774 	}
775 
776 	spin_lock_init(&new_pool->lock);
777 
778 	/* Initialize free list */
779 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
780 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
781 	STAILQ_INIT(&new_pool->free_list);
782 
783 	for (i = 0; i < elem_cnt; i++)
784 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
785 			(mempool_elem_t *)(aligned_pool_mem +
786 			(new_pool->elem_size * i)), mempool_entry);
787 
788 
789 	new_pool->free_cnt = elem_cnt;
790 	*pool_addr = new_pool;
791 	return 0;
792 }
793 qdf_export_symbol(__qdf_mempool_init);
794 
795 /**
796  * __qdf_mempool_destroy() - Destroy memory pool
797  * @osdev: platform device object
798  * @Handle: to memory pool
799  *
800  * Returns: none
801  */
802 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
803 {
804 	int pool_id = 0;
805 
806 	if (!pool)
807 		return;
808 
809 	if (prealloc_disabled) {
810 		kfree(pool);
811 		return;
812 	}
813 
814 	pool_id = pool->pool_id;
815 
816 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
817 	kfree(pool->pool_mem);
818 	kfree(pool);
819 	osdev->mem_pool[pool_id] = NULL;
820 }
821 qdf_export_symbol(__qdf_mempool_destroy);
822 
823 /**
824  * __qdf_mempool_alloc() - Allocate an element memory pool
825  *
826  * @osdev: platform device object
827  * @Handle: to memory pool
828  *
829  * Return: Pointer to the allocated element or NULL if the pool is empty
830  */
831 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
832 {
833 	void *buf = NULL;
834 
835 	if (!pool)
836 		return NULL;
837 
838 	if (prealloc_disabled)
839 		return  qdf_mem_malloc(pool->elem_size);
840 
841 	spin_lock_bh(&pool->lock);
842 
843 	buf = STAILQ_FIRST(&pool->free_list);
844 	if (buf != NULL) {
845 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
846 		pool->free_cnt--;
847 	}
848 
849 	/* TBD: Update free count if debug is enabled */
850 	spin_unlock_bh(&pool->lock);
851 
852 	return buf;
853 }
854 qdf_export_symbol(__qdf_mempool_alloc);
855 
856 /**
857  * __qdf_mempool_free() - Free a memory pool element
858  * @osdev: Platform device object
859  * @pool: Handle to memory pool
860  * @buf: Element to be freed
861  *
862  * Returns: none
863  */
864 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
865 {
866 	if (!pool)
867 		return;
868 
869 
870 	if (prealloc_disabled)
871 		return qdf_mem_free(buf);
872 
873 	spin_lock_bh(&pool->lock);
874 	pool->free_cnt++;
875 
876 	STAILQ_INSERT_TAIL
877 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
878 	spin_unlock_bh(&pool->lock);
879 }
880 qdf_export_symbol(__qdf_mempool_free);
881 
882 /**
883  * qdf_mem_alloc_outline() - allocation QDF memory
884  * @osdev: platform device object
885  * @size: Number of bytes of memory to allocate.
886  *
887  * This function will dynamicallly allocate the specified number of bytes of
888  * memory.
889  *
890  * Return:
891  * Upon successful allocate, returns a non-NULL pointer to the allocated
892  * memory.  If this function is unable to allocate the amount of memory
893  * specified (for any reason) it returns NULL.
894  */
895 void *
896 qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
897 {
898 	return qdf_mem_malloc(size);
899 }
900 qdf_export_symbol(qdf_mem_alloc_outline);
901 
902 /**
903  * qdf_mem_free_outline() - QDF memory free API
904  * @ptr: Pointer to the starting address of the memory to be free'd.
905  *
906  * This function will free the memory pointed to by 'ptr'. It also checks
907  * is memory is corrupted or getting double freed and panic.
908  *
909  * Return: none
910  */
911 void
912 qdf_mem_free_outline(void *buf)
913 {
914 	qdf_mem_free(buf);
915 }
916 qdf_export_symbol(qdf_mem_free_outline);
917 
918 /**
919  * qdf_mem_zero_outline() - zero out memory
920  * @buf: pointer to memory that will be set to zero
921  * @size: number of bytes zero
922  *
923  * This function sets the memory location to all zeros, essentially clearing
924  * the memory.
925  *
926  * Return: none
927  */
928 void
929 qdf_mem_zero_outline(void *buf, qdf_size_t size)
930 {
931 	qdf_mem_zero(buf, size);
932 }
933 qdf_export_symbol(qdf_mem_zero_outline);
934 
935 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If @size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If @size is less than or equal
 * to WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* clear the buffer before handing it out */
	memset(ptr, 0, size);

	return ptr;
}
961 
962 static inline bool qdf_mem_prealloc_put(void *ptr)
963 {
964 	return wcnss_prealloc_put(ptr);
965 }
966 #else
967 static inline void *qdf_mem_prealloc_get(size_t size)
968 {
969 	return NULL;
970 }
971 
972 static inline bool qdf_mem_prealloc_put(void *ptr)
973 {
974 	return false;
975 }
976 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
977 
978 static int qdf_mem_malloc_flags(void)
979 {
980 	if (in_interrupt() || irqs_disabled() || in_atomic())
981 		return GFP_ATOMIC;
982 
983 	return GFP_KERNEL;
984 }
985 
986 /* External Function implementation */
987 #ifdef MEMORY_DEBUG
988 
989 /**
990  * qdf_mem_debug_init() - initialize qdf memory debug functionality
991  *
992  * Return: none
993  */
994 static void qdf_mem_debug_init(void)
995 {
996 	int i;
997 
998 	/* Initalizing the list with maximum size of 60000 */
999 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1000 		qdf_list_create(&qdf_mem_domains[i], 60000);
1001 	qdf_spinlock_create(&qdf_mem_list_lock);
1002 
1003 	/* dma */
1004 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1005 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1006 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1007 
1008 	/* skb */
1009 	qdf_net_buf_debug_init();
1010 }
1011 
1012 static uint32_t
1013 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1014 			       qdf_list_t *mem_list)
1015 {
1016 	if (qdf_list_empty(mem_list))
1017 		return 0;
1018 
1019 	qdf_err("Memory leaks detected in %s domain!",
1020 		qdf_debug_domain_name(domain));
1021 	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);
1022 
1023 	return mem_list->count;
1024 }
1025 
1026 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1027 {
1028 	uint32_t leak_count = 0;
1029 	int i;
1030 
1031 	/* detect and print leaks */
1032 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1033 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1034 
1035 	if (leak_count)
1036 		panic("%u fatal memory leaks detected!", leak_count);
1037 }
1038 
1039 /**
1040  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1041  *
1042  * Return: none
1043  */
1044 static void qdf_mem_debug_exit(void)
1045 {
1046 	int i;
1047 
1048 	/* skb */
1049 	qdf_net_buf_debug_exit();
1050 
1051 	/* mem */
1052 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1053 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1054 		qdf_list_destroy(qdf_mem_list_get(i));
1055 
1056 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1057 
1058 	/* dma */
1059 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1060 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1061 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1062 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1063 }
1064 
/**
 * qdf_mem_malloc_debug() - debug implementation of the QDF allocation API
 * @size: number of bytes requested by the caller
 * @file: name of the file the allocation was made from
 * @line: line number of the allocation in @file
 * @caller: caller address recorded for leak reports
 *
 * Allocates size + QDF_MEM_DEBUG_SIZE bytes, initializes the debug header
 * and trailer canaries, and links the allocation into the active debug
 * domain's tracking list.
 *
 * Return: pointer to the usable (zeroed) buffer, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	/* reject zero-sized and over-sized requests outright */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* pre-allocated buffers bypass the debug header and tracking list */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* time the allocation to warn about calls that took too long */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, qdf_mem_malloc_flags());
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	/* track the allocation under the list lock */
	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
1111 qdf_export_symbol(qdf_mem_malloc_debug);
1112 
/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer previously returned by qdf_mem_malloc_debug()
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the allocation's debug header and trailer, removes it from the
 * owning domain's tracking list, and releases the memory. Asserts (and may
 * panic) if the header is corrupt, already freed, or belongs to a different
 * debug domain.
 *
 * Return: None
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* pool buffers go back to the pre-allocated pool, no header check */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* a pointer this low cannot have a debug header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	/* only unlink from the tracking list if the header validated clean */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* report/abort on validation errors outside the spinlock */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);
1148 
1149 void qdf_mem_check_for_leaks(void)
1150 {
1151 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1152 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1153 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1154 	uint32_t leaks_count = 0;
1155 
1156 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1157 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1158 
1159 	if (leaks_count)
1160 		panic("%u fatal memory leaks detected!", leaks_count);
1161 }
1162 
1163 #else
/* no-op stubs: allocation tracking is compiled out without MEMORY_DEBUG */
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}
1167 
1168 /**
1169  * qdf_mem_malloc() - allocation QDF memory
1170  * @size: Number of bytes of memory to allocate.
1171  *
1172  * This function will dynamicallly allocate the specified number of bytes of
1173  * memory.
1174  *
1175  * Return:
1176  * Upon successful allocate, returns a non-NULL pointer to the allocated
1177  * memory.  If this function is unable to allocate the amount of memory
1178  * specified (for any reason) it returns NULL.
1179  */
1180 void *qdf_mem_malloc(size_t size)
1181 {
1182 	void *ptr;
1183 
1184 	ptr = qdf_mem_prealloc_get(size);
1185 	if (ptr)
1186 		return ptr;
1187 
1188 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1189 	if (!ptr)
1190 		return NULL;
1191 
1192 	qdf_mem_kmalloc_inc(ksize(ptr));
1193 
1194 	return ptr;
1195 }
1196 qdf_export_symbol(qdf_mem_malloc);
1197 
1198 /**
1199  * qdf_mem_free() - free QDF memory
1200  * @ptr: Pointer to the starting address of the memory to be free'd.
1201  *
1202  * This function will free the memory pointed to by 'ptr'.
1203  *
1204  * Return: None
1205  */
1206 void qdf_mem_free(void *ptr)
1207 {
1208 	if (ptr == NULL)
1209 		return;
1210 
1211 	if (qdf_mem_prealloc_put(ptr))
1212 		return;
1213 
1214 	qdf_mem_kmalloc_dec(ksize(ptr));
1215 
1216 	kfree(ptr);
1217 }
1218 qdf_export_symbol(qdf_mem_free);
1219 #endif
1220 
1221 /**
1222  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1223  * @osdev: OS device handle pointer
1224  * @pages: Multi page information storage
1225  * @element_size: Each element size
1226  * @element_num: Total number of elements should be allocated
1227  * @memctxt: Memory context
1228  * @cacheable: Coherent memory or cacheable memory
1229  *
1230  * This function will allocate large size of memory over multiple pages.
1231  * Large size of contiguous memory allocation will fail frequently, then
1232  * instead of allocate large memory by one shot, allocate through multiple, non
1233  * contiguous memory and combine pages when actual usage
1234  *
1235  * Return: None
1236  */
1237 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1238 			       struct qdf_mem_multi_page_t *pages,
1239 			       size_t element_size, uint16_t element_num,
1240 			       qdf_dma_context_t memctxt, bool cacheable)
1241 {
1242 	uint16_t page_idx;
1243 	struct qdf_mem_dma_page_t *dma_pages;
1244 	void **cacheable_pages = NULL;
1245 	uint16_t i;
1246 
1247 	pages->num_element_per_page = PAGE_SIZE / element_size;
1248 	if (!pages->num_element_per_page) {
1249 		qdf_print("Invalid page %d or element size %d",
1250 			  (int)PAGE_SIZE, (int)element_size);
1251 		goto out_fail;
1252 	}
1253 
1254 	pages->num_pages = element_num / pages->num_element_per_page;
1255 	if (element_num % pages->num_element_per_page)
1256 		pages->num_pages++;
1257 
1258 	if (cacheable) {
1259 		/* Pages information storage */
1260 		pages->cacheable_pages = qdf_mem_malloc(
1261 			pages->num_pages * sizeof(pages->cacheable_pages));
1262 		if (!pages->cacheable_pages) {
1263 			qdf_print("Cacheable page storage alloc fail");
1264 			goto out_fail;
1265 		}
1266 
1267 		cacheable_pages = pages->cacheable_pages;
1268 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1269 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1270 			if (!cacheable_pages[page_idx]) {
1271 				qdf_print("cacheable page alloc fail, pi %d",
1272 					  page_idx);
1273 				goto page_alloc_fail;
1274 			}
1275 		}
1276 		pages->dma_pages = NULL;
1277 	} else {
1278 		pages->dma_pages = qdf_mem_malloc(
1279 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1280 		if (!pages->dma_pages) {
1281 			qdf_print("dmaable page storage alloc fail");
1282 			goto out_fail;
1283 		}
1284 
1285 		dma_pages = pages->dma_pages;
1286 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1287 			dma_pages->page_v_addr_start =
1288 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1289 					 PAGE_SIZE,
1290 					&dma_pages->page_p_addr);
1291 			if (!dma_pages->page_v_addr_start) {
1292 				qdf_print("dmaable page alloc fail pi %d",
1293 					page_idx);
1294 				goto page_alloc_fail;
1295 			}
1296 			dma_pages->page_v_addr_end =
1297 				dma_pages->page_v_addr_start + PAGE_SIZE;
1298 			dma_pages++;
1299 		}
1300 		pages->cacheable_pages = NULL;
1301 	}
1302 	return;
1303 
1304 page_alloc_fail:
1305 	if (cacheable) {
1306 		for (i = 0; i < page_idx; i++)
1307 			qdf_mem_free(pages->cacheable_pages[i]);
1308 		qdf_mem_free(pages->cacheable_pages);
1309 	} else {
1310 		dma_pages = pages->dma_pages;
1311 		for (i = 0; i < page_idx; i++) {
1312 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1313 				dma_pages->page_v_addr_start,
1314 				dma_pages->page_p_addr, memctxt);
1315 			dma_pages++;
1316 		}
1317 		qdf_mem_free(pages->dma_pages);
1318 	}
1319 
1320 out_fail:
1321 	pages->cacheable_pages = NULL;
1322 	pages->dma_pages = NULL;
1323 	pages->num_pages = 0;
1324 	return;
1325 }
1326 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1327 
1328 /**
1329  * qdf_mem_multi_pages_free() - free large size of kernel memory
1330  * @osdev: OS device handle pointer
1331  * @pages: Multi page information storage
1332  * @memctxt: Memory context
1333  * @cacheable: Coherent memory or cacheable memory
1334  *
1335  * This function will free large size of memory over multiple pages.
1336  *
1337  * Return: None
1338  */
1339 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1340 			      struct qdf_mem_multi_page_t *pages,
1341 			      qdf_dma_context_t memctxt, bool cacheable)
1342 {
1343 	unsigned int page_idx;
1344 	struct qdf_mem_dma_page_t *dma_pages;
1345 
1346 	if (cacheable) {
1347 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1348 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1349 		qdf_mem_free(pages->cacheable_pages);
1350 	} else {
1351 		dma_pages = pages->dma_pages;
1352 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1353 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1354 				dma_pages->page_v_addr_start,
1355 				dma_pages->page_p_addr, memctxt);
1356 			dma_pages++;
1357 		}
1358 		qdf_mem_free(pages->dma_pages);
1359 	}
1360 
1361 	pages->cacheable_pages = NULL;
1362 	pages->dma_pages = NULL;
1363 	pages->num_pages = 0;
1364 	return;
1365 }
1366 qdf_export_symbol(qdf_mem_multi_pages_free);
1367 
1368 /**
1369  * qdf_mem_multi_page_link() - Make links for multi page elements
1370  * @osdev: OS device handle pointer
1371  * @pages: Multi page information storage
1372  * @elem_size: Single element size
1373  * @elem_count: elements count should be linked
1374  * @cacheable: Coherent memory or cacheable memory
1375  *
1376  * This function will make links for multi page allocated structure
1377  *
1378  * Return: 0 success
1379  */
1380 int qdf_mem_multi_page_link(qdf_device_t osdev,
1381 		struct qdf_mem_multi_page_t *pages,
1382 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1383 {
1384 	uint16_t i, i_int;
1385 	void *page_info;
1386 	void **c_elem = NULL;
1387 	uint32_t num_link = 0;
1388 
1389 	for (i = 0; i < pages->num_pages; i++) {
1390 		if (cacheable)
1391 			page_info = pages->cacheable_pages[i];
1392 		else
1393 			page_info = pages->dma_pages[i].page_v_addr_start;
1394 
1395 		if (!page_info)
1396 			return -ENOMEM;
1397 
1398 		c_elem = (void **)page_info;
1399 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1400 			if (i_int == (pages->num_element_per_page - 1)) {
1401 				if (cacheable)
1402 					*c_elem = pages->
1403 						cacheable_pages[i + 1];
1404 				else
1405 					*c_elem = pages->
1406 						dma_pages[i + 1].
1407 							page_v_addr_start;
1408 				num_link++;
1409 				break;
1410 			} else {
1411 				*c_elem =
1412 					(void *)(((char *)c_elem) + elem_size);
1413 			}
1414 			num_link++;
1415 			c_elem = (void **)*c_elem;
1416 
1417 			/* Last link established exit */
1418 			if (num_link == (elem_count - 1))
1419 				break;
1420 		}
1421 	}
1422 
1423 	if (c_elem)
1424 		*c_elem = NULL;
1425 
1426 	return 0;
1427 }
1428 qdf_export_symbol(qdf_mem_multi_page_link);
1429 
1430 /**
1431  * qdf_mem_copy() - copy memory
1432  * @dst_addr: Pointer to destination memory location (to copy to)
1433  * @src_addr: Pointer to source memory location (to copy from)
1434  * @num_bytes: Number of bytes to copy.
1435  *
1436  * Copy host memory from one location to another, similar to memcpy in
1437  * standard C.  Note this function does not specifically handle overlapping
1438  * source and destination memory locations.  Calling this function with
1439  * overlapping source and destination memory locations will result in
1440  * unpredictable results.  Use qdf_mem_move() if the memory locations
1441  * for the source and destination are overlapping (or could be overlapping!)
1442  *
1443  * Return: none
1444  */
1445 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1446 {
1447 	if (0 == num_bytes) {
1448 		/* special case where dst_addr or src_addr can be NULL */
1449 		return;
1450 	}
1451 
1452 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1453 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1454 			  "%s called with NULL parameter, source:%pK destination:%pK",
1455 			  __func__, src_addr, dst_addr);
1456 		QDF_ASSERT(0);
1457 		return;
1458 	}
1459 	memcpy(dst_addr, src_addr, num_bytes);
1460 }
1461 qdf_export_symbol(qdf_mem_copy);
1462 
1463 /**
1464  * qdf_mem_zero() - zero out memory
1465  * @ptr: pointer to memory that will be set to zero
1466  * @num_bytes: number of bytes zero
1467  *
1468  * This function sets the memory location to all zeros, essentially clearing
1469  * the memory.
1470  *
1471  * Return: None
1472  */
1473 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1474 {
1475 	if (0 == num_bytes) {
1476 		/* special case where ptr can be NULL */
1477 		return;
1478 	}
1479 
1480 	if (ptr == NULL) {
1481 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1482 			  "%s called with NULL parameter ptr", __func__);
1483 		return;
1484 	}
1485 	memset(ptr, 0, num_bytes);
1486 }
1487 qdf_export_symbol(qdf_mem_zero);
1488 
1489 /**
1490  * qdf_mem_set() - set (fill) memory with a specified byte value.
1491  * @ptr: Pointer to memory that will be set
1492  * @num_bytes: Number of bytes to be set
1493  * @value: Byte set in memory
1494  *
1495  * Return: None
1496  */
1497 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1498 {
1499 	if (ptr == NULL) {
1500 		qdf_print("%s called with NULL parameter ptr", __func__);
1501 		return;
1502 	}
1503 	memset(ptr, value, num_bytes);
1504 }
1505 qdf_export_symbol(qdf_mem_set);
1506 
1507 /**
1508  * qdf_mem_move() - move memory
1509  * @dst_addr: pointer to destination memory location (to move to)
1510  * @src_addr: pointer to source memory location (to move from)
1511  * @num_bytes: number of bytes to move.
1512  *
1513  * Move host memory from one location to another, similar to memmove in
1514  * standard C.  Note this function *does* handle overlapping
1515  * source and destination memory locations.
1516 
1517  * Return: None
1518  */
1519 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1520 {
1521 	if (0 == num_bytes) {
1522 		/* special case where dst_addr or src_addr can be NULL */
1523 		return;
1524 	}
1525 
1526 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1527 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1528 			  "%s called with NULL parameter, source:%pK destination:%pK",
1529 			  __func__, src_addr, dst_addr);
1530 		QDF_ASSERT(0);
1531 		return;
1532 	}
1533 	memmove(dst_addr, src_addr, num_bytes);
1534 }
1535 qdf_export_symbol(qdf_mem_move);
1536 
1537 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1538 /**
1539  * qdf_mem_dma_alloc() - allocates memory for dma
1540  * @osdev: OS device handle
1541  * @dev: Pointer to device handle
1542  * @size: Size to be allocated
1543  * @phy_addr: Physical address
1544  *
1545  * Return: pointer of allocated memory or null if memory alloc fails
1546  */
1547 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1548 				      qdf_size_t size,
1549 				      qdf_dma_addr_t *phy_addr)
1550 {
1551 	void *vaddr;
1552 
1553 	vaddr = qdf_mem_malloc(size);
1554 	*phy_addr = ((uintptr_t) vaddr);
1555 	/* using this type conversion to suppress "cast from pointer to integer
1556 	 * of different size" warning on some platforms
1557 	 */
1558 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1559 	return vaddr;
1560 }
1561 
1562 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1563 #define QCA8074_RAM_BASE 0x50000000
1564 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
/**
 * qdf_mem_dma_alloc() - allocates dma memory above the QCA8074 RAM base
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * On x86 emulation platforms only physical addresses at or above
 * QCA8074_RAM_BASE are usable by the target, so retry the coherent
 * allocation a bounded number of times until a suitable address is
 * returned, releasing each unusable buffer before retrying.
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_print("%s failed , size: %zu!\n", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		/* address not usable by the target; release and retry */
		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}
1590 
1591 #else
/* default: delegate directly to the kernel's coherent DMA allocator */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
1597 #endif
1598 
1599 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* counterpart of the SIMOS/SDIO/USB qdf_mem_dma_alloc(): the buffer came
 * from qdf_mem_malloc(), so release it the same way
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
1605 #else
1606 
/* default: release memory obtained from dma_alloc_coherent() */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
1612 #endif
1613 
1614 #ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - debug version of DMA-coherent alloc
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Number of bytes to allocate
 * @paddr: Populated with the physical address of the allocation
 * @file: File name of the call site
 * @line: Line number of the call site
 * @caller: Address of the calling function
 *
 * Allocates DMA-coherent memory with extra room for debug metadata at the
 * tail, and records the allocation in the current debug domain's DMA
 * tracking list so leaks can be detected at domain teardown.
 *
 * Return: Virtual address of the allocation, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* reject zero-size and excessively large requests */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	/* track the allocation in the current domain's DMA list */
	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1658 
/**
 * qdf_mem_free_consistent_debug() - debug version of DMA-coherent free
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Number of bytes originally allocated
 * @vaddr: Virtual address of the allocation
 * @paddr: Physical address of the allocation
 * @memctx: Memory context
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the debug metadata at the tail of the buffer, removes the
 * allocation from the owning domain's DMA tracking list, and releases the
 * memory. Asserts on validation failure.
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	/* only unlink from the tracking list if the header validated clean */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	/* report/abort on validation errors outside the spinlock */
	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);
1694 
1695 #else
1696 
1697 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1698 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1699 {
1700 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1701 
1702 	if (vaddr)
1703 		qdf_mem_dma_inc(size);
1704 
1705 	return vaddr;
1706 }
1707 qdf_export_symbol(qdf_mem_alloc_consistent);
1708 
1709 void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
1710 			     qdf_size_t size, void *vaddr,
1711 			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
1712 {
1713 	qdf_mem_dma_dec(size);
1714 	qdf_mem_dma_free(dev, size, vaddr, paddr);
1715 }
1716 qdf_export_symbol(qdf_mem_free_consistent);
1717 
1718 #endif /* MEMORY_DEBUG */
1719 
1720 /**
1721  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1722  * @osdev: OS device handle
1723  * @bus_addr: dma address to give to the device
1724  * @size: Size of the memory block
1725  * @direction: direction data will be DMAed
1726  *
1727  * Assign memory to the remote device.
1728  * The cache lines are flushed to ram or invalidated as needed.
1729  *
1730  * Return: none
1731  */
1732 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1733 					qdf_dma_addr_t bus_addr,
1734 					qdf_size_t size,
1735 					enum dma_data_direction direction)
1736 {
1737 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1738 }
1739 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1740 
1741 /**
1742  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1743  * @osdev: OS device handle
1744  * @bus_addr: dma address to give to the cpu
1745  * @size: Size of the memory block
1746  * @direction: direction data will be DMAed
1747  *
1748  * Assign memory to the CPU.
1749  *
1750  * Return: none
1751  */
1752 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1753 				     qdf_dma_addr_t bus_addr,
1754 				     qdf_size_t size,
1755 				     enum dma_data_direction direction)
1756 {
1757 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1758 }
1759 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1760 
/**
 * qdf_mem_init() - initialize the qdf memory module
 *
 * Return: none
 */
void qdf_mem_init(void)
{
	/* debug tracking must be up before the debugfs views that expose it */
	qdf_mem_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);
1768 
/**
 * qdf_mem_exit() - tear down the qdf memory module
 *
 * Return: none
 */
void qdf_mem_exit(void)
{
	/* teardown in the reverse order of qdf_mem_init() */
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);
1776 
1777 /**
1778  * qdf_ether_addr_copy() - copy an Ethernet address
1779  *
1780  * @dst_addr: A six-byte array Ethernet address destination
1781  * @src_addr: A six-byte array Ethernet address source
1782  *
1783  * Please note: dst & src must both be aligned to u16.
1784  *
1785  * Return: none
1786  */
1787 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1788 {
1789 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1790 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1791 			  "%s called with NULL parameter, source:%pK destination:%pK",
1792 			  __func__, src_addr, dst_addr);
1793 		QDF_ASSERT(0);
1794 		return;
1795 	}
1796 	ether_addr_copy(dst_addr, src_addr);
1797 }
1798 qdf_export_symbol(qdf_ether_addr_copy);
1799 
1800