/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: qdf_mem
 * This file provides OS dependent memory management APIs
 */

#include "qdf_debugfs.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "qdf_module.h"
#include <qdf_trace.h>
#include "qdf_atomic.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#ifdef CONFIG_MCL
#include <host_diag_core_event.h>
#else
#define host_log_low_resource_failure(code) do {} while (0)
#endif

#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include <net/cnss_prealloc.h>
#endif

#ifdef MEMORY_DEBUG
#include "qdf_debug_domain.h"
#include <qdf_list.h>

/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */

static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;

static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}
/**
 * struct qdf_mem_header - memory object to debug
 * @node: list node linking this allocation into its domain's list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	const char *file;
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
};

static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;

static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
							    qdf_size_t size)
{
	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
}

static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))
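
/*
 * For reference, the resulting debug layouts look like this (a sketch
 * derived from the helpers above; offsets are implied by the struct
 * definition, not spelled out in the original source):
 *
 * normal allocation (QDF_MEM_DEBUG_SIZE overhead):
 *   [ struct qdf_mem_header | user buffer (size bytes) | WLAN_MEM_TRAILER ]
 *                            ^-- pointer returned to the caller
 *
 * dma allocation (QDF_DMA_MEM_DEBUG_SIZE overhead; the header sits at the
 * tail to avoid SMMU faults, see qdf_mem_alloc_consistent_debug()):
 *   [ user buffer (size bytes) | struct qdf_mem_header ]
 *    ^-- pointer returned to the caller
 */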

static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}

static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *file, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	header->domain = qdf_debug_domain_get();
	header->freed = false;
	header->file = file;
	header->line = line;
	header->size = size;
	header->caller = caller;
	header->header = WLAN_MEM_HEADER;
}

enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};

/**
 * qdf_mem_validate_list_node() - validate that the node is in a list
 * @qdf_node: node to check for being in a list
 *
 * Return: true if the node is validly linked in an anchored doubly linked list
 */
static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
{
	struct list_head *node = qdf_node;

	/*
	 * if the node is an empty list, it is not tied to an anchor node
	 * and must have been removed with list_del_init
	 */
	if (list_empty(node))
		return false;

	if (!node->prev || !node->next)
		return false;

	if (node->prev->next != node || node->next->prev != node)
		return false;

	return true;
}

static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header *header)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;
	return error_bitmap;
}

static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}

237 
238 static void
239 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
240 			    enum qdf_debug_domain current_domain,
241 			    enum qdf_mem_validation_bitmap error_bitmap,
242 			    const char *file,
243 			    uint32_t line)
244 {
245 	if (!error_bitmap)
246 		return;
247 
248 	if (error_bitmap & QDF_MEM_BAD_HEADER)
249 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
250 			header->header, WLAN_MEM_HEADER);
251 
252 	if (error_bitmap & QDF_MEM_BAD_SIZE)
253 		qdf_err("Corrupted memory size %u (expected < %d)",
254 			header->size, QDF_MEM_MAX_MALLOC);
255 
256 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
257 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
258 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
259 
260 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
261 		qdf_err("Memory has previously been freed");
262 
263 	if (error_bitmap & QDF_MEM_BAD_FREED)
264 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
265 
266 	if (error_bitmap & QDF_MEM_BAD_NODE)
267 		qdf_err("Corrupted memory header node or double free");
268 
269 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
270 		qdf_err("Corrupted memory domain 0x%x", header->domain);
271 
272 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
273 		qdf_err("Memory domain mismatch; found %s(%d), expected %s(%d)",
274 			qdf_debug_domain_name(header->domain), header->domain,
275 			qdf_debug_domain_name(current_domain), current_domain);
276 
277 	panic("A fatal memory error was detected @ %s:%d",
278 	      kbasename(file), line);
279 }
280 #endif /* MEMORY_DEBUG */
281 
282 u_int8_t prealloc_disabled = 1;
283 qdf_declare_param(prealloc_disabled, byte);
284 EXPORT_SYMBOL(prealloc_disabled);
285 
#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total outstanding bytes allocated via kmalloc
 * @dma:	total outstanding bytes of dma allocations
 * @skb:	total outstanding bytes of skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;

static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

#ifdef MEMORY_DEBUG
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}

/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: address of the caller function
 * @count: how many allocations of the same type
 */
struct __qdf_mem_info {
	const char *file;
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes longer to search, so choose an optimal value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv, " count    size     total    filename     caller");
	print(print_priv,
	      "--------------------------------------------------------------");
}

/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      kbasename(table[i].file),
		      table[i].line, table[i].caller);
	}
}

/**
 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
 * @table: the memory metadata table to insert into
 * @meta: the memory metadata to insert
 *
 * Return: true if the table is full after inserting, false otherwise
 */
static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
				      struct qdf_mem_header *meta)
{
	int i;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!table[i].count) {
			table[i].file = meta->file;
			table[i].line = meta->line;
			table[i].size = meta->size;
			table[i].count = 1;
			table[i].caller = meta->caller;
			break;
		}

		if (table[i].file == meta->file &&
		    table[i].line == meta->line &&
		    table[i].size == meta->size &&
		    table[i].caller == meta->caller) {
			table[i].count++;
			break;
		}
	}

	/* return true if the table is now full */
	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
}

/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after-free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	qdf_mem_meta_table_print(table, print, print_priv);
}

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};

static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* MEMORY_DEBUG */

static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
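
/*
 * Illustrative only: with debugfs mounted at the usual location, the
 * counters created above (and, with MEMORY_DEBUG, the per-domain
 * allocation list) can typically be read from the shell, e.g.:
 *
 *   cat /sys/kernel/debug/<qdf debugfs root>/mem/kmalloc
 *   cat /sys/kernel/debug/<qdf debugfs root>/mem/list
 *
 * The exact root directory name is whatever qdf_debugfs_get_root()
 * returns and may differ between builds.
 */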

#else /* WLAN_DEBUGFS */

static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void qdf_mem_debugfs_exit(void) {}

static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* WLAN_DEBUGFS */

/**
 * __qdf_mempool_init() - create and initialize a memory pool
 * @osdev: platform device object
 * @pool_addr: address at which the handle to the created pool is stored
 * @elem_cnt: number of elements in the pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, negative error code on failure
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags)
{
	__qdf_mempool_ctxt_t *new_pool = NULL;
	u_int32_t align = L1_CACHE_BYTES;
	unsigned long aligned_pool_mem;
	int pool_id;
	int i;

	if (prealloc_disabled) {
		/* TBD: We can maintain a list of pools in qdf_device_t
		 * to help debugging
		 * when pre-allocation is not enabled
		 */
		new_pool = (__qdf_mempool_ctxt_t *)
			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
		if (new_pool == NULL)
			return -ENOMEM;

		memset(new_pool, 0, sizeof(*new_pool));
		/* TBD: define flags for zeroing buffers etc */
		new_pool->flags = flags;
		new_pool->elem_size = elem_size;
		new_pool->max_elem = elem_cnt;
		*pool_addr = new_pool;
		return 0;
	}

	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
		if (osdev->mem_pool[pool_id] == NULL)
			break;
	}

	if (pool_id == MAX_MEM_POOLS)
		return -ENOMEM;

	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
	if (new_pool == NULL)
		return -ENOMEM;

	memset(new_pool, 0, sizeof(*new_pool));
	/* TBD: define flags for zeroing buffers etc */
	new_pool->flags = flags;
	new_pool->pool_id = pool_id;

	/* Round up the element size to cacheline */
	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
	new_pool->mem_size = elem_cnt * new_pool->elem_size +
				((align) ? (align - 1) : 0);

	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
	if (new_pool->pool_mem == NULL) {
		/* TBD: Check if we need get_free_pages above */
		kfree(new_pool);
		osdev->mem_pool[pool_id] = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&new_pool->lock);

	/* Initialize free list; round the base up to the cacheline */
	aligned_pool_mem = roundup((unsigned long)new_pool->pool_mem, align);
	STAILQ_INIT(&new_pool->free_list);

	for (i = 0; i < elem_cnt; i++)
		STAILQ_INSERT_TAIL(&(new_pool->free_list),
			(mempool_elem_t *)(aligned_pool_mem +
			(new_pool->elem_size * i)), mempool_entry);

	new_pool->free_cnt = elem_cnt;
	*pool_addr = new_pool;
	return 0;
}
774 
775 /**
776  * __qdf_mempool_destroy() - Destroy memory pool
777  * @osdev: platform device object
778  * @Handle: to memory pool
779  *
780  * Returns: none
781  */
782 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
783 {
784 	int pool_id = 0;
785 
786 	if (!pool)
787 		return;
788 
789 	if (prealloc_disabled) {
790 		kfree(pool);
791 		return;
792 	}
793 
794 	pool_id = pool->pool_id;
795 
796 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
797 	kfree(pool->pool_mem);
798 	kfree(pool);
799 	osdev->mem_pool[pool_id] = NULL;
800 }
801 EXPORT_SYMBOL(__qdf_mempool_destroy);
802 
/**
 * __qdf_mempool_alloc() - allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: handle to the memory pool
 *
 * Return: Pointer to the allocated element, or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
{
	void *buf = NULL;

	if (!pool)
		return NULL;

	if (prealloc_disabled)
		return qdf_mem_malloc(pool->elem_size);

	spin_lock_bh(&pool->lock);

	buf = STAILQ_FIRST(&pool->free_list);
	if (buf != NULL) {
		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
		pool->free_cnt--;
	}

	/* TBD: Update free count if debug is enabled */
	spin_unlock_bh(&pool->lock);

	return buf;
}
EXPORT_SYMBOL(__qdf_mempool_alloc);

/**
 * __qdf_mempool_free() - free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: None
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
{
	if (!pool)
		return;

	if (prealloc_disabled)
		return qdf_mem_free(buf);

	spin_lock_bh(&pool->lock);
	pool->free_cnt++;

	STAILQ_INSERT_TAIL(&pool->free_list, (mempool_elem_t *)buf,
			   mempool_entry);
	spin_unlock_bh(&pool->lock);
}
EXPORT_SYMBOL(__qdf_mempool_free);
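
/*
 * Illustrative lifecycle sketch for the pool API above (not part of the
 * original source; "my_osdev" and the element type are hypothetical, and
 * error handling is elided for brevity):
 *
 *	__qdf_mempool_t pool;
 *	struct my_elem *elem;
 *
 *	if (__qdf_mempool_init(my_osdev, &pool, 32,
 *			       sizeof(struct my_elem), 0))
 *		return;            // pool could not be created
 *
 *	elem = __qdf_mempool_alloc(my_osdev, pool);
 *	if (elem) {
 *		// ... use elem ...
 *		__qdf_mempool_free(my_osdev, pool, elem);
 *	}
 *
 *	__qdf_mempool_destroy(my_osdev, pool);
 *
 * Note that when prealloc_disabled is set (the default), alloc/free
 * degenerate to per-element qdf_mem_malloc()/qdf_mem_free() calls.
 */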

/**
 * qdf_mem_alloc_outline() - allocate QDF memory
 * @osdev: platform device object
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
EXPORT_SYMBOL(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether the memory is corrupted or being double freed, and panics if so.
 *
 * Return: None
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
EXPORT_SYMBOL(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
EXPORT_SYMBOL(qdf_mem_zero_outline);

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	memset(ptr, 0, size);

	return ptr;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */

static int qdf_mem_malloc_flags(void)
{
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;

	return GFP_KERNEL;
}
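
/*
 * Example: an allocation made from tasklet/softirq context (where
 * in_interrupt() is true) uses GFP_ATOMIC and never sleeps, while a
 * plain process-context call may sleep under GFP_KERNEL; this is why
 * qdf_mem_malloc_debug() below warns when an allocation takes longer
 * than QDF_MEM_WARN_THRESHOLD ms.
 */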

/* External Function implementation */
#ifdef MEMORY_DEBUG

/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initializing the lists with a maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);

	/* skb */
	qdf_net_buf_debug_init();
}

static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}

static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t leak_count = 0;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);

	if (leak_count)
		panic("%u fatal memory leaks detected!", leak_count);
}

/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	/* skb */
	qdf_net_buf_debug_exit();

	/* mem */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}

void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, qdf_mem_malloc_flags());
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);

void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);

void qdf_mem_check_for_leaks(void)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
	uint32_t leaks_count = 0;

	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);

	if (leaks_count)
		panic("%u fatal memory leaks detected!", leaks_count);
}
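
/*
 * Illustrative sketch of domain-based leak checking (not from the original
 * source; qdf_debug_domain_set() is assumed to be provided by
 * qdf_debug_domain.h alongside the qdf_debug_domain_get() used above):
 *
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *	// ... a driver up/down cycle allocates and frees in this domain ...
 *	qdf_mem_check_for_leaks();   // panics if the current domain still
 *	                             // has outstanding allocations
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 */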

#else
static void qdf_mem_debug_init(void) {}

static void qdf_mem_debug_exit(void) {}

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc(size_t size)
{
	void *ptr;

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	ptr = kzalloc(size, qdf_mem_malloc_flags());
	if (!ptr)
		return NULL;

	qdf_mem_kmalloc_inc(ksize(ptr));

	return ptr;
}
EXPORT_SYMBOL(qdf_mem_malloc);

/**
 * qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void qdf_mem_free(void *ptr)
{
	if (ptr == NULL)
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	qdf_mem_kmalloc_dec(ksize(ptr));

	kfree(ptr);
}
EXPORT_SYMBOL(qdf_mem_free);
#endif
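
/*
 * Illustrative usage sketch for the allocation API above (not part of the
 * original source; "struct my_ctx" is a hypothetical caller type):
 *
 *	struct my_ctx *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	// the returned memory is zero-initialized; use it, then release:
 *	qdf_mem_free(ctx);
 *
 * With MEMORY_DEBUG enabled, the same calls are routed to the *_debug
 * variants, which add the header/trailer canaries and leak tracking
 * shown earlier in this file.
 */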

/**
 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements to be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function allocates a large amount of memory over multiple pages.
 * Since large contiguous allocations fail frequently, instead of allocating
 * the memory in one shot, it allocates multiple non-contiguous pages and
 * combines them at the point of actual use.
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages[0]));
		if (!pages->cacheable_pages) {
			qdf_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				qdf_print("cacheable page alloc fail, pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages) {
			qdf_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
					PAGE_SIZE,
					&dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
EXPORT_SYMBOL(qdf_mem_multi_pages_alloc);

/**
 * qdf_mem_multi_pages_free() - free large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function frees memory allocated over multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int page_idx;
	struct qdf_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			qdf_mem_free(pages->cacheable_pages[page_idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
EXPORT_SYMBOL(qdf_mem_multi_pages_free);

/**
 * qdf_mem_multi_page_link() - make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: Number of elements to be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function makes links across the elements of a multi page allocation,
 * so each element points to the next one in the chain.
 *
 * Return: 0 on success, -ENOMEM on failure
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
		struct qdf_mem_multi_page_t *pages,
		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				/* the last element of a page links to the
				 * first element of the next page
				 */
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	if (c_elem)
		*c_elem = NULL;

	return 0;
}
EXPORT_SYMBOL(qdf_mem_multi_page_link);
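
/*
 * Illustrative sketch tying the multi-page APIs together (not from the
 * original source; "my_osdev", "my_memctx", and struct my_desc are
 * hypothetical, and error handling is elided):
 *
 *	struct qdf_mem_multi_page_t pages;
 *	uint32_t count = 1024;
 *
 *	// carve 1024 cacheable descriptor slots out of whole pages
 *	qdf_mem_multi_pages_alloc(my_osdev, &pages, sizeof(struct my_desc),
 *				  count, my_memctx, true);
 *	if (!pages.num_pages)
 *		return;   // allocation failed
 *
 *	// chain the slots into a singly linked free list
 *	if (qdf_mem_multi_page_link(my_osdev, &pages,
 *				    sizeof(struct my_desc), count, true))
 *		goto free_pages;
 *
 *	// ... pop/push elements from the chain ...
 *
 * free_pages:
 *	qdf_mem_multi_pages_free(my_osdev, &pages, my_memctx, true);
 */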

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C.  Note this function does not specifically handle overlapping
 * source and destination memory locations.  Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results.  Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memcpy(dst_addr, src_addr, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_copy);

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where ptr can be NULL */
		return;
	}

	if (ptr == NULL) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, 0, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_zero);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte value to set in memory
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (ptr == NULL) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, value, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_set);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C.  Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memmove(dst_addr, src_addr, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_move);

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_dma_alloc() - allocates memory for dma
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size,
				      qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}

#elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_print("%s failed, size: %zu!\n", __func__, size);
			return NULL;
		}

		/* retry until the buffer lands at or above the ram base */
		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}

#else
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
#endif

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
#else
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
#endif

#ifdef MEMORY_DEBUG
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				  paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers; this call initializes
	 * the header structure at the tail.
	 * Prefixing the header into a DMA buffer causes SMMU faults, so
	 * do not prefix headers into DMA buffers.
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);

void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers; this call retrieves
	 * the header structure from the tail.
	 * Prefixing the header into a DMA buffer causes SMMU faults, so
	 * do not prefix headers into DMA buffers.
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);

#else

void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, qdf_dma_addr_t *paddr)
{
	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);

	if (vaddr)
		qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent);

void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);

#endif /* MEMORY_DEBUG */
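
/*
 * Illustrative sketch of the coherent DMA API above (not from the original
 * source; "my_osdev" and RING_BYTES are hypothetical placeholders):
 *
 *	qdf_dma_addr_t paddr;
 *	void *ring;
 *
 *	ring = qdf_mem_alloc_consistent(my_osdev, my_osdev->dev,
 *					RING_BYTES, &paddr);
 *	if (!ring)
 *		return QDF_STATUS_E_NOMEM;
 *	// program 'paddr' into the hardware, access 'ring' from the CPU ...
 *	qdf_mem_free_consistent(my_osdev, my_osdev->dev, RING_BYTES,
 *				ring, paddr, 0);
 *
 * With MEMORY_DEBUG enabled these map to the *_debug variants, which append
 * a qdf_mem_header after the buffer (never before it; see the SMMU note).
 */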

/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}
EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);

/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_cpu);

void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
EXPORT_SYMBOL(qdf_mem_init);

void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_mem_debug_exit();
}
EXPORT_SYMBOL(qdf_mem_exit);

/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 * @dst_addr: A six-byte array Ethernet address destination
 * @src_addr: A six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 *
 * Return: none
 */
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
{
	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%pK destination:%pK",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	ether_addr_copy(dst_addr, src_addr);
}
EXPORT_SYMBOL(qdf_ether_addr_copy);