xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 
37 #ifdef CONFIG_MCL
38 #include <host_diag_core_event.h>
39 #else
40 #define host_log_low_resource_failure(code) do {} while (0)
41 #endif
42 
43 #if defined(CONFIG_CNSS)
44 #include <net/cnss.h>
45 #endif
46 
47 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
48 #include <net/cnss_prealloc.h>
49 #endif
50 
51 #ifdef MEMORY_DEBUG
52 #include "qdf_debug_domain.h"
53 #include <qdf_list.h>
54 
55 /* Preprocessor Definitions and Constants */
56 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
57 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
58 #define QDF_DEBUG_STRING_SIZE 512
59 #define QDF_MEM_FILE_NAME_SIZE 48
60 
61 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
62 static qdf_spinlock_t qdf_mem_list_lock;
63 
64 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
65 static qdf_spinlock_t qdf_mem_dma_list_lock;
66 
67 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
68 {
69 	return &qdf_mem_domains[domain];
70 }
71 
72 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
73 {
74 	return &qdf_mem_dma_domains[domain];
75 }
76 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 *
 * This header is prepended to every tracked allocation; a 64-bit trailer
 * pattern is appended after the user region (see qdf_mem_get_trailer()).
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
};
99 
100 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
101 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
102 
103 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
104 {
105 	return (struct qdf_mem_header *)ptr - 1;
106 }
107 
108 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
109 							    qdf_size_t size)
110 {
111 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
112 }
113 
114 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
115 {
116 	return (uint64_t *)((void *)(header + 1) + header->size);
117 }
118 
119 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
120 {
121 	return (void *)(header + 1);
122 }
123 
124 /* number of bytes needed for the qdf memory debug information */
125 #define QDF_MEM_DEBUG_SIZE \
126 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
127 
128 /* number of bytes needed for the qdf dma memory debug information */
129 #define QDF_DMA_MEM_DEBUG_SIZE \
130 	(sizeof(struct qdf_mem_header))
131 
132 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
133 {
134 	QDF_BUG(header);
135 	if (!header)
136 		return;
137 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
138 }
139 
140 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
141 				const char *file, uint32_t line, void *caller)
142 {
143 	QDF_BUG(header);
144 	if (!header)
145 		return;
146 
147 	header->domain = qdf_debug_domain_get();
148 	header->freed = false;
149 
150 	/* copy the file name, rather than pointing to it */
151 	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
152 
153 	header->line = line;
154 	header->size = size;
155 	header->caller = caller;
156 	header->header = WLAN_MEM_HEADER;
157 }
158 
/* Bitmap of everything that can be wrong with a tracked allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,	/* leading guard pattern clobbered */
	QDF_MEM_BAD_TRAILER = 1 << 1,	/* trailing guard pattern clobbered */
	QDF_MEM_BAD_SIZE = 1 << 2,	/* recorded size exceeds the max */
	QDF_MEM_DOUBLE_FREE = 1 << 3,	/* freed flag already set (== true) */
	QDF_MEM_BAD_FREED = 1 << 4,	/* freed flag is neither 0 nor 1 */
	QDF_MEM_BAD_NODE = 1 << 5,	/* list node not validly linked */
	QDF_MEM_BAD_DOMAIN = 1 << 6,	/* domain value out of range */
	QDF_MEM_WRONG_DOMAIN = 1 << 7,	/* valid domain, but not current one */
};
169 
170 /**
171  * qdf_mem_validate_list_node() - validate that the node is in a list
172  * @qdf_node: node to check for being in a list
173  *
174  * Return: true if the node validly linked in an anchored doubly linked list
175  */
176 static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
177 {
178 	struct list_head *node = qdf_node;
179 
180 	/*
181 	 * if the node is an empty list, it is not tied to an anchor node
182 	 * and must have been removed with list_del_init
183 	 */
184 	if (list_empty(node))
185 		return false;
186 
187 	if (!node->prev || !node->next)
188 		return false;
189 
190 	if (node->prev->next != node || node->next->prev != node)
191 		return false;
192 
193 	return true;
194 }
195 
196 static enum qdf_mem_validation_bitmap
197 qdf_mem_trailer_validate(struct qdf_mem_header *header)
198 {
199 	enum qdf_mem_validation_bitmap error_bitmap = 0;
200 
201 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
202 		error_bitmap |= QDF_MEM_BAD_TRAILER;
203 	return error_bitmap;
204 }
205 
206 static enum qdf_mem_validation_bitmap
207 qdf_mem_header_validate(struct qdf_mem_header *header,
208 			enum qdf_debug_domain domain)
209 {
210 	enum qdf_mem_validation_bitmap error_bitmap = 0;
211 
212 	if (header->header != WLAN_MEM_HEADER)
213 		error_bitmap |= QDF_MEM_BAD_HEADER;
214 
215 	if (header->size > QDF_MEM_MAX_MALLOC)
216 		error_bitmap |= QDF_MEM_BAD_SIZE;
217 
218 	if (header->freed == true)
219 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
220 	else if (header->freed)
221 		error_bitmap |= QDF_MEM_BAD_FREED;
222 
223 	if (!qdf_mem_validate_list_node(&header->node))
224 		error_bitmap |= QDF_MEM_BAD_NODE;
225 
226 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
227 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
228 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
229 	else if (header->domain != domain)
230 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
231 
232 	return error_bitmap;
233 }
234 
/*
 * Log a human-readable description of every error bit set in
 * @error_bitmap, then panic. Does nothing when the bitmap is clean.
 * Always fatal on any error: this runs only after validation has
 * already detected memory corruption or a double free.
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; found %s(%d), expected %s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	/* @file/@line identify the call site that triggered validation */
	panic("A fatal memory error was detected @ %s:%d",
	      file, line);
}
277 #endif /* MEMORY_DEBUG */
278 
279 u_int8_t prealloc_disabled = 1;
280 qdf_declare_param(prealloc_disabled, byte);
281 qdf_export_symbol(prealloc_disabled);
282 
283 #if defined WLAN_DEBUGFS
284 
285 /* Debugfs root directory for qdf_mem */
286 static struct dentry *qdf_mem_debugfs_root;
287 
/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total outstanding kmalloc allocations, in bytes
 * @dma:	total outstanding dma allocations, in bytes
 * @skb:	total outstanding skb allocations, in bytes
 *
 * Counters are exposed read-only via debugfs (see qdf_mem_debugfs_init()).
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;
299 
/* Add @size bytes to the outstanding-kmalloc counter */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* Add @size bytes to the outstanding-DMA counter */
static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

/* Add @size bytes to the outstanding-skb counter (public API) */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* Subtract @size bytes from the outstanding-kmalloc counter */
static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* Subtract @size bytes from the outstanding-DMA counter */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

/* Subtract @size bytes from the outstanding-skb counter (public API) */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}
329 
330 #ifdef MEMORY_DEBUG
/**
 * qdf_err_printer() - qdf_abstract_print adapter logging at error level
 * @priv: unused opaque context (part of the qdf_abstract_print signature)
 * @fmt: printf-style format string
 *
 * Return: 0 (always)
 */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}
341 
/**
 * seq_printf_printer() - qdf_abstract_print adapter writing to a seq_file
 * @priv: the target struct seq_file
 * @fmt: printf-style format string
 *
 * Appends a newline after each formatted message so every print() call
 * produces exactly one output line.
 *
 * Return: 0 (always)
 */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
354 
/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type (same file/line/size/caller)
 */
struct __qdf_mem_info {
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
};
371 
372 /*
373  * The table depth defines the de-duplication proximity scope.
374  * A deeper table takes more time, so choose any optimum value.
375  */
376 #define QDF_MEM_STAT_TABLE_SIZE 8
377 
378 /**
379  * qdf_mem_domain_print_header() - memory domain header print logic
380  * @print: the print adapter function
381  * @print_priv: the private data to be consumed by @print
382  *
383  * Return: None
384  */
385 static void qdf_mem_domain_print_header(qdf_abstract_print print,
386 					void *print_priv)
387 {
388 	print(print_priv,
389 	      "--------------------------------------------------------------");
390 	print(print_priv, " count    size     total    filename     caller");
391 	print(print_priv,
392 	      "--------------------------------------------------------------");
393 }
394 
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints one line per de-duplicated allocation site, while also rolling
 * all sites into a single "WLAN_BUG_RCA" summary string that is emitted
 * last (output is truncated by qdf_scnprintf if it exceeds the buffer).
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* entries fill front-to-back; first empty slot ends the scan */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].file,
		      table[i].line, table[i].caller);
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].file,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}
435 
436 /**
437  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
438  * @table: the memory metadata table to insert into
439  * @meta: the memory metadata to insert
440  *
441  * Return: true if the table is full after inserting, false otherwise
442  */
443 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
444 				      struct qdf_mem_header *meta)
445 {
446 	int i;
447 
448 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
449 		if (!table[i].count) {
450 			qdf_str_lcopy(table[i].file, meta->file,
451 				      QDF_MEM_FILE_NAME_SIZE);
452 			table[i].line = meta->line;
453 			table[i].size = meta->size;
454 			table[i].count = 1;
455 			table[i].caller = meta->caller;
456 			break;
457 		}
458 
459 		if (qdf_str_eq(table[i].file, meta->file) &&
460 		    table[i].line == meta->line &&
461 		    table[i].size == meta->size &&
462 		    table[i].caller == meta->caller) {
463 			table[i].count++;
464 			break;
465 		}
466 	}
467 
468 	/* return true if the table is now full */
469 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
470 }
471 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Walks the domain's allocation list, de-duplicating entries into a small
 * table; whenever the table fills it is flushed via @print and reset. The
 * list lock is dropped around printing (print adapters may sleep), so the
 * walk tolerates—but does not guard against—concurrent list mutation.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		/* the header is the first member, so the node IS the header */
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever is left in the partially-filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}
512 
513 /**
514  * qdf_mem_seq_start() - sequential callback to start
515  * @seq: seq_file handle
516  * @pos: The start position of the sequence
517  *
518  * Return: iterator pointer, or NULL if iteration is complete
519  */
520 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
521 {
522 	enum qdf_debug_domain domain = *pos;
523 
524 	if (!qdf_debug_domain_valid(domain))
525 		return NULL;
526 
527 	/* just use the current position as our iterator */
528 	return pos;
529 }
530 
531 /**
532  * qdf_mem_seq_next() - next sequential callback
533  * @seq: seq_file handle
534  * @v: the current iterator
535  * @pos: the current position
536  *
537  * Get the next node and release previous node.
538  *
539  * Return: iterator pointer, or NULL if iteration is complete
540  */
541 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
542 {
543 	++*pos;
544 
545 	return qdf_mem_seq_start(seq, pos);
546 }
547 
/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release: the iterator is just the position pointer.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
556 
/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator (points at the domain id)
 *
 * Prints the domain banner followed by that domain's de-duplicated
 * allocation table.
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}
575 
576 /* sequential file operation table */
577 static const struct seq_operations qdf_mem_seq_ops = {
578 	.start = qdf_mem_seq_start,
579 	.next  = qdf_mem_seq_next,
580 	.stop  = qdf_mem_seq_stop,
581 	.show  = qdf_mem_seq_show,
582 };
583 
584 
/* debugfs open: attach the per-domain seq iterator to this file */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}
589 
590 /* debugfs file operation table */
591 static const struct file_operations fops_qdf_mem_debugfs = {
592 	.owner = THIS_MODULE,
593 	.open = qdf_mem_debugfs_open,
594 	.read = seq_read,
595 	.llseek = seq_lseek,
596 	.release = seq_release,
597 };
598 
599 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
600 {
601 	if (!qdf_mem_debugfs_root)
602 		return QDF_STATUS_E_FAILURE;
603 
604 	debugfs_create_file("list",
605 			    S_IRUSR,
606 			    qdf_mem_debugfs_root,
607 			    NULL,
608 			    &fops_qdf_mem_debugfs);
609 
610 	return QDF_STATUS_SUCCESS;
611 }
612 
/* Nothing to tear down: files are removed recursively with the root dir */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}
617 
618 #else /* MEMORY_DEBUG */
619 
/* MEMORY_DEBUG disabled: the "list" debugfs file is not supported */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
629 
630 #endif /* MEMORY_DEBUG */
631 
632 
/* Remove the qdf_mem debugfs tree and all files beneath it */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}
638 
/*
 * Create the "mem" debugfs directory under the qdf root and expose the
 * three outstanding-bytes counters (kmalloc/dma/skb) as read-only files.
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
669 
670 #else /* WLAN_DEBUGFS */
671 
/* WLAN_DEBUGFS disabled: stat accounting and debugfs entries are no-ops */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
694 
695 #endif /* WLAN_DEBUGFS */
696 
697 /**
698  * __qdf_mempool_init() - Create and initialize memory pool
699  *
700  * @osdev: platform device object
701  * @pool_addr: address of the pool created
702  * @elem_cnt: no. of elements in pool
703  * @elem_size: size of each pool element in bytes
704  * @flags: flags
705  *
706  * return: Handle to memory pool or NULL if allocation failed
707  */
708 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
709 		       int elem_cnt, size_t elem_size, u_int32_t flags)
710 {
711 	__qdf_mempool_ctxt_t *new_pool = NULL;
712 	u_int32_t align = L1_CACHE_BYTES;
713 	unsigned long aligned_pool_mem;
714 	int pool_id;
715 	int i;
716 
717 	if (prealloc_disabled) {
718 		/* TBD: We can maintain a list of pools in qdf_device_t
719 		 * to help debugging
720 		 * when pre-allocation is not enabled
721 		 */
722 		new_pool = (__qdf_mempool_ctxt_t *)
723 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
724 		if (new_pool == NULL)
725 			return QDF_STATUS_E_NOMEM;
726 
727 		memset(new_pool, 0, sizeof(*new_pool));
728 		/* TBD: define flags for zeroing buffers etc */
729 		new_pool->flags = flags;
730 		new_pool->elem_size = elem_size;
731 		new_pool->max_elem = elem_cnt;
732 		*pool_addr = new_pool;
733 		return 0;
734 	}
735 
736 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
737 		if (osdev->mem_pool[pool_id] == NULL)
738 			break;
739 	}
740 
741 	if (pool_id == MAX_MEM_POOLS)
742 		return -ENOMEM;
743 
744 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
745 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
746 	if (new_pool == NULL)
747 		return -ENOMEM;
748 
749 	memset(new_pool, 0, sizeof(*new_pool));
750 	/* TBD: define flags for zeroing buffers etc */
751 	new_pool->flags = flags;
752 	new_pool->pool_id = pool_id;
753 
754 	/* Round up the element size to cacheline */
755 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
756 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
757 				((align)?(align - 1):0);
758 
759 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
760 	if (new_pool->pool_mem == NULL) {
761 			/* TBD: Check if we need get_free_pages above */
762 		kfree(new_pool);
763 		osdev->mem_pool[pool_id] = NULL;
764 		return -ENOMEM;
765 	}
766 
767 	spin_lock_init(&new_pool->lock);
768 
769 	/* Initialize free list */
770 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
771 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
772 	STAILQ_INIT(&new_pool->free_list);
773 
774 	for (i = 0; i < elem_cnt; i++)
775 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
776 			(mempool_elem_t *)(aligned_pool_mem +
777 			(new_pool->elem_size * i)), mempool_entry);
778 
779 
780 	new_pool->free_cnt = elem_cnt;
781 	*pool_addr = new_pool;
782 	return 0;
783 }
784 qdf_export_symbol(__qdf_mempool_init);
785 
/**
 * __qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Returns: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
{
	int pool_id = 0;

	if (!pool)
		return;

	/* prealloc-disabled pools have no backing buffer or device slot */
	if (prealloc_disabled) {
		kfree(pool);
		return;
	}

	pool_id = pool->pool_id;

	/* TBD: Check if free count matches elem_cnt if debug is enabled */
	kfree(pool->pool_mem);
	kfree(pool);
	osdev->mem_pool[pool_id] = NULL;
}
812 qdf_export_symbol(__qdf_mempool_destroy);
813 
814 /**
815  * __qdf_mempool_alloc() - Allocate an element memory pool
816  *
817  * @osdev: platform device object
818  * @Handle: to memory pool
819  *
820  * Return: Pointer to the allocated element or NULL if the pool is empty
821  */
822 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
823 {
824 	void *buf = NULL;
825 
826 	if (!pool)
827 		return NULL;
828 
829 	if (prealloc_disabled)
830 		return  qdf_mem_malloc(pool->elem_size);
831 
832 	spin_lock_bh(&pool->lock);
833 
834 	buf = STAILQ_FIRST(&pool->free_list);
835 	if (buf != NULL) {
836 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
837 		pool->free_cnt--;
838 	}
839 
840 	/* TBD: Update free count if debug is enabled */
841 	spin_unlock_bh(&pool->lock);
842 
843 	return buf;
844 }
845 qdf_export_symbol(__qdf_mempool_alloc);
846 
847 /**
848  * __qdf_mempool_free() - Free a memory pool element
849  * @osdev: Platform device object
850  * @pool: Handle to memory pool
851  * @buf: Element to be freed
852  *
853  * Returns: none
854  */
855 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
856 {
857 	if (!pool)
858 		return;
859 
860 
861 	if (prealloc_disabled)
862 		return qdf_mem_free(buf);
863 
864 	spin_lock_bh(&pool->lock);
865 	pool->free_cnt++;
866 
867 	STAILQ_INSERT_TAIL
868 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
869 	spin_unlock_bh(&pool->lock);
870 }
871 qdf_export_symbol(__qdf_mempool_free);
872 
/**
 * qdf_mem_alloc_outline() - allocation QDF memory
 * @osdev: platform device object (unused; kept for API compatibility)
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
891 qdf_export_symbol(qdf_mem_alloc_outline);
892 
/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * if memory is corrupted or getting double freed and panics (when
 * MEMORY_DEBUG is enabled).
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
907 qdf_export_symbol(qdf_mem_free_outline);
908 
/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
924 qdf_export_symbol(qdf_mem_zero_outline);
925 
926 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL (zeroed) on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* pre-allocated chunks are recycled; zero to match kzalloc semantics */
	memset(ptr, 0, size);

	return ptr;
}
952 
/* Return @ptr to the pre-allocated pool; true if it was a prealloc chunk */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
/* Pre-allocation not compiled in: never hand out a prealloc chunk */
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

/* Pre-allocation not compiled in: @ptr is never a prealloc chunk */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
968 
969 static int qdf_mem_malloc_flags(void)
970 {
971 	if (in_interrupt() || irqs_disabled() || in_atomic())
972 		return GFP_ATOMIC;
973 
974 	return GFP_KERNEL;
975 }
976 
977 /* External Function implementation */
978 #ifdef MEMORY_DEBUG
979 
/**
 * qdf_mem_debug_init() - initialize qdf memory debug functionality
 *
 * Creates the per-domain tracking lists (regular and dma) with their
 * spinlocks, and initializes nbuf (skb) tracking.
 *
 * Return: none
 */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Initalizing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma (0 == unbounded list size) */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);

	/* skb */
	qdf_net_buf_debug_init();
}
1002 
/*
 * Report any allocations still tracked in @mem_list for @domain.
 *
 * Return: the number of leaked allocations (0 when the list is empty)
 */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}
1016 
1017 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1018 {
1019 	uint32_t leak_count = 0;
1020 	int i;
1021 
1022 	/* detect and print leaks */
1023 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1024 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1025 
1026 	if (leak_count)
1027 		panic("%u fatal memory leaks detected!", leak_count);
1028 }
1029 
1030 /**
1031  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1032  *
1033  * Return: none
1034  */
1035 static void qdf_mem_debug_exit(void)
1036 {
1037 	int i;
1038 
1039 	/* skb */
1040 	qdf_net_buf_debug_exit();
1041 
1042 	/* mem */
1043 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1044 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1045 		qdf_list_destroy(qdf_mem_list_get(i));
1046 
1047 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1048 
1049 	/* dma */
1050 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1051 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1052 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1053 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1054 }
1055 
/*
 * Debug-tracked allocator: allocates size + header + trailer, stamps the
 * guard patterns, records the allocation site, and links the header into
 * the current debug domain's tracking list.
 *
 * @file/@line/@caller identify the allocation site for leak reports.
 * Returns NULL for size == 0 or size > QDF_MEM_MAX_MALLOC.
 * Note: pre-allocated chunks (qdf_mem_prealloc_get) are NOT tracked.
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* time the allocation to flag unexpectedly slow (sleeping) mallocs */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, qdf_mem_malloc_flags());
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
1102 qdf_export_symbol(qdf_mem_malloc_debug);
1103 
/*
 * Debug-tracked free: validates the header and trailer guard patterns,
 * unlinks the allocation from its domain's tracking list, and panics
 * (via qdf_mem_header_assert_valid) on any detected corruption or
 * double free. @file/@line identify the free call site for the panic.
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* pre-allocated chunks were never tracked; just return them */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* ptr must be large enough for a header to precede it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	/* only unlink when the header checked out; mark to catch re-frees */
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* panics when error_bitmap is non-zero; no-op otherwise */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
1138 qdf_export_symbol(qdf_mem_free_debug);
1139 
1140 void qdf_mem_check_for_leaks(void)
1141 {
1142 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1143 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1144 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1145 	uint32_t leaks_count = 0;
1146 
1147 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1148 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1149 
1150 	if (leaks_count)
1151 		panic("%u fatal memory leaks detected!", leaks_count);
1152 }
1153 
1154 #else
/* no-op stub when MEMORY_DEBUG is not defined */
static void qdf_mem_debug_init(void) {}
1156 
/* no-op stub when MEMORY_DEBUG is not defined */
static void qdf_mem_debug_exit(void) {}
1158 
1159 /**
1160  * qdf_mem_malloc() - allocation QDF memory
1161  * @size: Number of bytes of memory to allocate.
1162  *
1163  * This function will dynamicallly allocate the specified number of bytes of
1164  * memory.
1165  *
1166  * Return:
1167  * Upon successful allocate, returns a non-NULL pointer to the allocated
1168  * memory.  If this function is unable to allocate the amount of memory
1169  * specified (for any reason) it returns NULL.
1170  */
1171 void *qdf_mem_malloc(size_t size)
1172 {
1173 	void *ptr;
1174 
1175 	ptr = qdf_mem_prealloc_get(size);
1176 	if (ptr)
1177 		return ptr;
1178 
1179 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1180 	if (!ptr)
1181 		return NULL;
1182 
1183 	qdf_mem_kmalloc_inc(ksize(ptr));
1184 
1185 	return ptr;
1186 }
1187 qdf_export_symbol(qdf_mem_malloc);
1188 
1189 /**
1190  * qdf_mem_free() - free QDF memory
1191  * @ptr: Pointer to the starting address of the memory to be free'd.
1192  *
1193  * This function will free the memory pointed to by 'ptr'.
1194  *
1195  * Return: None
1196  */
1197 void qdf_mem_free(void *ptr)
1198 {
1199 	if (ptr == NULL)
1200 		return;
1201 
1202 	if (qdf_mem_prealloc_put(ptr))
1203 		return;
1204 
1205 	qdf_mem_kmalloc_dec(ksize(ptr));
1206 
1207 	kfree(ptr);
1208 }
1209 qdf_export_symbol(qdf_mem_free);
1210 #endif
1211 
1212 /**
1213  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1214  * @osdev: OS device handle pointer
1215  * @pages: Multi page information storage
1216  * @element_size: Each element size
1217  * @element_num: Total number of elements should be allocated
1218  * @memctxt: Memory context
1219  * @cacheable: Coherent memory or cacheable memory
1220  *
1221  * This function will allocate large size of memory over multiple pages.
1222  * Large size of contiguous memory allocation will fail frequently, then
1223  * instead of allocate large memory by one shot, allocate through multiple, non
1224  * contiguous memory and combine pages when actual usage
1225  *
1226  * Return: None
1227  */
1228 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1229 			       struct qdf_mem_multi_page_t *pages,
1230 			       size_t element_size, uint16_t element_num,
1231 			       qdf_dma_context_t memctxt, bool cacheable)
1232 {
1233 	uint16_t page_idx;
1234 	struct qdf_mem_dma_page_t *dma_pages;
1235 	void **cacheable_pages = NULL;
1236 	uint16_t i;
1237 
1238 	pages->num_element_per_page = PAGE_SIZE / element_size;
1239 	if (!pages->num_element_per_page) {
1240 		qdf_print("Invalid page %d or element size %d",
1241 			  (int)PAGE_SIZE, (int)element_size);
1242 		goto out_fail;
1243 	}
1244 
1245 	pages->num_pages = element_num / pages->num_element_per_page;
1246 	if (element_num % pages->num_element_per_page)
1247 		pages->num_pages++;
1248 
1249 	if (cacheable) {
1250 		/* Pages information storage */
1251 		pages->cacheable_pages = qdf_mem_malloc(
1252 			pages->num_pages * sizeof(pages->cacheable_pages));
1253 		if (!pages->cacheable_pages) {
1254 			qdf_print("Cacheable page storage alloc fail");
1255 			goto out_fail;
1256 		}
1257 
1258 		cacheable_pages = pages->cacheable_pages;
1259 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1260 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1261 			if (!cacheable_pages[page_idx]) {
1262 				qdf_print("cacheable page alloc fail, pi %d",
1263 					  page_idx);
1264 				goto page_alloc_fail;
1265 			}
1266 		}
1267 		pages->dma_pages = NULL;
1268 	} else {
1269 		pages->dma_pages = qdf_mem_malloc(
1270 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1271 		if (!pages->dma_pages) {
1272 			qdf_print("dmaable page storage alloc fail");
1273 			goto out_fail;
1274 		}
1275 
1276 		dma_pages = pages->dma_pages;
1277 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1278 			dma_pages->page_v_addr_start =
1279 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1280 					 PAGE_SIZE,
1281 					&dma_pages->page_p_addr);
1282 			if (!dma_pages->page_v_addr_start) {
1283 				qdf_print("dmaable page alloc fail pi %d",
1284 					page_idx);
1285 				goto page_alloc_fail;
1286 			}
1287 			dma_pages->page_v_addr_end =
1288 				dma_pages->page_v_addr_start + PAGE_SIZE;
1289 			dma_pages++;
1290 		}
1291 		pages->cacheable_pages = NULL;
1292 	}
1293 	return;
1294 
1295 page_alloc_fail:
1296 	if (cacheable) {
1297 		for (i = 0; i < page_idx; i++)
1298 			qdf_mem_free(pages->cacheable_pages[i]);
1299 		qdf_mem_free(pages->cacheable_pages);
1300 	} else {
1301 		dma_pages = pages->dma_pages;
1302 		for (i = 0; i < page_idx; i++) {
1303 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1304 				dma_pages->page_v_addr_start,
1305 				dma_pages->page_p_addr, memctxt);
1306 			dma_pages++;
1307 		}
1308 		qdf_mem_free(pages->dma_pages);
1309 	}
1310 
1311 out_fail:
1312 	pages->cacheable_pages = NULL;
1313 	pages->dma_pages = NULL;
1314 	pages->num_pages = 0;
1315 	return;
1316 }
1317 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1318 
1319 /**
1320  * qdf_mem_multi_pages_free() - free large size of kernel memory
1321  * @osdev: OS device handle pointer
1322  * @pages: Multi page information storage
1323  * @memctxt: Memory context
1324  * @cacheable: Coherent memory or cacheable memory
1325  *
1326  * This function will free large size of memory over multiple pages.
1327  *
1328  * Return: None
1329  */
1330 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1331 			      struct qdf_mem_multi_page_t *pages,
1332 			      qdf_dma_context_t memctxt, bool cacheable)
1333 {
1334 	unsigned int page_idx;
1335 	struct qdf_mem_dma_page_t *dma_pages;
1336 
1337 	if (cacheable) {
1338 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1339 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1340 		qdf_mem_free(pages->cacheable_pages);
1341 	} else {
1342 		dma_pages = pages->dma_pages;
1343 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1344 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1345 				dma_pages->page_v_addr_start,
1346 				dma_pages->page_p_addr, memctxt);
1347 			dma_pages++;
1348 		}
1349 		qdf_mem_free(pages->dma_pages);
1350 	}
1351 
1352 	pages->cacheable_pages = NULL;
1353 	pages->dma_pages = NULL;
1354 	pages->num_pages = 0;
1355 	return;
1356 }
1357 qdf_export_symbol(qdf_mem_multi_pages_free);
1358 
1359 /**
1360  * qdf_mem_multi_page_link() - Make links for multi page elements
1361  * @osdev: OS device handle pointer
1362  * @pages: Multi page information storage
1363  * @elem_size: Single element size
1364  * @elem_count: elements count should be linked
1365  * @cacheable: Coherent memory or cacheable memory
1366  *
1367  * This function will make links for multi page allocated structure
1368  *
1369  * Return: 0 success
1370  */
1371 int qdf_mem_multi_page_link(qdf_device_t osdev,
1372 		struct qdf_mem_multi_page_t *pages,
1373 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1374 {
1375 	uint16_t i, i_int;
1376 	void *page_info;
1377 	void **c_elem = NULL;
1378 	uint32_t num_link = 0;
1379 
1380 	for (i = 0; i < pages->num_pages; i++) {
1381 		if (cacheable)
1382 			page_info = pages->cacheable_pages[i];
1383 		else
1384 			page_info = pages->dma_pages[i].page_v_addr_start;
1385 
1386 		if (!page_info)
1387 			return -ENOMEM;
1388 
1389 		c_elem = (void **)page_info;
1390 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1391 			if (i_int == (pages->num_element_per_page - 1)) {
1392 				if (cacheable)
1393 					*c_elem = pages->
1394 						cacheable_pages[i + 1];
1395 				else
1396 					*c_elem = pages->
1397 						dma_pages[i + 1].
1398 							page_v_addr_start;
1399 				num_link++;
1400 				break;
1401 			} else {
1402 				*c_elem =
1403 					(void *)(((char *)c_elem) + elem_size);
1404 			}
1405 			num_link++;
1406 			c_elem = (void **)*c_elem;
1407 
1408 			/* Last link established exit */
1409 			if (num_link == (elem_count - 1))
1410 				break;
1411 		}
1412 	}
1413 
1414 	if (c_elem)
1415 		*c_elem = NULL;
1416 
1417 	return 0;
1418 }
1419 qdf_export_symbol(qdf_mem_multi_page_link);
1420 
1421 /**
1422  * qdf_mem_copy() - copy memory
1423  * @dst_addr: Pointer to destination memory location (to copy to)
1424  * @src_addr: Pointer to source memory location (to copy from)
1425  * @num_bytes: Number of bytes to copy.
1426  *
1427  * Copy host memory from one location to another, similar to memcpy in
1428  * standard C.  Note this function does not specifically handle overlapping
1429  * source and destination memory locations.  Calling this function with
1430  * overlapping source and destination memory locations will result in
1431  * unpredictable results.  Use qdf_mem_move() if the memory locations
1432  * for the source and destination are overlapping (or could be overlapping!)
1433  *
1434  * Return: none
1435  */
1436 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1437 {
1438 	if (0 == num_bytes) {
1439 		/* special case where dst_addr or src_addr can be NULL */
1440 		return;
1441 	}
1442 
1443 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1444 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1445 			  "%s called with NULL parameter, source:%pK destination:%pK",
1446 			  __func__, src_addr, dst_addr);
1447 		QDF_ASSERT(0);
1448 		return;
1449 	}
1450 	memcpy(dst_addr, src_addr, num_bytes);
1451 }
1452 qdf_export_symbol(qdf_mem_copy);
1453 
1454 /**
1455  * qdf_mem_zero() - zero out memory
1456  * @ptr: pointer to memory that will be set to zero
1457  * @num_bytes: number of bytes zero
1458  *
1459  * This function sets the memory location to all zeros, essentially clearing
1460  * the memory.
1461  *
1462  * Return: None
1463  */
1464 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1465 {
1466 	if (0 == num_bytes) {
1467 		/* special case where ptr can be NULL */
1468 		return;
1469 	}
1470 
1471 	if (ptr == NULL) {
1472 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1473 			  "%s called with NULL parameter ptr", __func__);
1474 		return;
1475 	}
1476 	memset(ptr, 0, num_bytes);
1477 }
1478 qdf_export_symbol(qdf_mem_zero);
1479 
1480 /**
1481  * qdf_mem_set() - set (fill) memory with a specified byte value.
1482  * @ptr: Pointer to memory that will be set
1483  * @num_bytes: Number of bytes to be set
1484  * @value: Byte set in memory
1485  *
1486  * Return: None
1487  */
1488 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1489 {
1490 	if (ptr == NULL) {
1491 		qdf_print("%s called with NULL parameter ptr", __func__);
1492 		return;
1493 	}
1494 	memset(ptr, value, num_bytes);
1495 }
1496 qdf_export_symbol(qdf_mem_set);
1497 
1498 /**
1499  * qdf_mem_move() - move memory
1500  * @dst_addr: pointer to destination memory location (to move to)
1501  * @src_addr: pointer to source memory location (to move from)
1502  * @num_bytes: number of bytes to move.
1503  *
1504  * Move host memory from one location to another, similar to memmove in
1505  * standard C.  Note this function *does* handle overlapping
1506  * source and destination memory locations.
1507 
1508  * Return: None
1509  */
1510 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1511 {
1512 	if (0 == num_bytes) {
1513 		/* special case where dst_addr or src_addr can be NULL */
1514 		return;
1515 	}
1516 
1517 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1518 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1519 			  "%s called with NULL parameter, source:%pK destination:%pK",
1520 			  __func__, src_addr, dst_addr);
1521 		QDF_ASSERT(0);
1522 		return;
1523 	}
1524 	memmove(dst_addr, src_addr, num_bytes);
1525 }
1526 qdf_export_symbol(qdf_mem_move);
1527 
1528 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1529 /**
1530  * qdf_mem_dma_alloc() - allocates memory for dma
1531  * @osdev: OS device handle
1532  * @dev: Pointer to device handle
1533  * @size: Size to be allocated
1534  * @phy_addr: Physical address
1535  *
1536  * Return: pointer of allocated memory or null if memory alloc fails
1537  */
1538 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1539 				      qdf_size_t size,
1540 				      qdf_dma_addr_t *phy_addr)
1541 {
1542 	void *vaddr;
1543 
1544 	vaddr = qdf_mem_malloc(size);
1545 	*phy_addr = ((uintptr_t) vaddr);
1546 	/* using this type conversion to suppress "cast from pointer to integer
1547 	 * of different size" warning on some platforms
1548 	 */
1549 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1550 	return vaddr;
1551 }
1552 
1553 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1554 #define QCA8074_RAM_BASE 0x50000000
1555 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
/**
 * qdf_mem_dma_alloc() - allocates memory for dma (QCA8074 x86 emulation)
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Retries the coherent allocation until the returned physical address is
 * at or above QCA8074_RAM_BASE — presumably the start of the address range
 * usable by the emulated target (TODO confirm). Each unusable buffer is
 * freed before retrying.
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int i;

	*phy_addr = 0;

	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_print("%s failed , size: %zu!\n", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		/* physical address below the usable window; free and retry */
		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	/* every retry produced an unusable address */
	return NULL;
}
1581 
1582 #else
/* default variant: a plain coherent DMA allocation; @osdev is unused */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
1588 #endif
1589 
1590 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* simulation/SDIO/USB variant: the "DMA" buffer is plain heap memory
 * (see qdf_mem_dma_alloc above), so release it with qdf_mem_free()
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
1596 #else
1597 
/* default variant: release a real coherent DMA buffer */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
1603 #endif
1604 
1605 #ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device handle passed to the DMA allocator
 * @size: number of bytes requested by the caller
 * @paddr: out-param for the buffer's DMA address
 * @file: File name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function (for leak reports)
 *
 * Allocates @size bytes plus trailing debug-header space and records the
 * allocation on the current domain's DMA tracking list.
 *
 * Return: virtual address of the buffer, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* reject zero-size and oversized requests */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	/* register the allocation for leak detection */
	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1649 
/**
 * qdf_mem_free_consistent_debug() - free tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: device handle passed to the DMA allocator
 * @size: size the caller originally requested (header sits at vaddr + size)
 * @vaddr: virtual address returned by qdf_mem_alloc_consistent_debug()
 * @paddr: DMA address of the buffer
 * @memctx: memory context (unused here)
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the trailing debug header, unlinks the allocation from the DMA
 * tracking list under the list lock, and releases the buffer.
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	/* the buffer was over-allocated by the debug header size */
	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);
1685 
1686 #else
1687 
1688 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1689 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1690 {
1691 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1692 
1693 	if (vaddr)
1694 		qdf_mem_dma_inc(size);
1695 
1696 	return vaddr;
1697 }
1698 qdf_export_symbol(qdf_mem_alloc_consistent);
1699 
1700 void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
1701 			     qdf_size_t size, void *vaddr,
1702 			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
1703 {
1704 	qdf_mem_dma_dec(size);
1705 	qdf_mem_dma_free(dev, size, vaddr, paddr);
1706 }
1707 qdf_export_symbol(qdf_mem_free_consistent);
1708 
1709 #endif /* MEMORY_DEBUG */
1710 
1711 /**
1712  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1713  * @osdev: OS device handle
1714  * @bus_addr: dma address to give to the device
1715  * @size: Size of the memory block
1716  * @direction: direction data will be DMAed
1717  *
1718  * Assign memory to the remote device.
1719  * The cache lines are flushed to ram or invalidated as needed.
1720  *
1721  * Return: none
1722  */
1723 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1724 					qdf_dma_addr_t bus_addr,
1725 					qdf_size_t size,
1726 					enum dma_data_direction direction)
1727 {
1728 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1729 }
1730 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1731 
1732 /**
1733  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1734  * @osdev: OS device handle
1735  * @bus_addr: dma address to give to the cpu
1736  * @size: Size of the memory block
1737  * @direction: direction data will be DMAed
1738  *
1739  * Assign memory to the CPU.
1740  *
1741  * Return: none
1742  */
1743 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1744 				     qdf_dma_addr_t bus_addr,
1745 				     qdf_size_t size,
1746 				     enum dma_data_direction direction)
1747 {
1748 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1749 }
1750 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1751 
/**
 * qdf_mem_init() - initialize the QDF memory module
 *
 * Brings up leak tracking (no-op without MEMORY_DEBUG) and the debugfs
 * entries. Must be paired with qdf_mem_exit().
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);
1759 
/**
 * qdf_mem_exit() - tear down the QDF memory module
 *
 * Reverses qdf_mem_init() in the opposite order of initialization.
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);
1767 
1768 /**
1769  * qdf_ether_addr_copy() - copy an Ethernet address
1770  *
1771  * @dst_addr: A six-byte array Ethernet address destination
1772  * @src_addr: A six-byte array Ethernet address source
1773  *
1774  * Please note: dst & src must both be aligned to u16.
1775  *
1776  * Return: none
1777  */
1778 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1779 {
1780 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1781 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1782 			  "%s called with NULL parameter, source:%pK destination:%pK",
1783 			  __func__, src_addr, dst_addr);
1784 		QDF_ASSERT(0);
1785 		return;
1786 	}
1787 	ether_addr_copy(dst_addr, src_addr);
1788 }
1789 qdf_export_symbol(qdf_ether_addr_copy);
1790 
1791