xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/string.h>
36 
37 #ifdef CONFIG_MCL
38 #include <host_diag_core_event.h>
39 #else
40 #define host_log_low_resource_failure(code) do {} while (0)
41 #endif
42 
43 #if defined(CONFIG_CNSS)
44 #include <net/cnss.h>
45 #endif
46 
47 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
48 #include <net/cnss_prealloc.h>
49 #endif
50 
51 #ifdef MEMORY_DEBUG
52 #include "qdf_debug_domain.h"
53 #include <qdf_list.h>
54 
/* Preprocessor Definitions and Constants */
#define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
#define QDF_MEM_WARN_THRESHOLD 300 /* ms */
#define QDF_DEBUG_STRING_SIZE 512

/* per-domain tracking lists for regular debug allocations, and their lock */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

/* per-domain tracking lists for DMA debug allocations, and their lock */
static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;
65 
/* Return the tracking list for regular allocations in @domain */
static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}

/* Return the tracking list for DMA allocations in @domain */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}
75 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 *
 * This header is prepended to each debug allocation; a 64-bit trailer
 * canary is appended after the user region (see qdf_mem_get_trailer()).
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};
100 
/* known canary values stamped before/after each allocation to catch
 * out-of-bounds writes in either direction
 */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
103 
104 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
105 {
106 	return (struct qdf_mem_header *)ptr - 1;
107 }
108 
109 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
110 							    qdf_size_t size)
111 {
112 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
113 }
114 
115 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
116 {
117 	return (uint64_t *)((void *)(header + 1) + header->size);
118 }
119 
120 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
121 {
122 	return (void *)(header + 1);
123 }
124 
/* number of bytes needed for the qdf memory debug information */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information
 * (DMA allocations carry no trailer; only the header is appended)
 */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header))
132 
133 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
134 {
135 	QDF_BUG(header);
136 	if (!header)
137 		return;
138 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
139 }
140 
141 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
142 				const char *file, uint32_t line, void *caller)
143 {
144 	QDF_BUG(header);
145 	if (!header)
146 		return;
147 
148 	header->domain = qdf_debug_domain_get();
149 	header->freed = false;
150 
151 	/* copy the file name, rather than pointing to it */
152 	qdf_str_lcopy(header->file, kbasename(file), QDF_MEM_FILE_NAME_SIZE);
153 
154 	header->line = line;
155 	header->size = size;
156 	header->caller = caller;
157 	header->header = WLAN_MEM_HEADER;
158 	header->time = qdf_get_log_timestamp();
159 }
160 
/* bitmap of possible validation failures for a debug allocation */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,	/* header canary corrupted */
	QDF_MEM_BAD_TRAILER = 1 << 1,	/* trailer canary corrupted */
	QDF_MEM_BAD_SIZE = 1 << 2,	/* recorded size exceeds the max */
	QDF_MEM_DOUBLE_FREE = 1 << 3,	/* freed flag already set to true */
	QDF_MEM_BAD_FREED = 1 << 4,	/* freed flag holds a non-bool value */
	QDF_MEM_BAD_NODE = 1 << 5,	/* tracking list node is corrupt */
	QDF_MEM_BAD_DOMAIN = 1 << 6,	/* domain id out of valid range */
	QDF_MEM_WRONG_DOMAIN = 1 << 7,	/* freed from a different domain */
};
171 
172 /**
173  * qdf_mem_validate_list_node() - validate that the node is in a list
174  * @qdf_node: node to check for being in a list
175  *
176  * Return: true if the node validly linked in an anchored doubly linked list
177  */
178 static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
179 {
180 	struct list_head *node = qdf_node;
181 
182 	/*
183 	 * if the node is an empty list, it is not tied to an anchor node
184 	 * and must have been removed with list_del_init
185 	 */
186 	if (list_empty(node))
187 		return false;
188 
189 	if (!node->prev || !node->next)
190 		return false;
191 
192 	if (node->prev->next != node || node->next->prev != node)
193 		return false;
194 
195 	return true;
196 }
197 
198 static enum qdf_mem_validation_bitmap
199 qdf_mem_trailer_validate(struct qdf_mem_header *header)
200 {
201 	enum qdf_mem_validation_bitmap error_bitmap = 0;
202 
203 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
204 		error_bitmap |= QDF_MEM_BAD_TRAILER;
205 	return error_bitmap;
206 }
207 
/* Validate @header against the active @domain; returns a bitmap of errors */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is a uint8_t on purpose: exactly 'true' means double free,
	 * while any other non-zero value means the flag itself is corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	/* out-of-range domain means corruption; a valid but different domain
	 * means the allocation outlived the domain it was made in
	 */
	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
236 
/**
 * qdf_mem_header_assert_valid() - panic if validation errors were detected
 * @header: the header that failed validation (fields may be corrupt and are
 *	only used for logging)
 * @current_domain: the active memory debug domain
 * @error_bitmap: bitmap of failures from the validate helpers; 0 is a no-op
 * @file: file name of the call site, for the panic message
 * @line: line number of the call site, for the panic message
 *
 * Logs one diagnostic per error bit, then panics via QDF_DEBUG_PANIC.
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	/* does not return */
	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", file, line);
}
278 #endif /* MEMORY_DEBUG */
279 
/* pre-allocated pools disabled by default; overridable as a module param */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
qdf_export_symbol(prealloc_disabled);

#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total kmalloc allocations
 * @dma:	total dma allocations
 * @skb:	total skb allocations
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;
300 
/* Add @size bytes to the debugfs "kmalloc" outstanding-bytes counter */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* Add @size bytes to the debugfs "dma" outstanding-bytes counter */
static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

/* Add @size bytes to the debugfs "skb" outstanding-bytes counter */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* Subtract @size bytes from the "kmalloc" counter on free */
static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* Subtract @size bytes from the "dma" counter on free */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

/* Subtract @size bytes from the "skb" counter on free */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}
330 
331 #ifdef MEMORY_DEBUG
/* qdf_abstract_print adapter that routes output to the QDF error trace */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* qdf_abstract_print adapter that writes one line into a seq_file (@priv) */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
355 
/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type
 * @time: timestamp at which allocation happened
 */
struct __qdf_mem_info {
	char file[QDF_MEM_FILE_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8

/**
 * qdf_mem_domain_print_header() - memory domain header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Prints the column banner shown before each metadata table.
 *
 * Return: None
 */
static void qdf_mem_domain_print_header(qdf_abstract_print print,
					void *print_priv)
{
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count    size     total    filename     caller    timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}
397 
398 /**
399  * qdf_mem_meta_table_print() - memory metadata table print logic
400  * @table: the memory metadata table to print
401  * @print: the print adapter function
402  * @print_priv: the private data to be consumed by @print
403  *
404  * Return: None
405  */
406 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
407 				     qdf_abstract_print print,
408 				     void *print_priv)
409 {
410 	int i;
411 	char debug_str[QDF_DEBUG_STRING_SIZE];
412 	size_t len = 0;
413 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
414 
415 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
416 			     "%s", debug_prefix);
417 
418 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
419 		if (!table[i].count)
420 			break;
421 
422 		print(print_priv,
423 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
424 		      table[i].count,
425 		      table[i].size,
426 		      table[i].count * table[i].size,
427 		      table[i].file,
428 		      table[i].line, table[i].caller,
429 		      table[i].time);
430 		len += qdf_scnprintf(debug_str + len,
431 				     sizeof(debug_str) - len,
432 				     " @ %s:%u %pS",
433 				     table[i].file,
434 				     table[i].line,
435 				     table[i].caller);
436 	}
437 	print(print_priv, "%s", debug_str);
438 }
439 
440 /**
441  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
442  * @table: the memory metadata table to insert into
443  * @meta: the memory metadata to insert
444  *
445  * Return: true if the table is full after inserting, false otherwise
446  */
447 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
448 				      struct qdf_mem_header *meta)
449 {
450 	int i;
451 
452 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
453 		if (!table[i].count) {
454 			qdf_str_lcopy(table[i].file, meta->file,
455 				      QDF_MEM_FILE_NAME_SIZE);
456 			table[i].line = meta->line;
457 			table[i].size = meta->size;
458 			table[i].count = 1;
459 			table[i].caller = meta->caller;
460 			table[i].time = meta->time;
461 			break;
462 		}
463 
464 		if (qdf_str_eq(table[i].file, meta->file) &&
465 		    table[i].line == meta->line &&
466 		    table[i].size == meta->size &&
467 		    table[i].caller == meta->caller) {
468 			table[i].count++;
469 			break;
470 		}
471 	}
472 
473 	/* return true if the table is now full */
474 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
475 }
476 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Aggregates identical allocation sites into a fixed-size table, flushing
 * it through @print whenever it fills. The tracking lock is dropped around
 * each flush because @print may block (e.g. seq_file output).
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock while printing; @print may sleep */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever remains in the partially filled table */
	qdf_mem_meta_table_print(table, print, print_priv);
}
517 
518 /**
519  * qdf_mem_seq_start() - sequential callback to start
520  * @seq: seq_file handle
521  * @pos: The start position of the sequence
522  *
523  * Return: iterator pointer, or NULL if iteration is complete
524  */
525 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
526 {
527 	enum qdf_debug_domain domain = *pos;
528 
529 	if (!qdf_debug_domain_valid(domain))
530 		return NULL;
531 
532 	/* just use the current position as our iterator */
533 	return pos;
534 }
535 
/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Get the next node and release previous node.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	/* advance the domain index; start() re-validates it */
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release; no resources are held across iterations.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
561 
/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator (points at the current domain id)
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	/* dump this domain's outstanding allocations into the seq_file */
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}
580 
/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


/* open callback: bind the seq_operations to the debugfs "list" file */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
603 
/* Create the "list" debugfs file exposing tracked allocations per domain */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	return QDF_STATUS_SUCCESS;
}

/* Nothing to do: removing the root directory removes the "list" file too */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}

#else /* MEMORY_DEBUG */

/* Memory debug disabled: no allocation-list debugfs file is available */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
634 
635 #endif /* MEMORY_DEBUG */
636 
637 
/* Remove the qdf_mem debugfs tree, including all counter files beneath it */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

/* Create the "mem" debugfs directory with kmalloc/dma/skb byte counters */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
674 
675 #else /* WLAN_DEBUGFS */
676 
/* WLAN_DEBUGFS disabled: counter updates and debugfs hooks become no-ops */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
699 
700 #endif /* WLAN_DEBUGFS */
701 
702 /**
703  * __qdf_mempool_init() - Create and initialize memory pool
704  *
705  * @osdev: platform device object
706  * @pool_addr: address of the pool created
707  * @elem_cnt: no. of elements in pool
708  * @elem_size: size of each pool element in bytes
709  * @flags: flags
710  *
711  * return: Handle to memory pool or NULL if allocation failed
712  */
713 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
714 		       int elem_cnt, size_t elem_size, u_int32_t flags)
715 {
716 	__qdf_mempool_ctxt_t *new_pool = NULL;
717 	u_int32_t align = L1_CACHE_BYTES;
718 	unsigned long aligned_pool_mem;
719 	int pool_id;
720 	int i;
721 
722 	if (prealloc_disabled) {
723 		/* TBD: We can maintain a list of pools in qdf_device_t
724 		 * to help debugging
725 		 * when pre-allocation is not enabled
726 		 */
727 		new_pool = (__qdf_mempool_ctxt_t *)
728 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
729 		if (new_pool == NULL)
730 			return QDF_STATUS_E_NOMEM;
731 
732 		memset(new_pool, 0, sizeof(*new_pool));
733 		/* TBD: define flags for zeroing buffers etc */
734 		new_pool->flags = flags;
735 		new_pool->elem_size = elem_size;
736 		new_pool->max_elem = elem_cnt;
737 		*pool_addr = new_pool;
738 		return 0;
739 	}
740 
741 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
742 		if (osdev->mem_pool[pool_id] == NULL)
743 			break;
744 	}
745 
746 	if (pool_id == MAX_MEM_POOLS)
747 		return -ENOMEM;
748 
749 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
750 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
751 	if (new_pool == NULL)
752 		return -ENOMEM;
753 
754 	memset(new_pool, 0, sizeof(*new_pool));
755 	/* TBD: define flags for zeroing buffers etc */
756 	new_pool->flags = flags;
757 	new_pool->pool_id = pool_id;
758 
759 	/* Round up the element size to cacheline */
760 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
761 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
762 				((align)?(align - 1):0);
763 
764 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
765 	if (new_pool->pool_mem == NULL) {
766 			/* TBD: Check if we need get_free_pages above */
767 		kfree(new_pool);
768 		osdev->mem_pool[pool_id] = NULL;
769 		return -ENOMEM;
770 	}
771 
772 	spin_lock_init(&new_pool->lock);
773 
774 	/* Initialize free list */
775 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
776 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
777 	STAILQ_INIT(&new_pool->free_list);
778 
779 	for (i = 0; i < elem_cnt; i++)
780 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
781 			(mempool_elem_t *)(aligned_pool_mem +
782 			(new_pool->elem_size * i)), mempool_entry);
783 
784 
785 	new_pool->free_cnt = elem_cnt;
786 	*pool_addr = new_pool;
787 	return 0;
788 }
789 qdf_export_symbol(__qdf_mempool_init);
790 
791 /**
792  * __qdf_mempool_destroy() - Destroy memory pool
793  * @osdev: platform device object
794  * @Handle: to memory pool
795  *
796  * Returns: none
797  */
798 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
799 {
800 	int pool_id = 0;
801 
802 	if (!pool)
803 		return;
804 
805 	if (prealloc_disabled) {
806 		kfree(pool);
807 		return;
808 	}
809 
810 	pool_id = pool->pool_id;
811 
812 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
813 	kfree(pool->pool_mem);
814 	kfree(pool);
815 	osdev->mem_pool[pool_id] = NULL;
816 }
817 qdf_export_symbol(__qdf_mempool_destroy);
818 
819 /**
820  * __qdf_mempool_alloc() - Allocate an element memory pool
821  *
822  * @osdev: platform device object
823  * @Handle: to memory pool
824  *
825  * Return: Pointer to the allocated element or NULL if the pool is empty
826  */
827 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
828 {
829 	void *buf = NULL;
830 
831 	if (!pool)
832 		return NULL;
833 
834 	if (prealloc_disabled)
835 		return  qdf_mem_malloc(pool->elem_size);
836 
837 	spin_lock_bh(&pool->lock);
838 
839 	buf = STAILQ_FIRST(&pool->free_list);
840 	if (buf != NULL) {
841 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
842 		pool->free_cnt--;
843 	}
844 
845 	/* TBD: Update free count if debug is enabled */
846 	spin_unlock_bh(&pool->lock);
847 
848 	return buf;
849 }
850 qdf_export_symbol(__qdf_mempool_alloc);
851 
852 /**
853  * __qdf_mempool_free() - Free a memory pool element
854  * @osdev: Platform device object
855  * @pool: Handle to memory pool
856  * @buf: Element to be freed
857  *
858  * Returns: none
859  */
860 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
861 {
862 	if (!pool)
863 		return;
864 
865 
866 	if (prealloc_disabled)
867 		return qdf_mem_free(buf);
868 
869 	spin_lock_bh(&pool->lock);
870 	pool->free_cnt++;
871 
872 	STAILQ_INSERT_TAIL
873 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
874 	spin_unlock_bh(&pool->lock);
875 }
876 qdf_export_symbol(__qdf_mempool_free);
877 
/**
 * qdf_mem_alloc_outline() - allocation QDF memory
 * @osdev: platform device object (unused)
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
qdf_export_symbol(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether the memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
qdf_export_symbol(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
qdf_export_symbol(qdf_mem_zero_outline);
930 
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* callers expect zeroed memory, matching kzalloc behaviour */
	memset(ptr, 0, size);

	return ptr;
}

/* Return @ptr to the pre-allocated reserve; true if it was taken back */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
/* pre-allocation unsupported: always fall through to the heap */
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
973 
974 static int qdf_mem_malloc_flags(void)
975 {
976 	if (in_interrupt() || irqs_disabled() || in_atomic())
977 		return GFP_ATOMIC;
978 
979 	return GFP_KERNEL;
980 }
981 
982 /* External Function implementation */
983 #ifdef MEMORY_DEBUG
984 
985 /**
986  * qdf_mem_debug_init() - initialize qdf memory debug functionality
987  *
988  * Return: none
989  */
990 static void qdf_mem_debug_init(void)
991 {
992 	int i;
993 
994 	/* Initalizing the list with maximum size of 60000 */
995 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
996 		qdf_list_create(&qdf_mem_domains[i], 60000);
997 	qdf_spinlock_create(&qdf_mem_list_lock);
998 
999 	/* dma */
1000 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1001 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1002 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1003 }
1004 
/* Report any leaks in @mem_list; returns the number of leaked allocations */
static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
			       qdf_list_t *mem_list)
{
	if (qdf_list_empty(mem_list))
		return 0;

	qdf_err("Memory leaks detected in %s domain!",
		qdf_debug_domain_name(domain));
	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);

	return mem_list->count;
}
1018 
1019 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1020 {
1021 	uint32_t leak_count = 0;
1022 	int i;
1023 
1024 	/* detect and print leaks */
1025 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1026 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1027 
1028 	if (leak_count)
1029 		panic("%u fatal memory leaks detected!", leak_count);
1030 }
1031 
1032 /**
1033  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1034  *
1035  * Return: none
1036  */
1037 static void qdf_mem_debug_exit(void)
1038 {
1039 	int i;
1040 
1041 	/* mem */
1042 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1043 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1044 		qdf_list_destroy(qdf_mem_list_get(i));
1045 
1046 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1047 
1048 	/* dma */
1049 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1050 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1051 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1052 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1053 }
1054 
/**
 * qdf_mem_malloc_debug() - debug implementation of the QDF allocation API
 * @size: number of bytes requested by the caller
 * @file: file name of the call site
 * @line: line number of the call site
 * @caller: return address of the call site, logged on leak/corruption
 * @flag: GFP flags; 0 selects them automatically from context
 *
 * Wraps the user region with header and trailer canaries and links it into
 * the active debug domain's tracking list.
 *
 * Return: pointer to the user region, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* large requests may be served from the pre-allocated reserve;
	 * such memory is not canary-wrapped or tracked
	 */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* time the allocation to flag unexpectedly slow (sleeping) mallocs */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, file, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);
1105 
/**
 * qdf_mem_free_debug() - debug implementation of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the debug header and trailer of the allocation, removes it from
 * the active-allocation tracking list, and releases the memory.
 *
 * Return: None
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* pool buffers are returned to the pre-allocated pool, not kfree'd */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* a pointer this close to address zero cannot have a header below it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	/* validate both the header (before) and trailer (after) user area */
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		/* mark freed and unlink while still holding the list lock so
		 * a concurrent double-free fails header validation above
		 */
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* reports (outside the lock) any validation failures found above */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);
1141 
1142 void qdf_mem_check_for_leaks(void)
1143 {
1144 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1145 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1146 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1147 	uint32_t leaks_count = 0;
1148 
1149 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1150 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1151 
1152 	if (leaks_count)
1153 		panic("%u fatal memory leaks detected!", leaks_count);
1154 }
1155 
1156 #else
/* MEMORY_DEBUG disabled: memory tracking initialization is a no-op */
static void qdf_mem_debug_init(void) {}
1158 
/* MEMORY_DEBUG disabled: memory tracking teardown is a no-op */
static void qdf_mem_debug_exit(void) {}
1160 
1161 void *qdf_mem_malloc_fl(size_t size, const char *func, uint32_t line)
1162 {
1163 	void *ptr;
1164 
1165 	ptr = qdf_mem_prealloc_get(size);
1166 	if (ptr)
1167 		return ptr;
1168 
1169 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1170 	if (!ptr) {
1171 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1172 			      size, func, line);
1173 		return NULL;
1174 	}
1175 
1176 	qdf_mem_kmalloc_inc(ksize(ptr));
1177 
1178 	return ptr;
1179 }
1180 qdf_export_symbol(qdf_mem_malloc_fl);
1181 
1182 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1183 {
1184 	void *ptr;
1185 
1186 	ptr = qdf_mem_prealloc_get(size);
1187 	if (ptr)
1188 		return ptr;
1189 
1190 	ptr = kzalloc(size, GFP_ATOMIC);
1191 	if (!ptr) {
1192 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1193 			      size, func, line);
1194 		return NULL;
1195 	}
1196 
1197 	qdf_mem_kmalloc_inc(ksize(ptr));
1198 
1199 	return ptr;
1200 }
1201 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1202 
1203 /**
1204  * qdf_mem_free() - free QDF memory
1205  * @ptr: Pointer to the starting address of the memory to be free'd.
1206  *
1207  * This function will free the memory pointed to by 'ptr'.
1208  *
1209  * Return: None
1210  */
1211 void qdf_mem_free(void *ptr)
1212 {
1213 	if (ptr == NULL)
1214 		return;
1215 
1216 	if (qdf_mem_prealloc_put(ptr))
1217 		return;
1218 
1219 	qdf_mem_kmalloc_dec(ksize(ptr));
1220 
1221 	kfree(ptr);
1222 }
1223 qdf_export_symbol(qdf_mem_free);
1224 #endif
1225 
1226 /**
1227  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1228  * @osdev: OS device handle pointer
1229  * @pages: Multi page information storage
1230  * @element_size: Each element size
1231  * @element_num: Total number of elements should be allocated
1232  * @memctxt: Memory context
1233  * @cacheable: Coherent memory or cacheable memory
1234  *
1235  * This function will allocate large size of memory over multiple pages.
1236  * Large size of contiguous memory allocation will fail frequently, then
1237  * instead of allocate large memory by one shot, allocate through multiple, non
1238  * contiguous memory and combine pages when actual usage
1239  *
1240  * Return: None
1241  */
1242 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1243 			       struct qdf_mem_multi_page_t *pages,
1244 			       size_t element_size, uint16_t element_num,
1245 			       qdf_dma_context_t memctxt, bool cacheable)
1246 {
1247 	uint16_t page_idx;
1248 	struct qdf_mem_dma_page_t *dma_pages;
1249 	void **cacheable_pages = NULL;
1250 	uint16_t i;
1251 
1252 	pages->num_element_per_page = PAGE_SIZE / element_size;
1253 	if (!pages->num_element_per_page) {
1254 		qdf_print("Invalid page %d or element size %d",
1255 			  (int)PAGE_SIZE, (int)element_size);
1256 		goto out_fail;
1257 	}
1258 
1259 	pages->num_pages = element_num / pages->num_element_per_page;
1260 	if (element_num % pages->num_element_per_page)
1261 		pages->num_pages++;
1262 
1263 	if (cacheable) {
1264 		/* Pages information storage */
1265 		pages->cacheable_pages = qdf_mem_malloc(
1266 			pages->num_pages * sizeof(pages->cacheable_pages));
1267 		if (!pages->cacheable_pages) {
1268 			qdf_print("Cacheable page storage alloc fail");
1269 			goto out_fail;
1270 		}
1271 
1272 		cacheable_pages = pages->cacheable_pages;
1273 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1274 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1275 			if (!cacheable_pages[page_idx]) {
1276 				qdf_print("cacheable page alloc fail, pi %d",
1277 					  page_idx);
1278 				goto page_alloc_fail;
1279 			}
1280 		}
1281 		pages->dma_pages = NULL;
1282 	} else {
1283 		pages->dma_pages = qdf_mem_malloc(
1284 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1285 		if (!pages->dma_pages) {
1286 			qdf_print("dmaable page storage alloc fail");
1287 			goto out_fail;
1288 		}
1289 
1290 		dma_pages = pages->dma_pages;
1291 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1292 			dma_pages->page_v_addr_start =
1293 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1294 					 PAGE_SIZE,
1295 					&dma_pages->page_p_addr);
1296 			if (!dma_pages->page_v_addr_start) {
1297 				qdf_print("dmaable page alloc fail pi %d",
1298 					page_idx);
1299 				goto page_alloc_fail;
1300 			}
1301 			dma_pages->page_v_addr_end =
1302 				dma_pages->page_v_addr_start + PAGE_SIZE;
1303 			dma_pages++;
1304 		}
1305 		pages->cacheable_pages = NULL;
1306 	}
1307 	return;
1308 
1309 page_alloc_fail:
1310 	if (cacheable) {
1311 		for (i = 0; i < page_idx; i++)
1312 			qdf_mem_free(pages->cacheable_pages[i]);
1313 		qdf_mem_free(pages->cacheable_pages);
1314 	} else {
1315 		dma_pages = pages->dma_pages;
1316 		for (i = 0; i < page_idx; i++) {
1317 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1318 				dma_pages->page_v_addr_start,
1319 				dma_pages->page_p_addr, memctxt);
1320 			dma_pages++;
1321 		}
1322 		qdf_mem_free(pages->dma_pages);
1323 	}
1324 
1325 out_fail:
1326 	pages->cacheable_pages = NULL;
1327 	pages->dma_pages = NULL;
1328 	pages->num_pages = 0;
1329 	return;
1330 }
1331 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1332 
1333 /**
1334  * qdf_mem_multi_pages_free() - free large size of kernel memory
1335  * @osdev: OS device handle pointer
1336  * @pages: Multi page information storage
1337  * @memctxt: Memory context
1338  * @cacheable: Coherent memory or cacheable memory
1339  *
1340  * This function will free large size of memory over multiple pages.
1341  *
1342  * Return: None
1343  */
1344 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1345 			      struct qdf_mem_multi_page_t *pages,
1346 			      qdf_dma_context_t memctxt, bool cacheable)
1347 {
1348 	unsigned int page_idx;
1349 	struct qdf_mem_dma_page_t *dma_pages;
1350 
1351 	if (cacheable) {
1352 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1353 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1354 		qdf_mem_free(pages->cacheable_pages);
1355 	} else {
1356 		dma_pages = pages->dma_pages;
1357 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1358 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1359 				dma_pages->page_v_addr_start,
1360 				dma_pages->page_p_addr, memctxt);
1361 			dma_pages++;
1362 		}
1363 		qdf_mem_free(pages->dma_pages);
1364 	}
1365 
1366 	pages->cacheable_pages = NULL;
1367 	pages->dma_pages = NULL;
1368 	pages->num_pages = 0;
1369 	return;
1370 }
1371 qdf_export_symbol(qdf_mem_multi_pages_free);
1372 
1373 /**
1374  * qdf_mem_multi_page_link() - Make links for multi page elements
1375  * @osdev: OS device handle pointer
1376  * @pages: Multi page information storage
1377  * @elem_size: Single element size
1378  * @elem_count: elements count should be linked
1379  * @cacheable: Coherent memory or cacheable memory
1380  *
1381  * This function will make links for multi page allocated structure
1382  *
1383  * Return: 0 success
1384  */
1385 int qdf_mem_multi_page_link(qdf_device_t osdev,
1386 		struct qdf_mem_multi_page_t *pages,
1387 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1388 {
1389 	uint16_t i, i_int;
1390 	void *page_info;
1391 	void **c_elem = NULL;
1392 	uint32_t num_link = 0;
1393 
1394 	for (i = 0; i < pages->num_pages; i++) {
1395 		if (cacheable)
1396 			page_info = pages->cacheable_pages[i];
1397 		else
1398 			page_info = pages->dma_pages[i].page_v_addr_start;
1399 
1400 		if (!page_info)
1401 			return -ENOMEM;
1402 
1403 		c_elem = (void **)page_info;
1404 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1405 			if (i_int == (pages->num_element_per_page - 1)) {
1406 				if (cacheable)
1407 					*c_elem = pages->
1408 						cacheable_pages[i + 1];
1409 				else
1410 					*c_elem = pages->
1411 						dma_pages[i + 1].
1412 							page_v_addr_start;
1413 				num_link++;
1414 				break;
1415 			} else {
1416 				*c_elem =
1417 					(void *)(((char *)c_elem) + elem_size);
1418 			}
1419 			num_link++;
1420 			c_elem = (void **)*c_elem;
1421 
1422 			/* Last link established exit */
1423 			if (num_link == (elem_count - 1))
1424 				break;
1425 		}
1426 	}
1427 
1428 	if (c_elem)
1429 		*c_elem = NULL;
1430 
1431 	return 0;
1432 }
1433 qdf_export_symbol(qdf_mem_multi_page_link);
1434 
1435 /**
1436  * qdf_mem_copy() - copy memory
1437  * @dst_addr: Pointer to destination memory location (to copy to)
1438  * @src_addr: Pointer to source memory location (to copy from)
1439  * @num_bytes: Number of bytes to copy.
1440  *
1441  * Copy host memory from one location to another, similar to memcpy in
1442  * standard C.  Note this function does not specifically handle overlapping
1443  * source and destination memory locations.  Calling this function with
1444  * overlapping source and destination memory locations will result in
1445  * unpredictable results.  Use qdf_mem_move() if the memory locations
1446  * for the source and destination are overlapping (or could be overlapping!)
1447  *
1448  * Return: none
1449  */
1450 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1451 {
1452 	if (0 == num_bytes) {
1453 		/* special case where dst_addr or src_addr can be NULL */
1454 		return;
1455 	}
1456 
1457 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1458 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1459 			  "%s called with NULL parameter, source:%pK destination:%pK",
1460 			  __func__, src_addr, dst_addr);
1461 		QDF_ASSERT(0);
1462 		return;
1463 	}
1464 	memcpy(dst_addr, src_addr, num_bytes);
1465 }
1466 qdf_export_symbol(qdf_mem_copy);
1467 
1468 /**
1469  * qdf_mem_zero() - zero out memory
1470  * @ptr: pointer to memory that will be set to zero
1471  * @num_bytes: number of bytes zero
1472  *
1473  * This function sets the memory location to all zeros, essentially clearing
1474  * the memory.
1475  *
1476  * Return: None
1477  */
1478 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1479 {
1480 	if (0 == num_bytes) {
1481 		/* special case where ptr can be NULL */
1482 		return;
1483 	}
1484 
1485 	if (ptr == NULL) {
1486 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1487 			  "%s called with NULL parameter ptr", __func__);
1488 		return;
1489 	}
1490 	memset(ptr, 0, num_bytes);
1491 }
1492 qdf_export_symbol(qdf_mem_zero);
1493 
1494 /**
1495  * qdf_mem_copy_toio() - copy memory
1496  * @dst_addr: Pointer to destination memory location (to copy to)
1497  * @src_addr: Pointer to source memory location (to copy from)
1498  * @num_bytes: Number of bytes to copy.
1499  *
1500  * Return: none
1501  */
1502 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1503 {
1504 	if (0 == num_bytes) {
1505 		/* special case where dst_addr or src_addr can be NULL */
1506 		return;
1507 	}
1508 
1509 	if ((!dst_addr) || (!src_addr)) {
1510 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1511 			  "%s called with NULL parameter, source:%pK destination:%pK",
1512 			  __func__, src_addr, dst_addr);
1513 		QDF_ASSERT(0);
1514 		return;
1515 	}
1516 	memcpy_toio(dst_addr, src_addr, num_bytes);
1517 }
1518 
1519 qdf_export_symbol(qdf_mem_copy_toio);
1520 
1521 /**
1522  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1523  * @ptr: Pointer to memory that will be set
1524  * @value: Byte set in memory
1525  * @num_bytes: Number of bytes to be set
1526  *
1527  * Return: None
1528  */
1529 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1530 {
1531 	if (!ptr) {
1532 		qdf_print("%s called with NULL parameter ptr", __func__);
1533 		return;
1534 	}
1535 	memset_io(ptr, value, num_bytes);
1536 }
1537 
1538 qdf_export_symbol(qdf_mem_set_io);
1539 
1540 /**
1541  * qdf_mem_set() - set (fill) memory with a specified byte value.
1542  * @ptr: Pointer to memory that will be set
1543  * @num_bytes: Number of bytes to be set
1544  * @value: Byte set in memory
1545  *
1546  * Return: None
1547  */
1548 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1549 {
1550 	if (ptr == NULL) {
1551 		qdf_print("%s called with NULL parameter ptr", __func__);
1552 		return;
1553 	}
1554 	memset(ptr, value, num_bytes);
1555 }
1556 qdf_export_symbol(qdf_mem_set);
1557 
1558 /**
1559  * qdf_mem_move() - move memory
1560  * @dst_addr: pointer to destination memory location (to move to)
1561  * @src_addr: pointer to source memory location (to move from)
1562  * @num_bytes: number of bytes to move.
1563  *
1564  * Move host memory from one location to another, similar to memmove in
1565  * standard C.  Note this function *does* handle overlapping
1566  * source and destination memory locations.
1567 
1568  * Return: None
1569  */
1570 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1571 {
1572 	if (0 == num_bytes) {
1573 		/* special case where dst_addr or src_addr can be NULL */
1574 		return;
1575 	}
1576 
1577 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1578 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1579 			  "%s called with NULL parameter, source:%pK destination:%pK",
1580 			  __func__, src_addr, dst_addr);
1581 		QDF_ASSERT(0);
1582 		return;
1583 	}
1584 	memmove(dst_addr, src_addr, num_bytes);
1585 }
1586 qdf_export_symbol(qdf_mem_move);
1587 
1588 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1589 /**
1590  * qdf_mem_dma_alloc() - allocates memory for dma
1591  * @osdev: OS device handle
1592  * @dev: Pointer to device handle
1593  * @size: Size to be allocated
1594  * @phy_addr: Physical address
1595  *
1596  * Return: pointer of allocated memory or null if memory alloc fails
1597  */
1598 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1599 				      qdf_size_t size,
1600 				      qdf_dma_addr_t *phy_addr)
1601 {
1602 	void *vaddr;
1603 
1604 	vaddr = qdf_mem_malloc(size);
1605 	*phy_addr = ((uintptr_t) vaddr);
1606 	/* using this type conversion to suppress "cast from pointer to integer
1607 	 * of different size" warning on some platforms
1608 	 */
1609 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1610 	return vaddr;
1611 }
1612 
1613 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1614 #define QCA8074_RAM_BASE 0x50000000
1615 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1616 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1617 			qdf_dma_addr_t *phy_addr)
1618 {
1619 	void *vaddr = NULL;
1620 	int i;
1621 
1622 	*phy_addr = 0;
1623 
1624 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1625 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1626 					   qdf_mem_malloc_flags());
1627 
1628 		if (!vaddr) {
1629 			qdf_err("%s failed , size: %zu!", __func__, size);
1630 			return NULL;
1631 		}
1632 
1633 		if (*phy_addr >= QCA8074_RAM_BASE)
1634 			return vaddr;
1635 
1636 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1637 	}
1638 
1639 	return NULL;
1640 }
1641 
1642 #else
/* default: delegate directly to the kernel's coherent DMA allocator */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
}
1648 #endif
1649 
1650 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* DMA memory on these targets comes from qdf_mem_malloc(); free it the
 * same way (dev, size, and paddr are unused)
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
1656 #else
1657 
/* release memory obtained from dma_alloc_coherent() */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
1663 #endif
1664 
1665 #ifdef MEMORY_DEBUG
/**
 * qdf_mem_alloc_consistent_debug() - allocate tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Number of bytes to allocate (must be 1..QDF_MEM_MAX_MALLOC)
 * @paddr: Populated with the DMA (physical) address of the allocation
 * @file: File name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Allocates @size bytes plus room for a debug header stored at the tail,
 * and records the allocation on the per-domain DMA tracking list so leaks
 * can be detected later.
 *
 * Return: virtual address of the allocated buffer, or NULL on failure
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *file, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* reject zero-length and unreasonably large requests */
	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, file, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, file, line, caller);

	/* publish the allocation on the per-domain DMA tracking list */
	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_dma_inc(size);

	return vaddr;
}
qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1709 
/**
 * qdf_mem_free_consistent_debug() - free tracked DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size passed at allocation time (excludes the debug header)
 * @vaddr: virtual address of the buffer to free
 * @paddr: DMA (physical) address of the buffer to free
 * @memctx: Memory context
 * @file: File name of the call site
 * @line: Line number of the call site
 *
 * Validates the debug header stored at the tail of the buffer, removes the
 * allocation from the per-domain DMA tracking list, and releases the memory.
 *
 * Return: None
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *file, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		/* mark freed and unlink while holding the lock so a
		 * double-free fails header validation above
		 */
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_dma_list(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	/* reports (outside the lock) any validation failures found above */
	qdf_mem_header_assert_valid(header, domain, error_bitmap, file, line);

	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent_debug);
1745 
1746 #else
1747 
1748 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1749 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1750 {
1751 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1752 
1753 	if (vaddr)
1754 		qdf_mem_dma_inc(size);
1755 
1756 	return vaddr;
1757 }
1758 qdf_export_symbol(qdf_mem_alloc_consistent);
1759 
/**
 * qdf_mem_free_consistent() - free DMA-coherent memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the buffer (same value passed at allocation)
 * @vaddr: virtual address of the buffer to free
 * @paddr: DMA (physical) address of the buffer to free
 * @memctx: Memory context
 *
 * Return: None
 */
void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			     qdf_size_t size, void *vaddr,
			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	qdf_mem_dma_dec(size);
	qdf_mem_dma_free(dev, size, vaddr, paddr);
}
qdf_export_symbol(qdf_mem_free_consistent);
1768 
1769 #endif /* MEMORY_DEBUG */
1770 
1771 /**
1772  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1773  * @osdev: OS device handle
1774  * @bus_addr: dma address to give to the device
1775  * @size: Size of the memory block
1776  * @direction: direction data will be DMAed
1777  *
1778  * Assign memory to the remote device.
1779  * The cache lines are flushed to ram or invalidated as needed.
1780  *
1781  * Return: none
1782  */
1783 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1784 					qdf_dma_addr_t bus_addr,
1785 					qdf_size_t size,
1786 					enum dma_data_direction direction)
1787 {
1788 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1789 }
1790 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1791 
1792 /**
1793  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1794  * @osdev: OS device handle
1795  * @bus_addr: dma address to give to the cpu
1796  * @size: Size of the memory block
1797  * @direction: direction data will be DMAed
1798  *
1799  * Assign memory to the CPU.
1800  *
1801  * Return: none
1802  */
1803 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1804 				     qdf_dma_addr_t bus_addr,
1805 				     qdf_size_t size,
1806 				     enum dma_data_direction direction)
1807 {
1808 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1809 }
1810 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1811 
/**
 * qdf_mem_init() - initialize qdf memory debug functionality
 *
 * Brings up memory tracking, nbuf tracking, and their debugfs interfaces
 * (no-ops when the corresponding debug features are compiled out).
 *
 * Return: none
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
qdf_export_symbol(qdf_mem_init);
1820 
/**
 * qdf_mem_exit() - exit qdf memory debug functionality
 *
 * Tears down the facilities brought up by qdf_mem_init(), in reverse order.
 *
 * Return: none
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
qdf_export_symbol(qdf_mem_exit);
1829 
1830 /**
1831  * qdf_ether_addr_copy() - copy an Ethernet address
1832  *
1833  * @dst_addr: A six-byte array Ethernet address destination
1834  * @src_addr: A six-byte array Ethernet address source
1835  *
1836  * Please note: dst & src must both be aligned to u16.
1837  *
1838  * Return: none
1839  */
1840 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1841 {
1842 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1843 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1844 			  "%s called with NULL parameter, source:%pK destination:%pK",
1845 			  __func__, src_addr, dst_addr);
1846 		QDF_ASSERT(0);
1847 		return;
1848 	}
1849 	ether_addr_copy(dst_addr, src_addr);
1850 }
1851 qdf_export_symbol(qdf_ether_addr_copy);
1852 
1853