xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
40 #include <net/cnss_prealloc.h>
41 #endif
42 
43 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
44 static bool mem_debug_disabled;
45 qdf_declare_param(mem_debug_disabled, bool);
46 qdf_export_symbol(mem_debug_disabled);
47 #endif
48 
49 #ifdef MEMORY_DEBUG
50 static bool is_initial_mem_debug_disabled;
51 #endif
52 
53 /* Preprocessor Definitions and Constants */
54 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
55 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
56 #define QDF_DEBUG_STRING_SIZE 512
57 
58 /**
59  * struct __qdf_mem_stat - qdf memory statistics
60  * @kmalloc: total outstanding bytes allocated via kmalloc
61  * @dma: total outstanding bytes of dma allocations
62  * @skb: total outstanding bytes of skb allocations
63  */
64 static struct __qdf_mem_stat {
65 	qdf_atomic_t kmalloc;
66 	qdf_atomic_t dma;
67 	qdf_atomic_t skb;
68 } qdf_mem_stat;
69 
70 #ifdef MEMORY_DEBUG
71 #include "qdf_debug_domain.h"
72 
73 enum list_type {
74 	LIST_TYPE_MEM = 0,
75 	LIST_TYPE_DMA = 1,
76 	LIST_TYPE_MAX,
77 };
78 
79 /**
80  * struct major_alloc_priv - private data registered to the debugfs entry
81  *                   created to list the major allocations
82  * @type:            type of the list to be parsed
83  * @threshold:       configured by the user by overwriting the respective
84  *                   debugfs sys entry. Lists the functions which requested
85  *                   memory/dma allocations more than threshold number of times.
86  */
87 struct major_alloc_priv {
88 	enum list_type type;
89 	uint32_t threshold;
90 };
91 
92 static struct major_alloc_priv mem_priv = {
93 	/* List type set to mem */
94 	LIST_TYPE_MEM,
95 	/* initial threshold to list APIs which allocates mem >= 50 times */
96 	50
97 };
98 
99 static struct major_alloc_priv dma_priv = {
100 	/* List type set to DMA */
101 	LIST_TYPE_DMA,
102 	/* initial threshold to list APIs which allocates dma >= 50 times */
103 	50
104 };
105 
106 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
107 static qdf_spinlock_t qdf_mem_list_lock;
108 
109 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
110 static qdf_spinlock_t qdf_mem_dma_list_lock;
111 
112 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
113 {
114 	return &qdf_mem_domains[domain];
115 }
116 
117 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
118 {
119 	return &qdf_mem_dma_domains[domain];
120 }
121 
122 /**
123  * struct qdf_mem_header - memory object to debug
124  * @node: node to the list
125  * @domain: the active memory domain at time of allocation
126  * @freed: flag set during free, used to detect double frees
127  *	Use uint8_t so we can detect corruption
128  * @func: name of the function the allocation was made from
129  * @line: line number of the file the allocation was made from
130  * @size: size of the allocation in bytes
131  * @caller: Caller of the function for which memory is allocated
132  * @header: a known value, used to detect out-of-bounds access
133  * @time: timestamp at which allocation was made
134  */
135 struct qdf_mem_header {
136 	qdf_list_node_t node;
137 	enum qdf_debug_domain domain;
138 	uint8_t freed;
139 	char func[QDF_MEM_FUNC_NAME_SIZE];
140 	uint32_t line;
141 	uint32_t size;
142 	void *caller;
143 	uint64_t header;
144 	uint64_t time;
145 };
146 
147 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
148 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
149 
150 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
151 {
152 	return (struct qdf_mem_header *)ptr - 1;
153 }
154 
155 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
156 							    qdf_size_t size)
157 {
158 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
159 }
160 
161 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
162 {
163 	return (uint64_t *)((void *)(header + 1) + header->size);
164 }
165 
166 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
167 {
168 	return (void *)(header + 1);
169 }
170 
171 /* number of bytes needed for the qdf memory debug information */
172 #define QDF_MEM_DEBUG_SIZE \
173 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
174 
175 /* number of bytes needed for the qdf dma memory debug information */
176 #define QDF_DMA_MEM_DEBUG_SIZE \
177 	(sizeof(struct qdf_mem_header))
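
/*
 * Layout sketch (derived from the helpers above) showing where the debug
 * metadata lives relative to the pointer handed back to callers:
 *
 *   kmalloc allocations:
 *     | struct qdf_mem_header | user data (size bytes) | WLAN_MEM_TRAILER |
 *                             ^-- qdf_mem_get_ptr() / caller's pointer
 *
 *   DMA allocations (no trailer; the header is appended after the buffer):
 *     | user data (size bytes) | struct qdf_mem_header |
 *                              ^-- qdf_mem_dma_get_header(ptr, size)
 */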
178 
179 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
180 {
181 	QDF_BUG(header);
182 	if (!header)
183 		return;
184 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
185 }
186 
187 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
188 				const char *func, uint32_t line, void *caller)
189 {
190 	QDF_BUG(header);
191 	if (!header)
192 		return;
193 
194 	header->domain = qdf_debug_domain_get();
195 	header->freed = false;
196 
197 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
198 
199 	header->line = line;
200 	header->size = size;
201 	header->caller = caller;
202 	header->header = WLAN_MEM_HEADER;
203 	header->time = qdf_get_log_timestamp();
204 }
205 
206 enum qdf_mem_validation_bitmap {
207 	QDF_MEM_BAD_HEADER = 1 << 0,
208 	QDF_MEM_BAD_TRAILER = 1 << 1,
209 	QDF_MEM_BAD_SIZE = 1 << 2,
210 	QDF_MEM_DOUBLE_FREE = 1 << 3,
211 	QDF_MEM_BAD_FREED = 1 << 4,
212 	QDF_MEM_BAD_NODE = 1 << 5,
213 	QDF_MEM_BAD_DOMAIN = 1 << 6,
214 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
215 };
216 
217 static enum qdf_mem_validation_bitmap
218 qdf_mem_trailer_validate(struct qdf_mem_header *header)
219 {
220 	enum qdf_mem_validation_bitmap error_bitmap = 0;
221 
222 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
223 		error_bitmap |= QDF_MEM_BAD_TRAILER;
224 	return error_bitmap;
225 }
226 
227 static enum qdf_mem_validation_bitmap
228 qdf_mem_header_validate(struct qdf_mem_header *header,
229 			enum qdf_debug_domain domain)
230 {
231 	enum qdf_mem_validation_bitmap error_bitmap = 0;
232 
233 	if (header->header != WLAN_MEM_HEADER)
234 		error_bitmap |= QDF_MEM_BAD_HEADER;
235 
236 	if (header->size > QDF_MEM_MAX_MALLOC)
237 		error_bitmap |= QDF_MEM_BAD_SIZE;
238 
239 	if (header->freed == true)
240 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
241 	else if (header->freed)
242 		error_bitmap |= QDF_MEM_BAD_FREED;
243 
244 	if (!qdf_list_node_in_any_list(&header->node))
245 		error_bitmap |= QDF_MEM_BAD_NODE;
246 
247 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
248 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
249 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
250 	else if (header->domain != domain)
251 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
252 
253 	return error_bitmap;
254 }
255 
256 static void
257 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
258 			    enum qdf_debug_domain current_domain,
259 			    enum qdf_mem_validation_bitmap error_bitmap,
260 			    const char *func,
261 			    uint32_t line)
262 {
263 	if (!error_bitmap)
264 		return;
265 
266 	if (error_bitmap & QDF_MEM_BAD_HEADER)
267 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
268 			header->header, WLAN_MEM_HEADER);
269 
270 	if (error_bitmap & QDF_MEM_BAD_SIZE)
271 		qdf_err("Corrupted memory size %u (expected < %d)",
272 			header->size, QDF_MEM_MAX_MALLOC);
273 
274 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
275 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
276 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
277 
278 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
279 		qdf_err("Memory has previously been freed");
280 
281 	if (error_bitmap & QDF_MEM_BAD_FREED)
282 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
283 
284 	if (error_bitmap & QDF_MEM_BAD_NODE)
285 		qdf_err("Corrupted memory header node or double free");
286 
287 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
288 		qdf_err("Corrupted memory domain 0x%x", header->domain);
289 
290 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
291 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
292 			qdf_debug_domain_name(header->domain), header->domain,
293 			qdf_debug_domain_name(current_domain), current_domain);
294 
295 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
296 }
297 #endif /* MEMORY_DEBUG */
298 
299 u_int8_t prealloc_disabled = 1;
300 qdf_declare_param(prealloc_disabled, byte);
301 qdf_export_symbol(prealloc_disabled);
302 
303 #if defined WLAN_DEBUGFS
304 
305 /* Debugfs root directory for qdf_mem */
306 static struct dentry *qdf_mem_debugfs_root;
307 
308 #ifdef MEMORY_DEBUG
309 static int qdf_err_printer(void *priv, const char *fmt, ...)
310 {
311 	va_list args;
312 
313 	va_start(args, fmt);
314 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
315 	va_end(args);
316 
317 	return 0;
318 }
319 
320 static int seq_printf_printer(void *priv, const char *fmt, ...)
321 {
322 	struct seq_file *file = priv;
323 	va_list args;
324 
325 	va_start(args, fmt);
326 	seq_vprintf(file, fmt, args);
327 	seq_puts(file, "\n");
328 	va_end(args);
329 
330 	return 0;
331 }
332 
333 /**
334  * struct __qdf_mem_info - memory statistics
335  * @func: the function which allocated memory
336  * @line: the line at which allocation happened
337  * @size: the size of allocation
338  * @caller: Address of the caller function
339  * @count: how many allocations of same type
340  * @time: timestamp at which allocation happened
341  */
342 struct __qdf_mem_info {
343 	char func[QDF_MEM_FUNC_NAME_SIZE];
344 	uint32_t line;
345 	uint32_t size;
346 	void *caller;
347 	uint32_t count;
348 	uint64_t time;
349 };
350 
351 /*
352  * The table depth defines the de-duplication proximity scope.
353  * A deeper table takes more time, so choose an optimal value.
354  */
355 #define QDF_MEM_STAT_TABLE_SIZE 8
356 
357 /**
358  * qdf_mem_debug_print_header() - memory debug header print logic
359  * @print: the print adapter function
360  * @print_priv: the private data to be consumed by @print
361  * @threshold: the threshold value set by user to list top allocations
362  *
363  * Return: None
364  */
365 static void qdf_mem_debug_print_header(qdf_abstract_print print,
366 				       void *print_priv,
367 				       uint32_t threshold)
368 {
369 	if (threshold)
370 		print(print_priv, "APIs requested allocations >= %u no of time",
371 		      threshold);
372 	print(print_priv,
373 	      "--------------------------------------------------------------");
374 	print(print_priv,
375 	      " count    size     total    filename     caller    timestamp");
376 	print(print_priv,
377 	      "--------------------------------------------------------------");
378 }
379 
380 /**
381  * qdf_mem_meta_table_print() - memory metadata table print logic
382  * @table: the memory metadata table to print
383  * @print: the print adapter function
384  * @print_priv: the private data to be consumed by @print
385  * @threshold: the threshold value set by user to list top allocations
386  *
387  * Return: None
388  */
389 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
390 				     qdf_abstract_print print,
391 				     void *print_priv,
392 				     uint32_t threshold)
393 {
394 	int i;
395 	char debug_str[QDF_DEBUG_STRING_SIZE];
396 	size_t len = 0;
397 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
398 
399 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
400 			     "%s", debug_prefix);
401 
402 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
403 		if (!table[i].count)
404 			break;
405 
406 		print(print_priv,
407 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
408 		      table[i].count,
409 		      table[i].size,
410 		      table[i].count * table[i].size,
411 		      table[i].func,
412 		      table[i].line, table[i].caller,
413 		      table[i].time);
414 		len += qdf_scnprintf(debug_str + len,
415 				     sizeof(debug_str) - len,
416 				     " @ %s:%u %pS",
417 				     table[i].func,
418 				     table[i].line,
419 				     table[i].caller);
420 	}
421 	print(print_priv, "%s", debug_str);
422 }
423 
424 /**
425  * qdf_print_major_alloc() - major allocations table print logic
426  * @table: the memory metadata table to print
427  * @print: the print adapter function
428  * @print_priv: the private data to be consumed by @print
429  * @threshold: the threshold value set by the user to list top allocations
430  *
431  * Return: None
432  */
433 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
434 				  qdf_abstract_print print,
435 				  void *print_priv,
436 				  uint32_t threshold)
437 {
438 	int i;
439 
440 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
441 		if (!table[i].count)
442 			break;
443 		if (table[i].count >= threshold)
444 			print(print_priv,
445 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
446 			      table[i].count,
447 			      table[i].size,
448 			      table[i].count * table[i].size,
449 			      table[i].func,
450 			      table[i].line, table[i].caller,
451 			      table[i].time);
452 	}
453 }
454 
455 /**
456  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
457  * @table: the memory metadata table to insert into
458  * @meta: the memory metadata to insert
459  *
460  * Return: true if the table is full after inserting, false otherwise
461  */
462 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
463 				      struct qdf_mem_header *meta)
464 {
465 	int i;
466 
467 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
468 		if (!table[i].count) {
469 			qdf_str_lcopy(table[i].func, meta->func,
470 				      QDF_MEM_FUNC_NAME_SIZE);
471 			table[i].line = meta->line;
472 			table[i].size = meta->size;
473 			table[i].count = 1;
474 			table[i].caller = meta->caller;
475 			table[i].time = meta->time;
476 			break;
477 		}
478 
479 		if (qdf_str_eq(table[i].func, meta->func) &&
480 		    table[i].line == meta->line &&
481 		    table[i].size == meta->size &&
482 		    table[i].caller == meta->caller) {
483 			table[i].count++;
484 			break;
485 		}
486 	}
487 
488 	/* return true if the table is now full */
489 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
490 }
491 
492 /**
493  * qdf_mem_domain_print() - output agnostic memory domain print logic
494  * @domain: the memory domain to print
495  * @print: the print adapter function
496  * @print_priv: the private data to be consumed by @print
497  * @threshold: the threshold value set by the user to list top allocations
498  * @mem_print: pointer to function which prints the memory allocation data
499  *
500  * Return: None
501  */
502 static void qdf_mem_domain_print(qdf_list_t *domain,
503 				 qdf_abstract_print print,
504 				 void *print_priv,
505 				 uint32_t threshold,
506 				 void (*mem_print)(struct __qdf_mem_info *,
507 						   qdf_abstract_print,
508 						   void *, uint32_t))
509 {
510 	QDF_STATUS status;
511 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
512 	qdf_list_node_t *node;
513 
514 	qdf_mem_zero(table, sizeof(table));
515 	qdf_mem_debug_print_header(print, print_priv, threshold);
516 
517 	/* hold lock while inserting to avoid use-after-free of the metadata */
518 	qdf_spin_lock(&qdf_mem_list_lock);
519 	status = qdf_list_peek_front(domain, &node);
520 	while (QDF_IS_STATUS_SUCCESS(status)) {
521 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
522 		bool is_full = qdf_mem_meta_table_insert(table, meta);
523 
524 		qdf_spin_unlock(&qdf_mem_list_lock);
525 
526 		if (is_full) {
527 			(*mem_print)(table, print, print_priv, threshold);
528 			qdf_mem_zero(table, sizeof(table));
529 		}
530 
531 		qdf_spin_lock(&qdf_mem_list_lock);
532 		status = qdf_list_peek_next(domain, node, &node);
533 	}
534 	qdf_spin_unlock(&qdf_mem_list_lock);
535 
536 	(*mem_print)(table, print, print_priv, threshold);
537 }
538 
539 /**
540  * qdf_mem_seq_start() - sequential callback to start
541  * @seq: seq_file handle
542  * @pos: The start position of the sequence
543  *
544  * Return: iterator pointer, or NULL if iteration is complete
545  */
546 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
547 {
548 	enum qdf_debug_domain domain = *pos;
549 
550 	if (!qdf_debug_domain_valid(domain))
551 		return NULL;
552 
553 	/* just use the current position as our iterator */
554 	return pos;
555 }
556 
557 /**
558  * qdf_mem_seq_next() - next sequential callback
559  * @seq: seq_file handle
560  * @v: the current iterator
561  * @pos: the current position
562  *
563  * Get the next node and release previous node.
564  *
565  * Return: iterator pointer, or NULL if iteration is complete
566  */
567 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
568 {
569 	++*pos;
570 
571 	return qdf_mem_seq_start(seq, pos);
572 }
573 
574 /**
575  * qdf_mem_seq_stop() - stop sequential callback
576  * @seq: seq_file handle
577  * @v: current iterator
578  *
579  * Return: None
580  */
581 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
582 
583 /**
584  * qdf_mem_seq_show() - print sequential callback
585  * @seq: seq_file handle
586  * @v: current iterator
587  *
588  * Return: 0 - success
589  */
590 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
591 {
592 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
593 
594 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
595 		   qdf_debug_domain_name(domain_id), domain_id);
596 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
597 			     seq_printf_printer,
598 			     seq,
599 			     0,
600 			     qdf_mem_meta_table_print);
601 
602 	return 0;
603 }
604 
605 /* sequential file operation table */
606 static const struct seq_operations qdf_mem_seq_ops = {
607 	.start = qdf_mem_seq_start,
608 	.next  = qdf_mem_seq_next,
609 	.stop  = qdf_mem_seq_stop,
610 	.show  = qdf_mem_seq_show,
611 };
612 
613 
614 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
615 {
616 	return seq_open(file, &qdf_mem_seq_ops);
617 }
618 
619 /**
620  * qdf_major_alloc_show() - print sequential callback
621  * @seq: seq_file handle
622  * @v: current iterator
623  *
624  * Return: 0 - success
625  */
626 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
627 {
628 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
629 	struct major_alloc_priv *priv;
630 	qdf_list_t *list;
631 
632 	priv = (struct major_alloc_priv *)seq->private;
633 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
634 		   qdf_debug_domain_name(domain_id), domain_id);
635 
636 	switch (priv->type) {
637 	case LIST_TYPE_MEM:
638 		list = qdf_mem_list_get(domain_id);
639 		break;
640 	case LIST_TYPE_DMA:
641 		list = qdf_mem_dma_list(domain_id);
642 		break;
643 	default:
644 		list = NULL;
645 		break;
646 	}
647 
648 	if (list)
649 		qdf_mem_domain_print(list,
650 				     seq_printf_printer,
651 				     seq,
652 				     priv->threshold,
653 				     qdf_print_major_alloc);
654 
655 	return 0;
656 }
657 
658 /* sequential file operation table created to track major allocs */
659 static const struct seq_operations qdf_major_allocs_seq_ops = {
660 	.start = qdf_mem_seq_start,
661 	.next = qdf_mem_seq_next,
662 	.stop = qdf_mem_seq_stop,
663 	.show = qdf_major_alloc_show,
664 };
665 
666 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
667 {
668 	void *private = inode->i_private;
669 	struct seq_file *seq;
670 	int rc;
671 
672 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
673 	if (rc == 0) {
674 		seq = file->private_data;
675 		seq->private = private;
676 	}
677 	return rc;
678 }
679 
680 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
681 					     const char __user *user_buf,
682 					     size_t count,
683 					     loff_t *pos)
684 {
685 	char buf[32];
686 	ssize_t buf_size;
687 	uint32_t threshold;
688 	struct seq_file *seq = file->private_data;
689 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
690 
691 	buf_size = min(count, (sizeof(buf) - 1));
692 	if (buf_size <= 0)
693 		return 0;
694 	if (copy_from_user(buf, user_buf, buf_size))
695 		return -EFAULT;
696 	buf[buf_size] = '\0';
697 	if (!kstrtou32(buf, 10, &threshold))
698 		priv->threshold = threshold;
699 	return buf_size;
700 }
701 
702 /* file operation table for listing major allocs */
703 static const struct file_operations fops_qdf_major_allocs = {
704 	.owner = THIS_MODULE,
705 	.open = qdf_major_allocs_open,
706 	.read = seq_read,
707 	.llseek = seq_lseek,
708 	.release = seq_release,
709 	.write = qdf_major_alloc_set_threshold,
710 };
711 
712 /* debugfs file operation table */
713 static const struct file_operations fops_qdf_mem_debugfs = {
714 	.owner = THIS_MODULE,
715 	.open = qdf_mem_debugfs_open,
716 	.read = seq_read,
717 	.llseek = seq_lseek,
718 	.release = seq_release,
719 };
720 
721 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
722 {
723 	if (is_initial_mem_debug_disabled)
724 		return QDF_STATUS_SUCCESS;
725 
726 	if (!qdf_mem_debugfs_root)
727 		return QDF_STATUS_E_FAILURE;
728 
729 	debugfs_create_file("list",
730 			    S_IRUSR,
731 			    qdf_mem_debugfs_root,
732 			    NULL,
733 			    &fops_qdf_mem_debugfs);
734 
735 	debugfs_create_file("major_mem_allocs",
736 			    0600,
737 			    qdf_mem_debugfs_root,
738 			    &mem_priv,
739 			    &fops_qdf_major_allocs);
740 
741 	debugfs_create_file("major_dma_allocs",
742 			    0600,
743 			    qdf_mem_debugfs_root,
744 			    &dma_priv,
745 			    &fops_qdf_major_allocs);
746 
747 	return QDF_STATUS_SUCCESS;
748 }
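
/*
 * Usage sketch for the debugfs entries created above (the "mem" directory
 * name comes from qdf_mem_debugfs_init() below; the exact debugfs mount
 * point and parent directory are assumptions for illustration):
 *
 *   cat /sys/kernel/debug/<qdf root>/mem/list
 *   echo 100 > /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *   cat /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *
 * Writing a decimal value updates major_alloc_priv::threshold through
 * qdf_major_alloc_set_threshold(); a subsequent read lists only the call
 * sites with at least that many live allocations.
 */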
749 
750 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
751 {
752 	return QDF_STATUS_SUCCESS;
753 }
754 
755 #else /* MEMORY_DEBUG */
756 
757 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
758 {
759 	return QDF_STATUS_E_NOSUPPORT;
760 }
761 
762 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
763 {
764 	return QDF_STATUS_E_NOSUPPORT;
765 }
766 
767 #endif /* MEMORY_DEBUG */
768 
769 
770 static void qdf_mem_debugfs_exit(void)
771 {
772 	debugfs_remove_recursive(qdf_mem_debugfs_root);
773 	qdf_mem_debugfs_root = NULL;
774 }
775 
776 static QDF_STATUS qdf_mem_debugfs_init(void)
777 {
778 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
779 
780 	if (!qdf_debugfs_root)
781 		return QDF_STATUS_E_FAILURE;
782 
783 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
784 
785 	if (!qdf_mem_debugfs_root)
786 		return QDF_STATUS_E_FAILURE;
787 
788 
789 	debugfs_create_atomic_t("kmalloc",
790 				S_IRUSR,
791 				qdf_mem_debugfs_root,
792 				&qdf_mem_stat.kmalloc);
793 
794 	debugfs_create_atomic_t("dma",
795 				S_IRUSR,
796 				qdf_mem_debugfs_root,
797 				&qdf_mem_stat.dma);
798 
799 	debugfs_create_atomic_t("skb",
800 				S_IRUSR,
801 				qdf_mem_debugfs_root,
802 				&qdf_mem_stat.skb);
803 
804 	return QDF_STATUS_SUCCESS;
805 }
806 
807 #else /* WLAN_DEBUGFS */
808 
809 static QDF_STATUS qdf_mem_debugfs_init(void)
810 {
811 	return QDF_STATUS_E_NOSUPPORT;
812 }
813 static void qdf_mem_debugfs_exit(void) {}
814 
815 
816 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
817 {
818 	return QDF_STATUS_E_NOSUPPORT;
819 }
820 
821 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
822 {
823 	return QDF_STATUS_E_NOSUPPORT;
824 }
825 
826 #endif /* WLAN_DEBUGFS */
827 
828 void qdf_mem_kmalloc_inc(qdf_size_t size)
829 {
830 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
831 }
832 
833 static void qdf_mem_dma_inc(qdf_size_t size)
834 {
835 	qdf_atomic_add(size, &qdf_mem_stat.dma);
836 }
837 
838 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
839 void qdf_mem_skb_inc(qdf_size_t size)
840 {
841 	qdf_atomic_add(size, &qdf_mem_stat.skb);
842 }
843 
844 void qdf_mem_skb_dec(qdf_size_t size)
845 {
846 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
847 }
848 #endif
849 
850 void qdf_mem_kmalloc_dec(qdf_size_t size)
851 {
852 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
853 }
854 
855 static inline void qdf_mem_dma_dec(qdf_size_t size)
856 {
857 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
858 }
859 
860 /**
861  * __qdf_mempool_init() - Create and initialize memory pool
862  *
863  * @osdev: platform device object
864  * @pool_addr: address of the pool created
865  * @elem_cnt: no. of elements in pool
866  * @elem_size: size of each pool element in bytes
867  * @flags: flags
868  *
869  * Return: 0 on success, -ENOMEM on failure
870  */
871 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
872 		       int elem_cnt, size_t elem_size, u_int32_t flags)
873 {
874 	__qdf_mempool_ctxt_t *new_pool = NULL;
875 	u_int32_t align = L1_CACHE_BYTES;
876 	unsigned long aligned_pool_mem;
877 	int pool_id;
878 	int i;
879 
880 	if (prealloc_disabled) {
881 		/* TBD: We can maintain a list of pools in qdf_device_t
882 		 * to help debugging
883 		 * when pre-allocation is not enabled
884 		 */
885 		new_pool = (__qdf_mempool_ctxt_t *)
886 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
887 		if (!new_pool)
888 			return -ENOMEM;
889 
890 		memset(new_pool, 0, sizeof(*new_pool));
891 		/* TBD: define flags for zeroing buffers etc */
892 		new_pool->flags = flags;
893 		new_pool->elem_size = elem_size;
894 		new_pool->max_elem = elem_cnt;
895 		*pool_addr = new_pool;
896 		return 0;
897 	}
898 
899 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
900 		if (!osdev->mem_pool[pool_id])
901 			break;
902 	}
903 
904 	if (pool_id == MAX_MEM_POOLS)
905 		return -ENOMEM;
906 
907 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
908 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
909 	if (!new_pool)
910 		return -ENOMEM;
911 
912 	memset(new_pool, 0, sizeof(*new_pool));
913 	/* TBD: define flags for zeroing buffers etc */
914 	new_pool->flags = flags;
915 	new_pool->pool_id = pool_id;
916 
917 	/* Round up the element size to cacheline */
918 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
919 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
920 				((align)?(align - 1):0);
921 
922 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
923 	if (!new_pool->pool_mem) {
924 			/* TBD: Check if we need get_free_pages above */
925 		kfree(new_pool);
926 		osdev->mem_pool[pool_id] = NULL;
927 		return -ENOMEM;
928 	}
929 
930 	spin_lock_init(&new_pool->lock);
931 
932 	/* Initialize free list */
933 	/* align is L1_CACHE_BYTES (non-zero); round the base up to it */
934 	aligned_pool_mem = roundup((unsigned long)(new_pool->pool_mem), align);
935 	STAILQ_INIT(&new_pool->free_list);
936 
937 	for (i = 0; i < elem_cnt; i++)
938 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
939 			(mempool_elem_t *)(aligned_pool_mem +
940 			(new_pool->elem_size * i)), mempool_entry);
941 
942 
943 	new_pool->free_cnt = elem_cnt;
944 	*pool_addr = new_pool;
945 	return 0;
946 }
947 qdf_export_symbol(__qdf_mempool_init);
948 
949 /**
950  * __qdf_mempool_destroy() - Destroy memory pool
951  * @osdev: platform device object
952  * @pool: Handle to memory pool
953  *
954  * Returns: none
955  */
956 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
957 {
958 	int pool_id = 0;
959 
960 	if (!pool)
961 		return;
962 
963 	if (prealloc_disabled) {
964 		kfree(pool);
965 		return;
966 	}
967 
968 	pool_id = pool->pool_id;
969 
970 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
971 	kfree(pool->pool_mem);
972 	kfree(pool);
973 	osdev->mem_pool[pool_id] = NULL;
974 }
975 qdf_export_symbol(__qdf_mempool_destroy);
976 
977 /**
978  * __qdf_mempool_alloc() - Allocate an element from the memory pool
979  *
980  * @osdev: platform device object
981  * @pool: Handle to memory pool
982  *
983  * Return: Pointer to the allocated element or NULL if the pool is empty
984  */
985 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
986 {
987 	void *buf = NULL;
988 
989 	if (!pool)
990 		return NULL;
991 
992 	if (prealloc_disabled)
993 		return  qdf_mem_malloc(pool->elem_size);
994 
995 	spin_lock_bh(&pool->lock);
996 
997 	buf = STAILQ_FIRST(&pool->free_list);
998 	if (buf) {
999 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1000 		pool->free_cnt--;
1001 	}
1002 
1003 	/* TBD: Update free count if debug is enabled */
1004 	spin_unlock_bh(&pool->lock);
1005 
1006 	return buf;
1007 }
1008 qdf_export_symbol(__qdf_mempool_alloc);
1009 
1010 /**
1011  * __qdf_mempool_free() - Free a memory pool element
1012  * @osdev: Platform device object
1013  * @pool: Handle to memory pool
1014  * @buf: Element to be freed
1015  *
1016  * Returns: none
1017  */
1018 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1019 {
1020 	if (!pool)
1021 		return;
1022 
1023 
1024 	if (prealloc_disabled)
1025 		return qdf_mem_free(buf);
1026 
1027 	spin_lock_bh(&pool->lock);
1028 	pool->free_cnt++;
1029 
1030 	STAILQ_INSERT_TAIL
1031 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1032 	spin_unlock_bh(&pool->lock);
1033 }
1034 qdf_export_symbol(__qdf_mempool_free);
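
/*
 * Minimal lifecycle sketch for the mempool API above (error handling
 * trimmed; osdev and struct foo are hypothetical caller-side names):
 *
 *   __qdf_mempool_t pool;
 *
 *   if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *           return;                             // pool creation failed
 *
 *   struct foo *f = __qdf_mempool_alloc(osdev, pool);
 *   if (f) {
 *           // ... use the element ...
 *           __qdf_mempool_free(osdev, pool, f); // return element to pool
 *   }
 *
 *   __qdf_mempool_destroy(osdev, pool);
 *
 * Note that with prealloc_disabled set (the default), alloc/free degrade
 * to plain qdf_mem_malloc()/qdf_mem_free() calls.
 */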
1035 
1036 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
1037 /**
1038  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1039  * @size: the number of bytes to allocate
1040  *
1041  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1042  * a chunk of pre-allocated memory. If size is less than or equal to
1043  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1044  *
1045  * Return: NULL on failure, non-NULL on success
1046  */
1047 static void *qdf_mem_prealloc_get(size_t size)
1048 {
1049 	void *ptr;
1050 
1051 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1052 		return NULL;
1053 
1054 	ptr = wcnss_prealloc_get(size);
1055 	if (!ptr)
1056 		return NULL;
1057 
1058 	memset(ptr, 0, size);
1059 
1060 	return ptr;
1061 }
1062 
1063 static inline bool qdf_mem_prealloc_put(void *ptr)
1064 {
1065 	return wcnss_prealloc_put(ptr);
1066 }
1067 #else
1068 static inline void *qdf_mem_prealloc_get(size_t size)
1069 {
1070 	return NULL;
1071 }
1072 
1073 static inline bool qdf_mem_prealloc_put(void *ptr)
1074 {
1075 	return false;
1076 }
1077 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
1078 
1079 static int qdf_mem_malloc_flags(void)
1080 {
1081 	if (in_interrupt() || irqs_disabled() || in_atomic())
1082 		return GFP_ATOMIC;
1083 
1084 	return GFP_KERNEL;
1085 }
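
/*
 * Sketch: qdf_mem_malloc_flags() lets a single allocation path serve both
 * sleepable and atomic callers, e.g. kzalloc(len, qdf_mem_malloc_flags())
 * picks GFP_ATOMIC when called from interrupt context and GFP_KERNEL
 * otherwise.
 */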
1086 
1087 /* External Function implementation */
1088 #ifdef MEMORY_DEBUG
1089 /**
1090  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1091  *
1092  * Return: value of mem_debug_disabled qdf module argument
1093  */
1094 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1095 bool qdf_mem_debug_config_get(void)
1096 {
1097 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1098 	return false;
1099 }
1100 #else
1101 bool qdf_mem_debug_config_get(void)
1102 {
1103 	return mem_debug_disabled;
1104 }
1105 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1106 
1107 /**
1108  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1109  *
1110  * Return: none
1111  */
1112 static void qdf_mem_debug_init(void)
1113 {
1114 	int i;
1115 
1116 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1117 
1118 	if (is_initial_mem_debug_disabled)
1119 		return;
1120 
1121 	/* Initializing the list with maximum size of 60000 */
1122 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1123 		qdf_list_create(&qdf_mem_domains[i], 60000);
1124 	qdf_spinlock_create(&qdf_mem_list_lock);
1125 
1126 	/* dma */
1127 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1128 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1129 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1130 }
1131 
1132 static uint32_t
1133 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1134 			       qdf_list_t *mem_list)
1135 {
1136 	if (is_initial_mem_debug_disabled)
1137 		return 0;
1138 
1139 	if (qdf_list_empty(mem_list))
1140 		return 0;
1141 
1142 	qdf_err("Memory leaks detected in %s domain!",
1143 		qdf_debug_domain_name(domain));
1144 	qdf_mem_domain_print(mem_list,
1145 			     qdf_err_printer,
1146 			     NULL,
1147 			     0,
1148 			     qdf_mem_meta_table_print);
1149 
1150 	return mem_list->count;
1151 }
1152 
1153 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1154 {
1155 	uint32_t leak_count = 0;
1156 	int i;
1157 
1158 	if (is_initial_mem_debug_disabled)
1159 		return;
1160 
1161 	/* detect and print leaks */
1162 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1163 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1164 
1165 	if (leak_count)
1166 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1167 				   leak_count);
1168 }
1169 
1170 /**
1171  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1172  *
1173  * Return: none
1174  */
1175 static void qdf_mem_debug_exit(void)
1176 {
1177 	int i;
1178 
1179 	if (is_initial_mem_debug_disabled)
1180 		return;
1181 
1182 	/* mem */
1183 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1184 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1185 		qdf_list_destroy(qdf_mem_list_get(i));
1186 
1187 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1188 
1189 	/* dma */
1190 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1191 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1192 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1193 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1194 }
1195 
1196 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1197 			   void *caller, uint32_t flag)
1198 {
1199 	QDF_STATUS status;
1200 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1201 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1202 	struct qdf_mem_header *header;
1203 	void *ptr;
1204 	unsigned long start, duration;
1205 
1206 	if (is_initial_mem_debug_disabled)
1207 		return __qdf_mem_malloc(size, func, line);
1208 
1209 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1210 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1211 		return NULL;
1212 	}
1213 
1214 	ptr = qdf_mem_prealloc_get(size);
1215 	if (ptr)
1216 		return ptr;
1217 
1218 	if (!flag)
1219 		flag = qdf_mem_malloc_flags();
1220 
1221 	start = qdf_mc_timer_get_system_time();
1222 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1223 	duration = qdf_mc_timer_get_system_time() - start;
1224 
1225 	if (duration > QDF_MEM_WARN_THRESHOLD)
1226 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1227 			 duration, size, func, line);
1228 
1229 	if (!header) {
1230 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1231 		return NULL;
1232 	}
1233 
1234 	qdf_mem_header_init(header, size, func, line, caller);
1235 	qdf_mem_trailer_init(header);
1236 	ptr = qdf_mem_get_ptr(header);
1237 
1238 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1239 	status = qdf_list_insert_front(mem_list, &header->node);
1240 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1241 	if (QDF_IS_STATUS_ERROR(status))
1242 		qdf_err("Failed to insert memory header; status %d", status);
1243 
1244 	qdf_mem_kmalloc_inc(ksize(header));
1245 
1246 	return ptr;
1247 }
1248 qdf_export_symbol(qdf_mem_malloc_debug);
1249 
1250 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1251 {
1252 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1253 	struct qdf_mem_header *header;
1254 	enum qdf_mem_validation_bitmap error_bitmap;
1255 
1256 	if (is_initial_mem_debug_disabled) {
1257 		__qdf_mem_free(ptr);
1258 		return;
1259 	}
1260 
1261 	/* freeing a null pointer is valid */
1262 	if (qdf_unlikely(!ptr))
1263 		return;
1264 
1265 	if (qdf_mem_prealloc_put(ptr))
1266 		return;
1267 
1268 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1269 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1270 				   ptr);
1271 
1272 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1273 
1274 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1275 	header = qdf_mem_get_header(ptr);
1276 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1277 	error_bitmap |= qdf_mem_trailer_validate(header);
1278 
1279 	if (!error_bitmap) {
1280 		header->freed = true;
1281 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1282 				     &header->node);
1283 	}
1284 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1285 
1286 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1287 				    func, line);
1288 
1289 	qdf_mem_kmalloc_dec(ksize(header));
1290 	kfree(header);
1291 }
1292 qdf_export_symbol(qdf_mem_free_debug);
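
/*
 * Usage sketch: callers are not expected to invoke the *_debug variants
 * directly; the qdf_mem_malloc()/qdf_mem_free() macros (assumed to be
 * defined in qdf_mem.h) expand to these with __func__, __LINE__ and the
 * caller's return address, e.g.:
 *
 *   uint8_t *buf = qdf_mem_malloc(128);  // tracked in the current domain
 *   if (!buf)
 *           return QDF_STATUS_E_NOMEM;
 *   // ... use buf ...
 *   qdf_mem_free(buf);                   // validates header and trailer
 *
 * Any buffer still allocated at teardown is reported (and panics) via
 * qdf_mem_check_for_leaks()/qdf_mem_debug_exit().
 */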
1293 
1294 void qdf_mem_check_for_leaks(void)
1295 {
1296 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1297 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1298 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1299 	uint32_t leaks_count = 0;
1300 
1301 	if (is_initial_mem_debug_disabled)
1302 		return;
1303 
1304 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1305 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1306 
1307 	if (leaks_count)
1308 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1309 				   leaks_count);
1310 }
1311 
1312 /**
1313  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1314  * qdf_mem_multi_pages_alloc
1315  * @osdev: OS device handle pointer
1316  * @pages: Multi page information storage
1317  * @element_size: Each element size
1318  * @element_num: Total number of elements to be allocated
1319  * @memctxt: Memory context
1320  * @cacheable: Coherent memory or cacheable memory
1321  * @func: Caller of this allocator
1322  * @line: Line number of the caller
1323  * @caller: Return address of the caller
1324  *
1325  * This function allocates a large amount of memory over multiple pages.
1326  * Large contiguous allocations fail frequently, so instead of allocating
1327  * a large buffer in one shot, allocate multiple non-contiguous pages and
1328  * combine them at the point of actual usage.
1329  *
1330  * Return: None
1331  */
1332 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1333 				     struct qdf_mem_multi_page_t *pages,
1334 				     size_t element_size, uint16_t element_num,
1335 				     qdf_dma_context_t memctxt, bool cacheable,
1336 				     const char *func, uint32_t line,
1337 				     void *caller)
1338 {
1339 	uint16_t page_idx;
1340 	struct qdf_mem_dma_page_t *dma_pages;
1341 	void **cacheable_pages = NULL;
1342 	uint16_t i;
1343 
1344 	if (!pages->page_size)
1345 		pages->page_size = qdf_page_size;
1346 
1347 	pages->num_element_per_page = pages->page_size / element_size;
1348 	if (!pages->num_element_per_page) {
1349 		qdf_print("Invalid page %d or element size %d",
1350 			  (int)pages->page_size, (int)element_size);
1351 		goto out_fail;
1352 	}
1353 
1354 	pages->num_pages = element_num / pages->num_element_per_page;
1355 	if (element_num % pages->num_element_per_page)
1356 		pages->num_pages++;
1357 
1358 	if (cacheable) {
1359 		/* Pages information storage */
1360 		pages->cacheable_pages = qdf_mem_malloc_debug(
1361 			pages->num_pages * sizeof(pages->cacheable_pages),
1362 			func, line, caller, 0);
1363 		if (!pages->cacheable_pages)
1364 			goto out_fail;
1365 
1366 		cacheable_pages = pages->cacheable_pages;
1367 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1368 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1369 				pages->page_size, func, line, caller, 0);
1370 			if (!cacheable_pages[page_idx])
1371 				goto page_alloc_fail;
1372 		}
1373 		pages->dma_pages = NULL;
1374 	} else {
1375 		pages->dma_pages = qdf_mem_malloc_debug(
1376 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1377 			func, line, caller, 0);
1378 		if (!pages->dma_pages)
1379 			goto out_fail;
1380 
1381 		dma_pages = pages->dma_pages;
1382 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1383 			dma_pages->page_v_addr_start =
1384 				qdf_mem_alloc_consistent_debug(
1385 					osdev, osdev->dev, pages->page_size,
1386 					&dma_pages->page_p_addr,
1387 					func, line, caller);
1388 			if (!dma_pages->page_v_addr_start) {
1389 				qdf_print("dmaable page alloc fail pi %d",
1390 					  page_idx);
1391 				goto page_alloc_fail;
1392 			}
1393 			dma_pages->page_v_addr_end =
1394 				dma_pages->page_v_addr_start + pages->page_size;
1395 			dma_pages++;
1396 		}
1397 		pages->cacheable_pages = NULL;
1398 	}
1399 	return;
1400 
1401 page_alloc_fail:
1402 	if (cacheable) {
1403 		for (i = 0; i < page_idx; i++)
1404 			qdf_mem_free_debug(pages->cacheable_pages[i],
1405 					   func, line);
1406 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1407 	} else {
1408 		dma_pages = pages->dma_pages;
1409 		for (i = 0; i < page_idx; i++) {
1410 			qdf_mem_free_consistent_debug(
1411 				osdev, osdev->dev,
1412 				pages->page_size, dma_pages->page_v_addr_start,
1413 				dma_pages->page_p_addr, memctxt, func, line);
1414 			dma_pages++;
1415 		}
1416 		qdf_mem_free_debug(pages->dma_pages, func, line);
1417 	}
1418 
1419 out_fail:
1420 	pages->cacheable_pages = NULL;
1421 	pages->dma_pages = NULL;
1422 	pages->num_pages = 0;
1423 }
1424 
1425 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1426 
1427 /**
1428  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1429  * @osdev: OS device handle pointer
1430  * @pages: Multi page information storage
1431  * @memctxt: Memory context
1432  * @cacheable: Coherent memory or cacheable memory
1433  * @func: Caller of this allocator
1434  * @line: Line number of the caller
1435  *
1436  * This function frees memory that was allocated over multiple pages.
1437  *
1438  * Return: None
1439  */
1440 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1441 				    struct qdf_mem_multi_page_t *pages,
1442 				    qdf_dma_context_t memctxt, bool cacheable,
1443 				    const char *func, uint32_t line)
1444 {
1445 	unsigned int page_idx;
1446 	struct qdf_mem_dma_page_t *dma_pages;
1447 
1448 	if (!pages->page_size)
1449 		pages->page_size = qdf_page_size;
1450 
1451 	if (cacheable) {
1452 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1453 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1454 					   func, line);
1455 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1456 	} else {
1457 		dma_pages = pages->dma_pages;
1458 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1459 			qdf_mem_free_consistent_debug(
1460 				osdev, osdev->dev, pages->page_size,
1461 				dma_pages->page_v_addr_start,
1462 				dma_pages->page_p_addr, memctxt, func, line);
1463 			dma_pages++;
1464 		}
1465 		qdf_mem_free_debug(pages->dma_pages, func, line);
1466 	}
1467 
1468 	pages->cacheable_pages = NULL;
1469 	pages->dma_pages = NULL;
1470 	pages->num_pages = 0;
1471 }
1472 
1473 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1474 
1475 #else
1476 static void qdf_mem_debug_init(void) {}
1477 
1478 static void qdf_mem_debug_exit(void) {}
1479 
1480 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1481 {
1482 	void *ptr;
1483 
1484 	ptr = qdf_mem_prealloc_get(size);
1485 	if (ptr)
1486 		return ptr;
1487 
1488 	ptr = kzalloc(size, GFP_ATOMIC);
1489 	if (!ptr) {
1490 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1491 			      size, func, line);
1492 		return NULL;
1493 	}
1494 
1495 	qdf_mem_kmalloc_inc(ksize(ptr));
1496 
1497 	return ptr;
1498 }
1499 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1500 
1501 /**
1502  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1503  * @osdev: OS device handle pointer
1504  * @pages: Multi page information storage
1505  * @element_size: Each element size
1506  * @element_num: Total number of elements to be allocated
1507  * @memctxt: Memory context
1508  * @cacheable: Coherent memory or cacheable memory
1509  *
1510  * This function allocates a large amount of memory over multiple pages.
1511  * Large contiguous allocations fail frequently, so instead of allocating
1512  * a large buffer in one shot, allocate multiple non-contiguous pages and
1513  * combine them at the point of actual usage.
1514  *
1515  * Return: None
1516  */
1517 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1518 			       struct qdf_mem_multi_page_t *pages,
1519 			       size_t element_size, uint16_t element_num,
1520 			       qdf_dma_context_t memctxt, bool cacheable)
1521 {
1522 	uint16_t page_idx;
1523 	struct qdf_mem_dma_page_t *dma_pages;
1524 	void **cacheable_pages = NULL;
1525 	uint16_t i;
1526 
1527 	if (!pages->page_size)
1528 		pages->page_size = qdf_page_size;
1529 
1530 	pages->num_element_per_page = pages->page_size / element_size;
1531 	if (!pages->num_element_per_page) {
1532 		qdf_print("Invalid page %d or element size %d",
1533 			  (int)pages->page_size, (int)element_size);
1534 		goto out_fail;
1535 	}
1536 
1537 	pages->num_pages = element_num / pages->num_element_per_page;
1538 	if (element_num % pages->num_element_per_page)
1539 		pages->num_pages++;
1540 
1541 	if (cacheable) {
1542 		/* Pages information storage */
1543 		pages->cacheable_pages = qdf_mem_malloc(
1544 			pages->num_pages * sizeof(pages->cacheable_pages));
1545 		if (!pages->cacheable_pages)
1546 			goto out_fail;
1547 
1548 		cacheable_pages = pages->cacheable_pages;
1549 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1550 			cacheable_pages[page_idx] =
1551 				qdf_mem_malloc(pages->page_size);
1552 			if (!cacheable_pages[page_idx])
1553 				goto page_alloc_fail;
1554 		}
1555 		pages->dma_pages = NULL;
1556 	} else {
1557 		pages->dma_pages = qdf_mem_malloc(
1558 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1559 		if (!pages->dma_pages)
1560 			goto out_fail;
1561 
1562 		dma_pages = pages->dma_pages;
1563 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1564 			dma_pages->page_v_addr_start =
1565 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1566 					 pages->page_size,
1567 					&dma_pages->page_p_addr);
1568 			if (!dma_pages->page_v_addr_start) {
1569 				qdf_print("dmaable page alloc fail pi %d",
1570 					page_idx);
1571 				goto page_alloc_fail;
1572 			}
1573 			dma_pages->page_v_addr_end =
1574 				dma_pages->page_v_addr_start + pages->page_size;
1575 			dma_pages++;
1576 		}
1577 		pages->cacheable_pages = NULL;
1578 	}
1579 	return;
1580 
1581 page_alloc_fail:
1582 	if (cacheable) {
1583 		for (i = 0; i < page_idx; i++)
1584 			qdf_mem_free(pages->cacheable_pages[i]);
1585 		qdf_mem_free(pages->cacheable_pages);
1586 	} else {
1587 		dma_pages = pages->dma_pages;
1588 		for (i = 0; i < page_idx; i++) {
1589 			qdf_mem_free_consistent(
1590 				osdev, osdev->dev, pages->page_size,
1591 				dma_pages->page_v_addr_start,
1592 				dma_pages->page_p_addr, memctxt);
1593 			dma_pages++;
1594 		}
1595 		qdf_mem_free(pages->dma_pages);
1596 	}
1597 
1598 out_fail:
1599 	pages->cacheable_pages = NULL;
1600 	pages->dma_pages = NULL;
1601 	pages->num_pages = 0;
1602 	return;
1603 }
1604 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1605 
1606 /**
1607  * qdf_mem_multi_pages_free() - free large size of kernel memory
1608  * @osdev: OS device handle pointer
1609  * @pages: Multi page information storage
1610  * @memctxt: Memory context
1611  * @cacheable: Coherent memory or cacheable memory
1612  *
1613  * This function frees memory that was allocated over multiple pages.
1614  *
1615  * Return: None
1616  */
1617 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1618 			      struct qdf_mem_multi_page_t *pages,
1619 			      qdf_dma_context_t memctxt, bool cacheable)
1620 {
1621 	unsigned int page_idx;
1622 	struct qdf_mem_dma_page_t *dma_pages;
1623 
1624 	if (!pages->page_size)
1625 		pages->page_size = qdf_page_size;
1626 
1627 	if (cacheable) {
1628 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1629 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1630 		qdf_mem_free(pages->cacheable_pages);
1631 	} else {
1632 		dma_pages = pages->dma_pages;
1633 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1634 			qdf_mem_free_consistent(
1635 				osdev, osdev->dev, pages->page_size,
1636 				dma_pages->page_v_addr_start,
1637 				dma_pages->page_p_addr, memctxt);
1638 			dma_pages++;
1639 		}
1640 		qdf_mem_free(pages->dma_pages);
1641 	}
1642 
1643 	pages->cacheable_pages = NULL;
1644 	pages->dma_pages = NULL;
1645 	pages->num_pages = 0;
1646 	return;
1647 }
1648 qdf_export_symbol(qdf_mem_multi_pages_free);
1649 #endif
1650 
1651 void __qdf_mem_free(void *ptr)
1652 {
1653 	if (!ptr)
1654 		return;
1655 
1656 	if (qdf_mem_prealloc_put(ptr))
1657 		return;
1658 
1659 	qdf_mem_kmalloc_dec(ksize(ptr));
1660 
1661 	kfree(ptr);
1662 }
1663 
1664 qdf_export_symbol(__qdf_mem_free);
1665 
1666 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
1667 {
1668 	void *ptr;
1669 
1670 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1671 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1672 			     line);
1673 		return NULL;
1674 	}
1675 
1676 	ptr = qdf_mem_prealloc_get(size);
1677 	if (ptr)
1678 		return ptr;
1679 
1680 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1681 	if (!ptr)
1682 		return NULL;
1683 
1684 	qdf_mem_kmalloc_inc(ksize(ptr));
1685 
1686 	return ptr;
1687 }
1688 
1689 qdf_export_symbol(__qdf_mem_malloc);
1690 
1691 void *qdf_aligned_malloc_fl(uint32_t *size,
1692 			    void **vaddr_unaligned,
1693 				qdf_dma_addr_t *paddr_unaligned,
1694 				qdf_dma_addr_t *paddr_aligned,
1695 				uint32_t align,
1696 			    const char *func, uint32_t line)
1697 {
1698 	void *vaddr_aligned;
1699 	uint32_t align_alloc_size;
1700 
1701 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
1702 			line);
1703 	if (!*vaddr_unaligned) {
1704 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
1705 		return NULL;
1706 	}
1707 
1708 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1709 
1710 	/* Re-allocate additional bytes to align base address only if
1711 	 * above allocation returns unaligned address. Reason for
1712 	 * trying exact size allocation above is, OS tries to allocate
1713 	 * blocks of size power-of-2 pages and then free extra pages.
1714 	 * e.g., for a ring size of 1MB, the allocation below will
1715 	 * request 1MB plus 7 bytes for alignment, which will cause a
1716 	 * 2MB block allocation, and that fails sometimes due to
1717 	 * memory fragmentation.
1718 	 */
1719 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1720 		align_alloc_size = *size + align - 1;
1721 
1722 		qdf_mem_free(*vaddr_unaligned);
1723 		*vaddr_unaligned = qdf_mem_malloc_fl(
1724 				(qdf_size_t)align_alloc_size, func, line);
1725 		if (!*vaddr_unaligned) {
1726 			qdf_warn("Failed to alloc %uB @ %s:%d",
1727 				 align_alloc_size, func, line);
1728 			return NULL;
1729 		}
1730 
1731 		*paddr_unaligned = qdf_mem_virt_to_phys(
1732 				*vaddr_unaligned);
1733 		*size = align_alloc_size;
1734 	}
1735 
1736 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
1737 		((unsigned long)(*paddr_unaligned), align);
1738 
1739 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1740 			((unsigned long)(*paddr_aligned) -
1741 			 (unsigned long)(*paddr_unaligned)));
1742 
1743 	return vaddr_aligned;
1744 }
1745 
1746 qdf_export_symbol(qdf_aligned_malloc_fl);
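
/*
 * Usage sketch for qdf_aligned_malloc_fl() (ring_size and the 8-byte
 * alignment are illustrative values, not requirements of this API):
 *
 *   uint32_t size = ring_size;
 *   void *vaddr_unaligned;
 *   qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *
 *   void *ring_base = qdf_aligned_malloc_fl(&size, &vaddr_unaligned,
 *                                           &paddr_unaligned,
 *                                           &paddr_aligned, 8,
 *                                           __func__, __LINE__);
 *
 * On return, size may have grown by (align - 1) bytes if a second, padded
 * allocation was needed; free with qdf_mem_free(vaddr_unaligned).
 */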
1747 
1748 /**
1749  * qdf_mem_multi_page_link() - Make links for multi page elements
1750  * @osdev: OS device handle pointer
1751  * @pages: Multi page information storage
1752  * @elem_size: Single element size
1753  * @elem_count: number of elements to be linked
1754  * @cacheable: Coherent memory or cacheable memory
1755  *
1756  * This function links the elements of a multi-page allocation into a free list
1757  *
1758  * Return: 0 on success, -ENOMEM on failure
1759  */
1760 int qdf_mem_multi_page_link(qdf_device_t osdev,
1761 		struct qdf_mem_multi_page_t *pages,
1762 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1763 {
1764 	uint16_t i, i_int;
1765 	void *page_info;
1766 	void **c_elem = NULL;
1767 	uint32_t num_link = 0;
1768 
1769 	for (i = 0; i < pages->num_pages; i++) {
1770 		if (cacheable)
1771 			page_info = pages->cacheable_pages[i];
1772 		else
1773 			page_info = pages->dma_pages[i].page_v_addr_start;
1774 
1775 		if (!page_info)
1776 			return -ENOMEM;
1777 
1778 		c_elem = (void **)page_info;
1779 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1780 			if (i_int == (pages->num_element_per_page - 1)) {
1781 				if (cacheable)
1782 					*c_elem = pages->
1783 						cacheable_pages[i + 1];
1784 				else
1785 					*c_elem = pages->
1786 						dma_pages[i + 1].
1787 							page_v_addr_start;
1788 				num_link++;
1789 				break;
1790 			} else {
1791 				*c_elem =
1792 					(void *)(((char *)c_elem) + elem_size);
1793 			}
1794 			num_link++;
1795 			c_elem = (void **)*c_elem;
1796 
1797 			/* Last link established exit */
1798 			if (num_link == (elem_count - 1))
1799 				break;
1800 		}
1801 	}
1802 
1803 	if (c_elem)
1804 		*c_elem = NULL;
1805 
1806 	return 0;
1807 }
1808 qdf_export_symbol(qdf_mem_multi_page_link);
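
/*
 * Walk sketch for the free list built above: after linking, the first
 * pointer-sized word of each element points at the next element, so a
 * caller can traverse the list like this (cacheable case;
 * init_descriptor() is a hypothetical per-element initializer):
 *
 *   void **elem = (void **)pages->cacheable_pages[0]; // first element
 *
 *   while (elem) {
 *           init_descriptor(elem);    // use the element
 *           elem = (void **)*elem;    // follow the link
 *   }
 */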
1809 
1810 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1811 {
1812 	/* special case where dst_addr or src_addr can be NULL */
1813 	if (!num_bytes)
1814 		return;
1815 
1816 	QDF_BUG(dst_addr);
1817 	QDF_BUG(src_addr);
1818 	if (!dst_addr || !src_addr)
1819 		return;
1820 
1821 	memcpy(dst_addr, src_addr, num_bytes);
1822 }
1823 qdf_export_symbol(qdf_mem_copy);
1824 
1825 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
1826 {
1827 	qdf_shared_mem_t *shared_mem;
1828 	qdf_dma_addr_t dma_addr, paddr;
1829 	int ret;
1830 
1831 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
1832 	if (!shared_mem)
1833 		return NULL;
1834 
1835 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
1836 				size, qdf_mem_get_dma_addr_ptr(osdev,
1837 						&shared_mem->mem_info));
1838 	if (!shared_mem->vaddr) {
1839 		qdf_err("Unable to allocate DMA memory for shared resource");
1840 		qdf_mem_free(shared_mem);
1841 		return NULL;
1842 	}
1843 
1844 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
1845 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
1846 
1847 	qdf_mem_zero(shared_mem->vaddr, size);
1848 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
1849 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1850 
1851 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
1852 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
1853 				      shared_mem->vaddr, dma_addr, size);
1854 	if (ret) {
1855 		qdf_err("Unable to get DMA sgtable");
1856 		qdf_mem_free_consistent(osdev, osdev->dev,
1857 					shared_mem->mem_info.size,
1858 					shared_mem->vaddr,
1859 					dma_addr,
1860 					qdf_get_dma_mem_context(shared_mem,
1861 								memctx));
1862 		qdf_mem_free(shared_mem);
1863 		return NULL;
1864 	}
1865 
1866 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
1867 
1868 	return shared_mem;
1869 }
1870 
1871 qdf_export_symbol(qdf_mem_shared_mem_alloc);
1872 
1873 /**
1874  * qdf_mem_copy_toio() - copy memory
1875  * @dst_addr: Pointer to destination memory location (to copy to)
1876  * @src_addr: Pointer to source memory location (to copy from)
1877  * @num_bytes: Number of bytes to copy.
1878  *
1879  * Return: none
1880  */
1881 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1882 {
1883 	if (0 == num_bytes) {
1884 		/* special case where dst_addr or src_addr can be NULL */
1885 		return;
1886 	}
1887 
1888 	if ((!dst_addr) || (!src_addr)) {
1889 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1890 			  "%s called with NULL parameter, source:%pK destination:%pK",
1891 			  __func__, src_addr, dst_addr);
1892 		QDF_ASSERT(0);
1893 		return;
1894 	}
1895 	memcpy_toio(dst_addr, src_addr, num_bytes);
1896 }
1897 
1898 qdf_export_symbol(qdf_mem_copy_toio);
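
/*
 * Usage sketch: unlike qdf_mem_copy(), the destination here must be a
 * memory-mapped I/O region (e.g. one obtained via ioremap()).
 * "io_base", "fw_blob" and "fw_blob_len" are hypothetical names.
 *
 *	qdf_mem_copy_toio(io_base, fw_blob, fw_blob_len);
 */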
1899 
1900 /**
1901  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1902  * @ptr: Pointer to memory that will be set
1903  * @num_bytes: Number of bytes to be set
1904  * @value: Byte value to fill the memory with
1905  *
1906  * Return: None
1907  */
1908 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1909 {
1910 	if (!ptr) {
1911 		qdf_print("%s called with NULL parameter ptr", __func__);
1912 		return;
1913 	}
1914 	memset_io(ptr, value, num_bytes);
1915 }
1916 
1917 qdf_export_symbol(qdf_mem_set_io);
1918 
1919 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1920 {
1921 	QDF_BUG(ptr);
1922 	if (!ptr)
1923 		return;
1924 
1925 	memset(ptr, value, num_bytes);
1926 }
1927 qdf_export_symbol(qdf_mem_set);
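
/*
 * Note on argument order: unlike memset(ptr, value, num_bytes), the
 * qdf_mem_set()/qdf_mem_set_io() wrappers take the length before the
 * fill value. A sketch with a hypothetical buffer:
 *
 *	qdf_mem_set(buf, sizeof(buf), 0xff);   - fills buf with 0xff
 *	memset(buf, 0xff, sizeof(buf));        - equivalent libc form
 */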
1928 
1929 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1930 {
1931 	/* special case where dst_addr or src_addr can be NULL */
1932 	if (!num_bytes)
1933 		return;
1934 
1935 	QDF_BUG(dst_addr);
1936 	QDF_BUG(src_addr);
1937 	if (!dst_addr || !src_addr)
1938 		return;
1939 
1940 	memmove(dst_addr, src_addr, num_bytes);
1941 }
1942 qdf_export_symbol(qdf_mem_move);
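
/*
 * Sketch: qdf_mem_move() permits overlapping source and destination
 * ranges (memmove() semantics); qdf_mem_copy() does not. Shifting a
 * hypothetical descriptor ring down by one slot:
 *
 *	qdf_mem_move(&ring[0], &ring[1],
 *		     (count - 1) * sizeof(ring[0]));
 */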
1943 
1944 int qdf_mem_cmp(const void *left, const void *right, size_t size)
1945 {
1946 	QDF_BUG(left);
1947 	QDF_BUG(right);
1948 
1949 	return memcmp(left, right, size);
1950 }
1951 qdf_export_symbol(qdf_mem_cmp);
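
/*
 * Sketch: qdf_mem_cmp() follows memcmp() semantics, so equality is a
 * zero return. Comparing two hypothetical MAC address buffers:
 *
 *	if (!qdf_mem_cmp(mac_a, mac_b, QDF_MAC_ADDR_SIZE))
 *		... addresses match ...
 */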
1952 
1953 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1954 /**
1955  * qdf_mem_dma_alloc() - allocate memory for DMA
1956  * @osdev: OS device handle
1957  * @dev: Pointer to device handle
1958  * @size: Size to be allocated
1959  * @phy_addr: Physical address
1960  *
1961  * Return: pointer to the allocated memory, or NULL if the allocation fails
1962  */
1963 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1964 				      qdf_size_t size,
1965 				      qdf_dma_addr_t *phy_addr)
1966 {
1967 	void *vaddr;
1968 
1969 	vaddr = qdf_mem_malloc(size);
1970 	/* this type conversion suppresses the "cast from pointer to integer
1971 	 * of different size" warning seen on some platforms
1972 	 */
1973 	*phy_addr = (uintptr_t)vaddr;
1974 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1975 	return vaddr;
1976 }
1977 
1978 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
1979 	!defined(QCA_WIFI_QCN9000)
1980 
1981 #define QCA8074_RAM_BASE 0x50000000
1982 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
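
/*
 * On the x86 emulation platform only addresses at or above
 * QCA8074_RAM_BASE are usable by the target, so the allocator below
 * retries up to QDF_MEM_ALLOC_X86_MAX_RETRIES times, releasing any
 * buffer whose physical address falls below that base.
 */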
1983 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1984 			qdf_dma_addr_t *phy_addr)
1985 {
1986 	void *vaddr = NULL;
1987 	int i;
1988 
1989 	*phy_addr = 0;
1990 
1991 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1992 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1993 					   qdf_mem_malloc_flags());
1994 
1995 		if (!vaddr) {
1996 			qdf_err("%s failed, size: %zu!", __func__, size);
1997 			return NULL;
1998 		}
1999 
2000 		if (*phy_addr >= QCA8074_RAM_BASE)
2001 			return vaddr;
2002 
2003 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2004 	}
2005 
2006 	return NULL;
2007 }
2008 
2009 #else
2010 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2011 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2012 {
2013 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2014 }
2015 #endif
2016 
2017 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2018 static inline void
2019 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2020 {
2021 	qdf_mem_free(vaddr);
2022 }
2023 #else
2024 
2025 static inline void
2026 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2027 {
2028 	dma_free_coherent(dev, size, vaddr, paddr);
2029 }
2030 #endif
2031 
2032 #ifdef MEMORY_DEBUG
2033 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2034 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2035 				     const char *func, uint32_t line,
2036 				     void *caller)
2037 {
2038 	QDF_STATUS status;
2039 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2040 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2041 	struct qdf_mem_header *header;
2042 	void *vaddr;
2043 
2044 	if (is_initial_mem_debug_disabled)
2045 		return __qdf_mem_alloc_consistent(osdev, dev,
2046 						  size, paddr,
2047 						  func, line);
2048 
2049 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2050 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2051 		return NULL;
2052 	}
2053 
2054 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2055 				   paddr);
2056 
2057 	if (!vaddr) {
2058 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2059 		return NULL;
2060 	}
2061 
2062 	header = qdf_mem_dma_get_header(vaddr, size);
2063 	/* For DMA buffers we only add trailers; this initializes the
2064 	 * header structure at the tail.
2065 	 * Prefixing a header onto a DMA buffer causes SMMU faults, so
2066 	 * the header is never placed at the front of DMA buffers.
2067 	 */
2068 	qdf_mem_header_init(header, size, func, line, caller);
2069 
2070 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2071 	status = qdf_list_insert_front(mem_list, &header->node);
2072 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2073 	if (QDF_IS_STATUS_ERROR(status))
2074 		qdf_err("Failed to insert memory header; status %d", status);
2075 
2076 	qdf_mem_dma_inc(size);
2077 
2078 	return vaddr;
2079 }
2080 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
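
/*
 * Callers normally reach this function through the
 * qdf_mem_alloc_consistent() macro in qdf_mem.h, which supplies
 * __func__, __LINE__ and the caller address when MEMORY_DEBUG is
 * enabled.
 */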
2081 
2082 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2083 				   qdf_size_t size, void *vaddr,
2084 				   qdf_dma_addr_t paddr,
2085 				   qdf_dma_context_t memctx,
2086 				   const char *func, uint32_t line)
2087 {
2088 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2089 	struct qdf_mem_header *header;
2090 	enum qdf_mem_validation_bitmap error_bitmap;
2091 
2092 	if (is_initial_mem_debug_disabled) {
2093 		__qdf_mem_free_consistent(
2094 					  osdev, dev,
2095 					  size, vaddr,
2096 					  paddr, memctx);
2097 		return;
2098 	}
2099 
2100 	/* freeing a null pointer is valid */
2101 	if (qdf_unlikely(!vaddr))
2102 		return;
2103 
2104 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2105 
2106 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2107 	/* For DMA buffers we only add trailers; this retrieves the
2108 	 * header structure from the tail.
2109 	 * Prefixing a header onto a DMA buffer causes SMMU faults, so
2110 	 * the header is never placed at the front of DMA buffers.
2111 	 */
2112 	header = qdf_mem_dma_get_header(vaddr, size);
2113 	error_bitmap = qdf_mem_header_validate(header, domain);
2114 	if (!error_bitmap) {
2115 		header->freed = true;
2116 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2117 				     &header->node);
2118 	}
2119 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2120 
2121 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2122 
2123 	qdf_mem_dma_dec(header->size);
2124 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2125 }
2126 qdf_export_symbol(qdf_mem_free_consistent_debug);
2127 #endif /* MEMORY_DEBUG */
2128 
2129 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2130 			       qdf_size_t size, void *vaddr,
2131 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2132 {
2133 	qdf_mem_dma_dec(size);
2134 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2135 }
2136 
2137 qdf_export_symbol(__qdf_mem_free_consistent);
2138 
2139 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2140 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2141 				 const char *func, uint32_t line)
2142 {
2143 	void *vaddr;
2144 
2145 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2146 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2147 			     size, func, line);
2148 		return NULL;
2149 	}
2150 
2151 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2152 
2153 	if (vaddr)
2154 		qdf_mem_dma_inc(size);
2155 
2156 	return vaddr;
2157 }
2158 
2159 qdf_export_symbol(__qdf_mem_alloc_consistent);
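
/*
 * Illustrative pairing (hypothetical names): a consistent allocation
 * is returned through qdf_mem_free_consistent() with the same size
 * and addresses that the allocation produced.
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_bytes,
 *					 &paddr);
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_bytes, vaddr,
 *				paddr, 0);
 */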
2160 
2161 void *qdf_aligned_mem_alloc_consistent_fl(
2162 	qdf_device_t osdev, uint32_t *size,
2163 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2164 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2165 	const char *func, uint32_t line)
2166 {
2167 	void *vaddr_aligned;
2168 	uint32_t align_alloc_size;
2169 
2170 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2171 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2172 	if (!*vaddr_unaligned) {
2173 		qdf_warn("Failed to alloc %uB @ %s:%d",
2174 			 *size, func, line);
2175 		return NULL;
2176 	}
2177 
2178 	/* Re-allocate with additional bytes to align the base address
2179 	 * only if the allocation above returned an unaligned address.
2180 	 * The exact-size allocation is tried first because the OS
2181 	 * allocates blocks of power-of-2 pages and then frees the
2182 	 * extra pages. E.g., for a ring size of 1MB, the allocation
2183 	 * below would request 1MB plus 7 bytes for alignment, which
2184 	 * would force a 2MB block allocation, and that sometimes fails
2185 	 * due to memory fragmentation.
2186 	 */
2187 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2188 		align_alloc_size = *size + align - 1;
2189 
2190 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2191 					*vaddr_unaligned,
2192 					*paddr_unaligned, 0);
2193 
2194 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2195 				osdev, osdev->dev, align_alloc_size,
2196 				paddr_unaligned);
2197 		if (!*vaddr_unaligned) {
2198 			qdf_warn("Failed to alloc %uB @ %s:%d",
2199 				 align_alloc_size, func, line);
2200 			return NULL;
2201 		}
2202 
2203 		*size = align_alloc_size;
2204 	}
2205 
2206 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2207 			(unsigned long)(*paddr_unaligned), align);
2208 
2209 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2210 				 ((unsigned long)(*paddr_aligned) -
2211 				  (unsigned long)(*paddr_unaligned)));
2212 
2213 	return vaddr_aligned;
2214 }
2215 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
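
/*
 * Usage sketch (hypothetical names; assumes the
 * qdf_aligned_mem_alloc_consistent() wrapper macro from qdf_mem.h):
 * note that *size can grow if the padded re-allocation path was
 * taken, and that the unaligned vaddr/paddr pair is what must later
 * be passed to qdf_mem_free_consistent().
 *
 *	uint32_t size = ring_bytes;
 *	void *base_unaligned, *base;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *
 *	base = qdf_aligned_mem_alloc_consistent(osdev, &size,
 *						&base_unaligned,
 *						&paddr_unaligned,
 *						&paddr_aligned, 8);
 */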
2216 
2217 /**
2218  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2219  * @osdev: OS device handle
2220  * @bus_addr: dma address to give to the device
2221  * @size: Size of the memory block
2222  * @direction: direction data will be DMAed
2223  * @direction: direction in which the data will be DMAed
2224  *
2225  * Assign memory to the remote device.
2226  * The cache lines are flushed to RAM or invalidated as needed.
2227  * Return: none
2228  */
2229 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2230 					qdf_dma_addr_t bus_addr,
2231 					qdf_size_t size,
2232 					enum dma_data_direction direction)
2233 {
2234 	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
2235 }
2236 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2237 
2238 /**
2239  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2240  * @osdev: OS device handle
2241  * @bus_addr: dma address to give to the cpu
2242  * @size: Size of the memory block
2243  * @direction: direction in which the data will be DMAed
2244  *
2245  * Assign memory to the CPU.
2246  *
2247  * Return: none
2248  */
2249 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2250 				     qdf_dma_addr_t bus_addr,
2251 				     qdf_size_t size,
2252 				     enum dma_data_direction direction)
2253 {
2254 	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
2255 }
2256 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
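
/*
 * Typical ownership hand-off (hypothetical "buf_paddr" and "len"):
 * sync to the CPU before reading a DMA_FROM_DEVICE buffer, then hand
 * it back to the device for reuse.
 *
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, len,
 *					DMA_FROM_DEVICE);
 *	... CPU parses the buffer ...
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, len,
 *					   DMA_FROM_DEVICE);
 */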
2257 
2258 void qdf_mem_init(void)
2259 {
2260 	qdf_mem_debug_init();
2261 	qdf_net_buf_debug_init();
2262 	qdf_mem_debugfs_init();
2263 	qdf_mem_debug_debugfs_init();
2264 }
2265 qdf_export_symbol(qdf_mem_init);
2266 
2267 void qdf_mem_exit(void)
2268 {
2269 	qdf_mem_debug_debugfs_exit();
2270 	qdf_mem_debugfs_exit();
2271 	qdf_net_buf_debug_exit();
2272 	qdf_mem_debug_exit();
2273 }
2274 qdf_export_symbol(qdf_mem_exit);
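
/*
 * Note: qdf_mem_exit() unwinds the qdf_mem_init() sequence in reverse
 * order, so the debugfs views are torn down before the debug tracking
 * they expose.
 */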
2275 
2276 /**
2277  * qdf_ether_addr_copy() - copy an Ethernet address
2278  *
2279  * @dst_addr: A six-byte array Ethernet address destination
2280  * @src_addr: A six-byte array Ethernet address source
2281  *
2282  * Please note: dst & src must both be aligned to u16.
2283  * Please note: dst_addr and src_addr must both be u16-aligned.
2284  * Return: none
2285  */
2286 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2287 {
2288 	if ((!dst_addr) || (!src_addr)) {
2289 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2290 			  "%s called with NULL parameter, source:%pK destination:%pK",
2291 			  __func__, src_addr, dst_addr);
2292 		QDF_ASSERT(0);
2293 		return;
2294 	}
2295 	ether_addr_copy(dst_addr, src_addr);
2296 }
2297 qdf_export_symbol(qdf_ether_addr_copy);
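
/*
 * Usage sketch (hypothetical "peer"): both buffers must satisfy the
 * u16 alignment rule noted above.
 *
 *	struct qdf_mac_addr dst;
 *
 *	qdf_ether_addr_copy(dst.bytes, peer->mac_addr.bytes);
 */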
2298 
2299 int32_t qdf_dma_mem_stats_read(void)
2300 {
2301 	return qdf_atomic_read(&qdf_mem_stat.dma);
2302 }
2303 
2304 qdf_export_symbol(qdf_dma_mem_stats_read);
2305 
2306 int32_t qdf_heap_mem_stats_read(void)
2307 {
2308 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2309 }
2310 
2311 qdf_export_symbol(qdf_heap_mem_stats_read);
2312 
2313 int32_t qdf_skb_mem_stats_read(void)
2314 {
2315 	return qdf_atomic_read(&qdf_mem_stat.skb);
2316 }
2317 
2318 qdf_export_symbol(qdf_skb_mem_stats_read);
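
/*
 * Sketch: the three readers above can be summed for a coarse view of
 * driver memory pressure. Each counter is read atomically, but the
 * sum is not a consistent snapshot across all three.
 *
 *	int32_t total = qdf_dma_mem_stats_read() +
 *			qdf_heap_mem_stats_read() +
 *			qdf_skb_mem_stats_read();
 */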
2319 
2320