xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision fed4bfb04901bc53e8f21d8cfd8d4a151e546d11)
1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 
38 #if defined(CONFIG_CNSS)
39 #include <net/cnss.h>
40 #endif
41 
42 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
43 #include <net/cnss_prealloc.h>
44 #endif
45 
46 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
47 static bool mem_debug_disabled;
48 qdf_declare_param(mem_debug_disabled, bool);
49 qdf_export_symbol(mem_debug_disabled);
50 static bool is_initial_mem_debug_disabled;
51 #endif
52 
53 /* Preprocessor Definitions and Constants */
54 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
55 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
56 #define QDF_DEBUG_STRING_SIZE 512
57 
58 #ifdef MEMORY_DEBUG
59 #include "qdf_debug_domain.h"
60 #include <qdf_list.h>
61 
62 enum list_type {
63 	LIST_TYPE_MEM = 0,
64 	LIST_TYPE_DMA = 1,
65 	LIST_TYPE_MAX,
66 };
67 
68 /**
69  * major_alloc_priv: private data registered to the debugfs entry created
70  *                   to list the major allocations
71  * @type:            type of the list to be parsed
72  * @threshold:       configured by the user by writing to the corresponding
73  *                   debugfs entry. Used to list the functions which requested
74  *                   memory/dma allocations more than threshold number of times.
75  */
76 struct major_alloc_priv {
77 	enum list_type type;
78 	uint32_t threshold;
79 };
80 
81 static struct major_alloc_priv mem_priv = {
82 	/* List type set to mem */
83 	LIST_TYPE_MEM,
84 	/* initial threshold to list APIs which allocate mem >= 50 times */
85 	50
86 };
87 
88 static struct major_alloc_priv dma_priv = {
89 	/* List type set to DMA */
90 	LIST_TYPE_DMA,
91 	/* initial threshold to list APIs which allocate dma >= 50 times */
92 	50
93 };
94 
95 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
96 static qdf_spinlock_t qdf_mem_list_lock;
97 
98 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
99 static qdf_spinlock_t qdf_mem_dma_list_lock;
100 
101 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
102 {
103 	return &qdf_mem_domains[domain];
104 }
105 
106 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
107 {
108 	return &qdf_mem_dma_domains[domain];
109 }
110 
111 /**
112  * struct qdf_mem_header - memory object to debug
113  * @node: node to the list
114  * @domain: the active memory domain at time of allocation
115  * @freed: flag set during free, used to detect double frees
116  *	Use uint8_t so we can detect corruption
117  * @func: name of the function the allocation was made from
118  * @line: line number of the file the allocation was made from
119  * @size: size of the allocation in bytes
120  * @caller: Caller of the function for which memory is allocated
121  * @header: a known value, used to detect out-of-bounds access
122  * @time: timestamp at which allocation was made
123  */
124 struct qdf_mem_header {
125 	qdf_list_node_t node;
126 	enum qdf_debug_domain domain;
127 	uint8_t freed;
128 	char func[QDF_MEM_FUNC_NAME_SIZE];
129 	uint32_t line;
130 	uint32_t size;
131 	void *caller;
132 	uint64_t header;
133 	uint64_t time;
134 };
135 
136 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
137 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
138 
139 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
140 {
141 	return (struct qdf_mem_header *)ptr - 1;
142 }
143 
144 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
145 							    qdf_size_t size)
146 {
147 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
148 }
149 
150 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
151 {
152 	return (uint64_t *)((void *)(header + 1) + header->size);
153 }
154 
155 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
156 {
157 	return (void *)(header + 1);
158 }
159 
160 /* number of bytes needed for the qdf memory debug information */
161 #define QDF_MEM_DEBUG_SIZE \
162 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
163 
164 /* number of bytes needed for the qdf dma memory debug information */
165 #define QDF_DMA_MEM_DEBUG_SIZE \
166 	(sizeof(struct qdf_mem_header))
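
/*
 * A sketch of the allocation layout implied by the accessors above: a debug
 * heap allocation is arranged as
 *
 *   | struct qdf_mem_header | caller buffer (header->size bytes) | trailer |
 *
 * qdf_mem_get_ptr() returns the caller buffer immediately after the header,
 * and qdf_mem_get_trailer() returns the 8-byte WLAN_MEM_TRAILER cookie placed
 * right after the caller buffer. DMA debug allocations carry only the header,
 * which qdf_mem_dma_get_header() locates at the end of the buffer instead.
 */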
167 
168 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
169 {
170 	QDF_BUG(header);
171 	if (!header)
172 		return;
173 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
174 }
175 
176 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
177 				const char *func, uint32_t line, void *caller)
178 {
179 	QDF_BUG(header);
180 	if (!header)
181 		return;
182 
183 	header->domain = qdf_debug_domain_get();
184 	header->freed = false;
185 
186 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
187 
188 	header->line = line;
189 	header->size = size;
190 	header->caller = caller;
191 	header->header = WLAN_MEM_HEADER;
192 	header->time = qdf_get_log_timestamp();
193 }
194 
195 enum qdf_mem_validation_bitmap {
196 	QDF_MEM_BAD_HEADER = 1 << 0,
197 	QDF_MEM_BAD_TRAILER = 1 << 1,
198 	QDF_MEM_BAD_SIZE = 1 << 2,
199 	QDF_MEM_DOUBLE_FREE = 1 << 3,
200 	QDF_MEM_BAD_FREED = 1 << 4,
201 	QDF_MEM_BAD_NODE = 1 << 5,
202 	QDF_MEM_BAD_DOMAIN = 1 << 6,
203 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
204 };
205 
206 static enum qdf_mem_validation_bitmap
207 qdf_mem_trailer_validate(struct qdf_mem_header *header)
208 {
209 	enum qdf_mem_validation_bitmap error_bitmap = 0;
210 
211 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
212 		error_bitmap |= QDF_MEM_BAD_TRAILER;
213 	return error_bitmap;
214 }
215 
216 static enum qdf_mem_validation_bitmap
217 qdf_mem_header_validate(struct qdf_mem_header *header,
218 			enum qdf_debug_domain domain)
219 {
220 	enum qdf_mem_validation_bitmap error_bitmap = 0;
221 
222 	if (header->header != WLAN_MEM_HEADER)
223 		error_bitmap |= QDF_MEM_BAD_HEADER;
224 
225 	if (header->size > QDF_MEM_MAX_MALLOC)
226 		error_bitmap |= QDF_MEM_BAD_SIZE;
227 
228 	if (header->freed == true)
229 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
230 	else if (header->freed)
231 		error_bitmap |= QDF_MEM_BAD_FREED;
232 
233 	if (!qdf_list_node_in_any_list(&header->node))
234 		error_bitmap |= QDF_MEM_BAD_NODE;
235 
236 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
237 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
238 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
239 	else if (header->domain != domain)
240 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
241 
242 	return error_bitmap;
243 }
244 
245 static void
246 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
247 			    enum qdf_debug_domain current_domain,
248 			    enum qdf_mem_validation_bitmap error_bitmap,
249 			    const char *func,
250 			    uint32_t line)
251 {
252 	if (!error_bitmap)
253 		return;
254 
255 	if (error_bitmap & QDF_MEM_BAD_HEADER)
256 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
257 			header->header, WLAN_MEM_HEADER);
258 
259 	if (error_bitmap & QDF_MEM_BAD_SIZE)
260 		qdf_err("Corrupted memory size %u (expected < %d)",
261 			header->size, QDF_MEM_MAX_MALLOC);
262 
263 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
264 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
265 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
266 
267 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
268 		qdf_err("Memory has previously been freed");
269 
270 	if (error_bitmap & QDF_MEM_BAD_FREED)
271 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
272 
273 	if (error_bitmap & QDF_MEM_BAD_NODE)
274 		qdf_err("Corrupted memory header node or double free");
275 
276 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
277 		qdf_err("Corrupted memory domain 0x%x", header->domain);
278 
279 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
280 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
281 			qdf_debug_domain_name(header->domain), header->domain,
282 			qdf_debug_domain_name(current_domain), current_domain);
283 
284 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
285 }
286 #endif /* MEMORY_DEBUG */
287 
288 u_int8_t prealloc_disabled = 1;
289 qdf_declare_param(prealloc_disabled, byte);
290 qdf_export_symbol(prealloc_disabled);
291 
292 #if defined WLAN_DEBUGFS
293 
294 /* Debugfs root directory for qdf_mem */
295 static struct dentry *qdf_mem_debugfs_root;
296 
297 /**
298  * struct __qdf_mem_stat - qdf memory statistics
299  * @kmalloc:	total bytes allocated via kmalloc
300  * @dma:	total bytes of dma allocations
301  * @skb:	total bytes of skb allocations
302  */
303 static struct __qdf_mem_stat {
304 	qdf_atomic_t kmalloc;
305 	qdf_atomic_t dma;
306 	qdf_atomic_t skb;
307 } qdf_mem_stat;
308 
309 void qdf_mem_kmalloc_inc(qdf_size_t size)
310 {
311 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
312 }
313 
314 static void qdf_mem_dma_inc(qdf_size_t size)
315 {
316 	qdf_atomic_add(size, &qdf_mem_stat.dma);
317 }
318 
319 void qdf_mem_skb_inc(qdf_size_t size)
320 {
321 	qdf_atomic_add(size, &qdf_mem_stat.skb);
322 }
323 
324 void qdf_mem_kmalloc_dec(qdf_size_t size)
325 {
326 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
327 }
328 
329 static inline void qdf_mem_dma_dec(qdf_size_t size)
330 {
331 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
332 }
333 
334 void qdf_mem_skb_dec(qdf_size_t size)
335 {
336 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
337 }
338 
339 #ifdef MEMORY_DEBUG
340 static int qdf_err_printer(void *priv, const char *fmt, ...)
341 {
342 	va_list args;
343 
344 	va_start(args, fmt);
345 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
346 	va_end(args);
347 
348 	return 0;
349 }
350 
351 static int seq_printf_printer(void *priv, const char *fmt, ...)
352 {
353 	struct seq_file *file = priv;
354 	va_list args;
355 
356 	va_start(args, fmt);
357 	seq_vprintf(file, fmt, args);
358 	seq_puts(file, "\n");
359 	va_end(args);
360 
361 	return 0;
362 }
363 
364 /**
365  * struct __qdf_mem_info - memory statistics
366  * @func: the function which allocated memory
367  * @line: the line at which allocation happened
368  * @size: the size of allocation
369  * @caller: Address of the caller function
370  * @count: how many allocations of same type
371  * @time: timestamp at which allocation happened
372  */
373 struct __qdf_mem_info {
374 	char func[QDF_MEM_FUNC_NAME_SIZE];
375 	uint32_t line;
376 	uint32_t size;
377 	void *caller;
378 	uint32_t count;
379 	uint64_t time;
380 };
381 
382 /*
383  * The table depth defines the de-duplication proximity scope.
384  * A deeper table takes more time, so choose an optimal value.
385  */
386 #define QDF_MEM_STAT_TABLE_SIZE 8
387 
388 /**
389  * qdf_mem_debug_print_header() - memory debug header print logic
390  * @print: the print adapter function
391  * @print_priv: the private data to be consumed by @print
392  * @threshold: the threshold value set by user to list top allocations
393  *
394  * Return: None
395  */
396 static void qdf_mem_debug_print_header(qdf_abstract_print print,
397 				       void *print_priv,
398 				       uint32_t threshold)
399 {
400 	if (threshold)
401 		print(print_priv, "APIs requested allocations >= %u no of time",
402 		      threshold);
403 	print(print_priv,
404 	      "--------------------------------------------------------------");
405 	print(print_priv,
406 	      " count    size     total    filename     caller    timestamp");
407 	print(print_priv,
408 	      "--------------------------------------------------------------");
409 }
410 
411 /**
412  * qdf_mem_meta_table_print() - memory metadata table print logic
413  * @table: the memory metadata table to print
414  * @print: the print adapter function
415  * @print_priv: the private data to be consumed by @print
416  * @threshold: the threshold value set by user to list top allocations
417  *
418  * Return: None
419  */
420 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
421 				     qdf_abstract_print print,
422 				     void *print_priv,
423 				     uint32_t threshold)
424 {
425 	int i;
426 	char debug_str[QDF_DEBUG_STRING_SIZE];
427 	size_t len = 0;
428 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
429 
430 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
431 			     "%s", debug_prefix);
432 
433 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
434 		if (!table[i].count)
435 			break;
436 
437 		print(print_priv,
438 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
439 		      table[i].count,
440 		      table[i].size,
441 		      table[i].count * table[i].size,
442 		      table[i].func,
443 		      table[i].line, table[i].caller,
444 		      table[i].time);
445 		len += qdf_scnprintf(debug_str + len,
446 				     sizeof(debug_str) - len,
447 				     " @ %s:%u %pS",
448 				     table[i].func,
449 				     table[i].line,
450 				     table[i].caller);
451 	}
452 	print(print_priv, "%s", debug_str);
453 }
454 
455 /**
456  * qdf_print_major_alloc() - print metadata table entries that meet the threshold
457  * @table: the memory metadata table to print
458  * @print: the print adapter function
459  * @print_priv: the private data to be consumed by @print
460  * @threshold: the threshold value set by user to list top allocations
461  *
462  * Return: None
463  */
464 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
465 				  qdf_abstract_print print,
466 				  void *print_priv,
467 				  uint32_t threshold)
468 {
469 	int i;
470 
471 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 		if (!table[i].count)
473 			break;
474 		if (table[i].count >= threshold)
475 			print(print_priv,
476 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
477 			      table[i].count,
478 			      table[i].size,
479 			      table[i].count * table[i].size,
480 			      table[i].func,
481 			      table[i].line, table[i].caller,
482 			      table[i].time);
483 	}
484 }
485 
486 /**
487  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
488  * @table: the memory metadata table to insert into
489  * @meta: the memory metadata to insert
490  *
491  * Return: true if the table is full after inserting, false otherwise
492  */
493 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
494 				      struct qdf_mem_header *meta)
495 {
496 	int i;
497 
498 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
499 		if (!table[i].count) {
500 			qdf_str_lcopy(table[i].func, meta->func,
501 				      QDF_MEM_FUNC_NAME_SIZE);
502 			table[i].line = meta->line;
503 			table[i].size = meta->size;
504 			table[i].count = 1;
505 			table[i].caller = meta->caller;
506 			table[i].time = meta->time;
507 			break;
508 		}
509 
510 		if (qdf_str_eq(table[i].func, meta->func) &&
511 		    table[i].line == meta->line &&
512 		    table[i].size == meta->size &&
513 		    table[i].caller == meta->caller) {
514 			table[i].count++;
515 			break;
516 		}
517 	}
518 
519 	/* return true if the table is now full */
520 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
521 }
522 
523 /**
524  * qdf_mem_domain_print() - output agnostic memory domain print logic
525  * @domain: the memory domain to print
526  * @print: the print adapter function
527  * @print_priv: the private data to be consumed by @print
528  * @threshold: the threshold value set by user to list top allocations
529  * @mem_print: pointer to function which prints the memory allocation data
530  *
531  * Return: None
532  */
533 static void qdf_mem_domain_print(qdf_list_t *domain,
534 				 qdf_abstract_print print,
535 				 void *print_priv,
536 				 uint32_t threshold,
537 				 void (*mem_print)(struct __qdf_mem_info *,
538 						   qdf_abstract_print,
539 						   void *, uint32_t))
540 {
541 	QDF_STATUS status;
542 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
543 	qdf_list_node_t *node;
544 
545 	qdf_mem_zero(table, sizeof(table));
546 	qdf_mem_debug_print_header(print, print_priv, threshold);
547 
548 	/* hold lock while inserting to avoid use-after-free of the metadata */
549 	qdf_spin_lock(&qdf_mem_list_lock);
550 	status = qdf_list_peek_front(domain, &node);
551 	while (QDF_IS_STATUS_SUCCESS(status)) {
552 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
553 		bool is_full = qdf_mem_meta_table_insert(table, meta);
554 
555 		qdf_spin_unlock(&qdf_mem_list_lock);
556 
557 		if (is_full) {
558 			(*mem_print)(table, print, print_priv, threshold);
559 			qdf_mem_zero(table, sizeof(table));
560 		}
561 
562 		qdf_spin_lock(&qdf_mem_list_lock);
563 		status = qdf_list_peek_next(domain, node, &node);
564 	}
565 	qdf_spin_unlock(&qdf_mem_list_lock);
566 
567 	(*mem_print)(table, print, print_priv, threshold);
568 }
569 
570 /**
571  * qdf_mem_seq_start() - sequential callback to start
572  * @seq: seq_file handle
573  * @pos: The start position of the sequence
574  *
575  * Return: iterator pointer, or NULL if iteration is complete
576  */
577 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
578 {
579 	enum qdf_debug_domain domain = *pos;
580 
581 	if (!qdf_debug_domain_valid(domain))
582 		return NULL;
583 
584 	/* just use the current position as our iterator */
585 	return pos;
586 }
587 
588 /**
589  * qdf_mem_seq_next() - next sequential callback
590  * @seq: seq_file handle
591  * @v: the current iterator
592  * @pos: the current position
593  *
594  * Get the next node and release previous node.
595  *
596  * Return: iterator pointer, or NULL if iteration is complete
597  */
598 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
599 {
600 	++*pos;
601 
602 	return qdf_mem_seq_start(seq, pos);
603 }
604 
605 /**
606  * qdf_mem_seq_stop() - stop sequential callback
607  * @seq: seq_file handle
608  * @v: current iterator
609  *
610  * Return: None
611  */
612 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
613 
614 /**
615  * qdf_mem_seq_show() - print sequential callback
616  * @seq: seq_file handle
617  * @v: current iterator
618  *
619  * Return: 0 - success
620  */
621 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
622 {
623 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
624 
625 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
626 		   qdf_debug_domain_name(domain_id), domain_id);
627 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
628 			     seq_printf_printer,
629 			     seq,
630 			     0,
631 			     qdf_mem_meta_table_print);
632 
633 	return 0;
634 }
635 
636 /* sequential file operation table */
637 static const struct seq_operations qdf_mem_seq_ops = {
638 	.start = qdf_mem_seq_start,
639 	.next  = qdf_mem_seq_next,
640 	.stop  = qdf_mem_seq_stop,
641 	.show  = qdf_mem_seq_show,
642 };
643 
644 
645 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
646 {
647 	return seq_open(file, &qdf_mem_seq_ops);
648 }
649 
650 /**
651  * qdf_major_alloc_show() - print sequential callback
652  * @seq: seq_file handle
653  * @v: current iterator
654  *
655  * Return: 0 - success
656  */
657 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
658 {
659 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
660 	struct major_alloc_priv *priv;
661 	qdf_list_t *list;
662 
663 	priv = (struct major_alloc_priv *)seq->private;
664 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
665 		   qdf_debug_domain_name(domain_id), domain_id);
666 
667 	switch (priv->type) {
668 	case LIST_TYPE_MEM:
669 		list = qdf_mem_list_get(domain_id);
670 		break;
671 	case LIST_TYPE_DMA:
672 		list = qdf_mem_dma_list(domain_id);
673 		break;
674 	default:
675 		list = NULL;
676 		break;
677 	}
678 
679 	if (list)
680 		qdf_mem_domain_print(list,
681 				     seq_printf_printer,
682 				     seq,
683 				     priv->threshold,
684 				     qdf_print_major_alloc);
685 
686 	return 0;
687 }
688 
689 /* sequential file operation table created to track major allocs */
690 static const struct seq_operations qdf_major_allocs_seq_ops = {
691 	.start = qdf_mem_seq_start,
692 	.next = qdf_mem_seq_next,
693 	.stop = qdf_mem_seq_stop,
694 	.show = qdf_major_alloc_show,
695 };
696 
697 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
698 {
699 	void *private = inode->i_private;
700 	struct seq_file *seq;
701 	int rc;
702 
703 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
704 	if (rc == 0) {
705 		seq = file->private_data;
706 		seq->private = private;
707 	}
708 	return rc;
709 }
710 
711 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
712 					     const char __user *user_buf,
713 					     size_t count,
714 					     loff_t *pos)
715 {
716 	char buf[32];
717 	ssize_t buf_size;
718 	uint32_t threshold;
719 	struct seq_file *seq = file->private_data;
720 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
721 
722 	buf_size = min(count, (sizeof(buf) - 1));
723 	if (buf_size <= 0)
724 		return 0;
725 	if (copy_from_user(buf, user_buf, buf_size))
726 		return -EFAULT;
727 	buf[buf_size] = '\0';
728 	if (!kstrtou32(buf, 10, &threshold))
729 		priv->threshold = threshold;
730 	return buf_size;
731 }
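
/*
 * Usage sketch for the threshold knob above (the debugfs mount point and the
 * qdf root directory name are assumptions; the actual path depends on the
 * platform and on qdf_debugfs_get_root()):
 *
 *   # list only call sites with >= 100 recorded allocations per domain
 *   echo 100 > /sys/kernel/debug/qdf/mem/major_mem_allocs
 *   cat /sys/kernel/debug/qdf/mem/major_mem_allocs
 */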
732 
733 /* file operation table for listing major allocs */
734 static const struct file_operations fops_qdf_major_allocs = {
735 	.owner = THIS_MODULE,
736 	.open = qdf_major_allocs_open,
737 	.read = seq_read,
738 	.llseek = seq_lseek,
739 	.release = seq_release,
740 	.write = qdf_major_alloc_set_threshold,
741 };
742 
743 /* debugfs file operation table */
744 static const struct file_operations fops_qdf_mem_debugfs = {
745 	.owner = THIS_MODULE,
746 	.open = qdf_mem_debugfs_open,
747 	.read = seq_read,
748 	.llseek = seq_lseek,
749 	.release = seq_release,
750 };
751 
752 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
753 {
754 	if (is_initial_mem_debug_disabled)
755 		return QDF_STATUS_SUCCESS;
756 
757 	if (!qdf_mem_debugfs_root)
758 		return QDF_STATUS_E_FAILURE;
759 
760 	debugfs_create_file("list",
761 			    S_IRUSR,
762 			    qdf_mem_debugfs_root,
763 			    NULL,
764 			    &fops_qdf_mem_debugfs);
765 
766 	debugfs_create_file("major_mem_allocs",
767 			    0600,
768 			    qdf_mem_debugfs_root,
769 			    &mem_priv,
770 			    &fops_qdf_major_allocs);
771 
772 	debugfs_create_file("major_dma_allocs",
773 			    0600,
774 			    qdf_mem_debugfs_root,
775 			    &dma_priv,
776 			    &fops_qdf_major_allocs);
777 
778 	return QDF_STATUS_SUCCESS;
779 }
780 
781 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
782 {
783 	return QDF_STATUS_SUCCESS;
784 }
785 
786 #else /* MEMORY_DEBUG */
787 
788 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
789 {
790 	return QDF_STATUS_E_NOSUPPORT;
791 }
792 
793 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
794 {
795 	return QDF_STATUS_E_NOSUPPORT;
796 }
797 
798 #endif /* MEMORY_DEBUG */
799 
800 
801 static void qdf_mem_debugfs_exit(void)
802 {
803 	debugfs_remove_recursive(qdf_mem_debugfs_root);
804 	qdf_mem_debugfs_root = NULL;
805 }
806 
807 static QDF_STATUS qdf_mem_debugfs_init(void)
808 {
809 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
810 
811 	if (!qdf_debugfs_root)
812 		return QDF_STATUS_E_FAILURE;
813 
814 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
815 
816 	if (!qdf_mem_debugfs_root)
817 		return QDF_STATUS_E_FAILURE;
818 
819 
820 	debugfs_create_atomic_t("kmalloc",
821 				S_IRUSR,
822 				qdf_mem_debugfs_root,
823 				&qdf_mem_stat.kmalloc);
824 
825 	debugfs_create_atomic_t("dma",
826 				S_IRUSR,
827 				qdf_mem_debugfs_root,
828 				&qdf_mem_stat.dma);
829 
830 	debugfs_create_atomic_t("skb",
831 				S_IRUSR,
832 				qdf_mem_debugfs_root,
833 				&qdf_mem_stat.skb);
834 
835 	return QDF_STATUS_SUCCESS;
836 }
837 
838 #else /* WLAN_DEBUGFS */
839 
840 static inline void qdf_mem_dma_inc(qdf_size_t size) {}
841 static inline void qdf_mem_dma_dec(qdf_size_t size) {}
842 
843 static QDF_STATUS qdf_mem_debugfs_init(void)
844 {
845 	return QDF_STATUS_E_NOSUPPORT;
846 }
847 static void qdf_mem_debugfs_exit(void) {}
848 
849 
850 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
851 {
852 	return QDF_STATUS_E_NOSUPPORT;
853 }
854 
855 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
856 {
857 	return QDF_STATUS_E_NOSUPPORT;
858 }
859 
860 #endif /* WLAN_DEBUGFS */
861 
862 /**
863  * __qdf_mempool_init() - Create and initialize memory pool
864  *
865  * @osdev: platform device object
866  * @pool_addr: address of the pool created
867  * @elem_cnt: no. of elements in pool
868  * @elem_size: size of each pool element in bytes
869  * @flags: flags
870  *
871  * Return: 0 on success, error code on failure
872  */
873 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
874 		       int elem_cnt, size_t elem_size, u_int32_t flags)
875 {
876 	__qdf_mempool_ctxt_t *new_pool = NULL;
877 	u_int32_t align = L1_CACHE_BYTES;
878 	unsigned long aligned_pool_mem;
879 	int pool_id;
880 	int i;
881 
882 	if (prealloc_disabled) {
883 		/* TBD: We can maintain a list of pools in qdf_device_t
884 		 * to help debugging
885 		 * when pre-allocation is not enabled
886 		 */
887 		new_pool = (__qdf_mempool_ctxt_t *)
888 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
889 		if (!new_pool)
890 			return QDF_STATUS_E_NOMEM;
891 
892 		memset(new_pool, 0, sizeof(*new_pool));
893 		/* TBD: define flags for zeroing buffers etc */
894 		new_pool->flags = flags;
895 		new_pool->elem_size = elem_size;
896 		new_pool->max_elem = elem_cnt;
897 		*pool_addr = new_pool;
898 		return 0;
899 	}
900 
901 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
902 		if (!osdev->mem_pool[pool_id])
903 			break;
904 	}
905 
906 	if (pool_id == MAX_MEM_POOLS)
907 		return -ENOMEM;
908 
909 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
910 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
911 	if (!new_pool)
912 		return -ENOMEM;
913 
914 	memset(new_pool, 0, sizeof(*new_pool));
915 	/* TBD: define flags for zeroing buffers etc */
916 	new_pool->flags = flags;
917 	new_pool->pool_id = pool_id;
918 
919 	/* Round up the element size to cacheline */
920 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
921 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
922 				((align)?(align - 1):0);
923 
924 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
925 	if (!new_pool->pool_mem) {
926 			/* TBD: Check if we need get_free_pages above */
927 		kfree(new_pool);
928 		osdev->mem_pool[pool_id] = NULL;
929 		return -ENOMEM;
930 	}
931 
932 	spin_lock_init(&new_pool->lock);
933 
934 	/* Initialize free list */
935 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
936 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
937 	STAILQ_INIT(&new_pool->free_list);
938 
939 	for (i = 0; i < elem_cnt; i++)
940 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
941 			(mempool_elem_t *)(aligned_pool_mem +
942 			(new_pool->elem_size * i)), mempool_entry);
943 
944 
945 	new_pool->free_cnt = elem_cnt;
946 	*pool_addr = new_pool;
947 	return 0;
948 }
949 qdf_export_symbol(__qdf_mempool_init);
950 
951 /**
952  * __qdf_mempool_destroy() - Destroy memory pool
953  * @osdev: platform device object
954  * @Handle: to memory pool
955  *
956  * Returns: none
957  */
958 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
959 {
960 	int pool_id = 0;
961 
962 	if (!pool)
963 		return;
964 
965 	if (prealloc_disabled) {
966 		kfree(pool);
967 		return;
968 	}
969 
970 	pool_id = pool->pool_id;
971 
972 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
973 	kfree(pool->pool_mem);
974 	kfree(pool);
975 	osdev->mem_pool[pool_id] = NULL;
976 }
977 qdf_export_symbol(__qdf_mempool_destroy);
978 
979 /**
980  * __qdf_mempool_alloc() - Allocate an element from the memory pool
981  *
982  * @osdev: platform device object
983  * @pool: Handle to memory pool
984  *
985  * Return: Pointer to the allocated element or NULL if the pool is empty
986  */
987 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
988 {
989 	void *buf = NULL;
990 
991 	if (!pool)
992 		return NULL;
993 
994 	if (prealloc_disabled)
995 		return  qdf_mem_malloc(pool->elem_size);
996 
997 	spin_lock_bh(&pool->lock);
998 
999 	buf = STAILQ_FIRST(&pool->free_list);
1000 	if (buf) {
1001 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1002 		pool->free_cnt--;
1003 	}
1004 
1005 	/* TBD: Update free count if debug is enabled */
1006 	spin_unlock_bh(&pool->lock);
1007 
1008 	return buf;
1009 }
1010 qdf_export_symbol(__qdf_mempool_alloc);
1011 
1012 /**
1013  * __qdf_mempool_free() - Free a memory pool element
1014  * @osdev: Platform device object
1015  * @pool: Handle to memory pool
1016  * @buf: Element to be freed
1017  *
1018  * Returns: none
1019  */
1020 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1021 {
1022 	if (!pool)
1023 		return;
1024 
1025 
1026 	if (prealloc_disabled)
1027 		return qdf_mem_free(buf);
1028 
1029 	spin_lock_bh(&pool->lock);
1030 	pool->free_cnt++;
1031 
1032 	STAILQ_INSERT_TAIL
1033 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1034 	spin_unlock_bh(&pool->lock);
1035 }
1036 qdf_export_symbol(__qdf_mempool_free);
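
/*
 * Mempool usage sketch (illustrative only; "struct foo" and "osdev" are
 * placeholders, and most callers go through the qdf_mempool_* wrappers
 * rather than these __qdf_* variants):
 *
 *   __qdf_mempool_t pool;
 *
 *   if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *           return;                            // pool creation failed
 *
 *   struct foo *elem = __qdf_mempool_alloc(osdev, pool);
 *   if (elem) {
 *           // ... use the element ...
 *           __qdf_mempool_free(osdev, pool, elem);
 *   }
 *
 *   __qdf_mempool_destroy(osdev, pool);        // release the backing memory
 */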
1037 
1038 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
1039 /**
1040  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1041  * @size: the number of bytes to allocate
1042  *
1043  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1044  * a chunk of pre-allocated memory. If size is less than or equal to
1045  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1046  *
1047  * Return: NULL on failure, non-NULL on success
1048  */
1049 static void *qdf_mem_prealloc_get(size_t size)
1050 {
1051 	void *ptr;
1052 
1053 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1054 		return NULL;
1055 
1056 	ptr = wcnss_prealloc_get(size);
1057 	if (!ptr)
1058 		return NULL;
1059 
1060 	memset(ptr, 0, size);
1061 
1062 	return ptr;
1063 }
1064 
1065 static inline bool qdf_mem_prealloc_put(void *ptr)
1066 {
1067 	return wcnss_prealloc_put(ptr);
1068 }
1069 #else
1070 static inline void *qdf_mem_prealloc_get(size_t size)
1071 {
1072 	return NULL;
1073 }
1074 
1075 static inline bool qdf_mem_prealloc_put(void *ptr)
1076 {
1077 	return false;
1078 }
1079 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
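
/*
 * Note on how the helpers above are used: the allocation paths below try
 * qdf_mem_prealloc_get() before falling back to kzalloc(), and the free paths
 * offer the pointer to qdf_mem_prealloc_put() first, so only memory that the
 * pre-allocation pool does not own reaches kfree().
 */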
1080 
1081 static int qdf_mem_malloc_flags(void)
1082 {
1083 	if (in_interrupt() || irqs_disabled() || in_atomic())
1084 		return GFP_ATOMIC;
1085 
1086 	return GFP_KERNEL;
1087 }
1088 
1089 /* External Function implementation */
1090 #ifdef MEMORY_DEBUG
1091 /**
1092  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1093  *
1094  * Return: value of mem_debug_disabled qdf module argument
1095  */
1096 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1097 bool qdf_mem_debug_config_get(void)
1098 {
1099 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1100 	return false;
1101 }
1102 #else
1103 bool qdf_mem_debug_config_get(void)
1104 {
1105 	return mem_debug_disabled;
1106 }
1107 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1108 
1109 /**
1110  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1111  *
1112  * Return: none
1113  */
1114 static void qdf_mem_debug_init(void)
1115 {
1116 	int i;
1117 
1118 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1119 
1120 	if (is_initial_mem_debug_disabled)
1121 		return;
1122 
1123 	/* Initializing the list with a maximum size of 60000 */
1124 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1125 		qdf_list_create(&qdf_mem_domains[i], 60000);
1126 	qdf_spinlock_create(&qdf_mem_list_lock);
1127 
1128 	/* dma */
1129 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1130 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1131 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1132 }
1133 
1134 static uint32_t
1135 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1136 			       qdf_list_t *mem_list)
1137 {
1138 	if (is_initial_mem_debug_disabled)
1139 		return 0;
1140 
1141 	if (qdf_list_empty(mem_list))
1142 		return 0;
1143 
1144 	qdf_err("Memory leaks detected in %s domain!",
1145 		qdf_debug_domain_name(domain));
1146 	qdf_mem_domain_print(mem_list,
1147 			     qdf_err_printer,
1148 			     NULL,
1149 			     0,
1150 			     qdf_mem_meta_table_print);
1151 
1152 	return mem_list->count;
1153 }
1154 
1155 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1156 {
1157 	uint32_t leak_count = 0;
1158 	int i;
1159 
1160 	if (is_initial_mem_debug_disabled)
1161 		return;
1162 
1163 	/* detect and print leaks */
1164 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1165 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1166 
1167 	if (leak_count)
1168 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1169 				   leak_count);
1170 }
1171 
1172 /**
1173  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1174  *
1175  * Return: none
1176  */
1177 static void qdf_mem_debug_exit(void)
1178 {
1179 	int i;
1180 
1181 	if (is_initial_mem_debug_disabled)
1182 		return;
1183 
1184 	/* mem */
1185 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1186 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1187 		qdf_list_destroy(qdf_mem_list_get(i));
1188 
1189 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1190 
1191 	/* dma */
1192 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1193 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1194 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1195 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1196 }
1197 
1198 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1199 			   void *caller, uint32_t flag)
1200 {
1201 	QDF_STATUS status;
1202 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1203 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1204 	struct qdf_mem_header *header;
1205 	void *ptr;
1206 	unsigned long start, duration;
1207 
1208 	if (is_initial_mem_debug_disabled)
1209 		return __qdf_mem_malloc(size, func, line);
1210 
1211 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1212 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1213 		return NULL;
1214 	}
1215 
1216 	ptr = qdf_mem_prealloc_get(size);
1217 	if (ptr)
1218 		return ptr;
1219 
1220 	if (!flag)
1221 		flag = qdf_mem_malloc_flags();
1222 
1223 	start = qdf_mc_timer_get_system_time();
1224 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1225 	duration = qdf_mc_timer_get_system_time() - start;
1226 
1227 	if (duration > QDF_MEM_WARN_THRESHOLD)
1228 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1229 			 duration, size, func, line);
1230 
1231 	if (!header) {
1232 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1233 		return NULL;
1234 	}
1235 
1236 	qdf_mem_header_init(header, size, func, line, caller);
1237 	qdf_mem_trailer_init(header);
1238 	ptr = qdf_mem_get_ptr(header);
1239 
1240 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1241 	status = qdf_list_insert_front(mem_list, &header->node);
1242 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1243 	if (QDF_IS_STATUS_ERROR(status))
1244 		qdf_err("Failed to insert memory header; status %d", status);
1245 
1246 	qdf_mem_kmalloc_inc(ksize(header));
1247 
1248 	return ptr;
1249 }
1250 qdf_export_symbol(qdf_mem_malloc_debug);
1251 
1252 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1253 {
1254 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1255 	struct qdf_mem_header *header;
1256 	enum qdf_mem_validation_bitmap error_bitmap;
1257 
1258 	if (is_initial_mem_debug_disabled) {
1259 		__qdf_mem_free(ptr);
1260 		return;
1261 	}
1262 
1263 	/* freeing a null pointer is valid */
1264 	if (qdf_unlikely(!ptr))
1265 		return;
1266 
1267 	if (qdf_mem_prealloc_put(ptr))
1268 		return;
1269 
1270 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1271 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1272 				   ptr);
1273 
1274 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1275 
1276 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1277 	header = qdf_mem_get_header(ptr);
1278 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1279 	error_bitmap |= qdf_mem_trailer_validate(header);
1280 
1281 	if (!error_bitmap) {
1282 		header->freed = true;
1283 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1284 				     &header->node);
1285 	}
1286 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1287 
1288 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1289 				    func, line);
1290 
1291 	qdf_mem_kmalloc_dec(ksize(header));
1292 	kfree(header);
1293 }
1294 qdf_export_symbol(qdf_mem_free_debug);
1295 
1296 void qdf_mem_check_for_leaks(void)
1297 {
1298 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1299 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1300 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1301 	uint32_t leaks_count = 0;
1302 
1303 	if (is_initial_mem_debug_disabled)
1304 		return;
1305 
1306 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1307 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1308 
1309 	if (leaks_count)
1310 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1311 				   leaks_count);
1312 }
1313 
1314 /**
1315  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1316  * qdf_mem_multi_pages_alloc
1317  * @osdev: OS device handle pointer
1318  * @pages: Multi page information storage
1319  * @element_size: Each element size
1320  * @element_num: Total number of elements to be allocated
1321  * @memctxt: Memory context
1322  * @cacheable: Coherent memory or cacheable memory
1323  * @func: Caller of this allocator
1324  * @line: Line number of the caller
1325  * @caller: Return address of the caller
1326  *
1327  * This function allocates a large amount of memory across multiple pages.
1328  * Large contiguous allocations fail frequently, so instead of allocating the
1329  * whole buffer in one shot, the memory is allocated as multiple non-contiguous
1330  * pages that are combined at the point of use.
1331  *
1332  * Return: None
1333  */
1334 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1335 				     struct qdf_mem_multi_page_t *pages,
1336 				     size_t element_size, uint16_t element_num,
1337 				     qdf_dma_context_t memctxt, bool cacheable,
1338 				     const char *func, uint32_t line,
1339 				     void *caller)
1340 {
1341 	uint16_t page_idx;
1342 	struct qdf_mem_dma_page_t *dma_pages;
1343 	void **cacheable_pages = NULL;
1344 	uint16_t i;
1345 
1346 	pages->num_element_per_page = PAGE_SIZE / element_size;
1347 	if (!pages->num_element_per_page) {
1348 		qdf_print("Invalid page %d or element size %d",
1349 			  (int)PAGE_SIZE, (int)element_size);
1350 		goto out_fail;
1351 	}
1352 
1353 	pages->num_pages = element_num / pages->num_element_per_page;
1354 	if (element_num % pages->num_element_per_page)
1355 		pages->num_pages++;
1356 
1357 	if (cacheable) {
1358 		/* Pages information storage */
1359 		pages->cacheable_pages = qdf_mem_malloc_debug(
1360 			pages->num_pages * sizeof(pages->cacheable_pages),
1361 			func, line, caller, 0);
1362 		if (!pages->cacheable_pages)
1363 			goto out_fail;
1364 
1365 		cacheable_pages = pages->cacheable_pages;
1366 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1367 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1368 					PAGE_SIZE, func, line, caller, 0);
1369 			if (!cacheable_pages[page_idx])
1370 				goto page_alloc_fail;
1371 		}
1372 		pages->dma_pages = NULL;
1373 	} else {
1374 		pages->dma_pages = qdf_mem_malloc_debug(
1375 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1376 			func, line, caller, 0);
1377 		if (!pages->dma_pages)
1378 			goto out_fail;
1379 
1380 		dma_pages = pages->dma_pages;
1381 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1382 			dma_pages->page_v_addr_start =
1383 				qdf_mem_alloc_consistent_debug(
1384 					osdev, osdev->dev, PAGE_SIZE,
1385 					&dma_pages->page_p_addr,
1386 					func, line, caller);
1387 			if (!dma_pages->page_v_addr_start) {
1388 				qdf_print("dmaable page alloc fail pi %d",
1389 					  page_idx);
1390 				goto page_alloc_fail;
1391 			}
1392 			dma_pages->page_v_addr_end =
1393 				dma_pages->page_v_addr_start + PAGE_SIZE;
1394 			dma_pages++;
1395 		}
1396 		pages->cacheable_pages = NULL;
1397 	}
1398 	return;
1399 
1400 page_alloc_fail:
1401 	if (cacheable) {
1402 		for (i = 0; i < page_idx; i++)
1403 			qdf_mem_free_debug(pages->cacheable_pages[i],
1404 					   func, line);
1405 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1406 	} else {
1407 		dma_pages = pages->dma_pages;
1408 		for (i = 0; i < page_idx; i++) {
1409 			qdf_mem_free_consistent_debug(
1410 				osdev, osdev->dev,
1411 				PAGE_SIZE, dma_pages->page_v_addr_start,
1412 				dma_pages->page_p_addr, memctxt, func, line);
1413 			dma_pages++;
1414 		}
1415 		qdf_mem_free_debug(pages->dma_pages, func, line);
1416 	}
1417 
1418 out_fail:
1419 	pages->cacheable_pages = NULL;
1420 	pages->dma_pages = NULL;
1421 	pages->num_pages = 0;
1422 }
1423 
1424 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1425 
1426 /**
1427  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1428  * @osdev: OS device handle pointer
1429  * @pages: Multi page information storage
1430  * @memctxt: Memory context
1431  * @cacheable: Coherent memory or cacheable memory
1432  * @func: Caller of this allocator
1433  * @line: Line number of the caller
1434  *
1435  * This function frees memory that was allocated across multiple pages.
1436  *
1437  * Return: None
1438  */
1439 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1440 				    struct qdf_mem_multi_page_t *pages,
1441 				    qdf_dma_context_t memctxt, bool cacheable,
1442 				    const char *func, uint32_t line)
1443 {
1444 	unsigned int page_idx;
1445 	struct qdf_mem_dma_page_t *dma_pages;
1446 
1447 	if (cacheable) {
1448 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1449 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1450 					   func, line);
1451 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1452 	} else {
1453 		dma_pages = pages->dma_pages;
1454 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1455 			qdf_mem_free_consistent_debug(
1456 				osdev, osdev->dev, PAGE_SIZE,
1457 				dma_pages->page_v_addr_start,
1458 				dma_pages->page_p_addr, memctxt, func, line);
1459 			dma_pages++;
1460 		}
1461 		qdf_mem_free_debug(pages->dma_pages, func, line);
1462 	}
1463 
1464 	pages->cacheable_pages = NULL;
1465 	pages->dma_pages = NULL;
1466 	pages->num_pages = 0;
1467 }
1468 
1469 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1470 
1471 #else
1472 static void qdf_mem_debug_init(void) {}
1473 
1474 static void qdf_mem_debug_exit(void) {}
1475 
1476 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1477 {
1478 	void *ptr;
1479 
1480 	ptr = qdf_mem_prealloc_get(size);
1481 	if (ptr)
1482 		return ptr;
1483 
1484 	ptr = kzalloc(size, GFP_ATOMIC);
1485 	if (!ptr) {
1486 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1487 			      size, func, line);
1488 		return NULL;
1489 	}
1490 
1491 	qdf_mem_kmalloc_inc(ksize(ptr));
1492 
1493 	return ptr;
1494 }
1495 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1496 
1497 /**
1498  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1499  * @osdev: OS device handle pointer
1500  * @pages: Multi page information storage
1501  * @element_size: Each element size
1502  * @element_num: Total number of elements to be allocated
1503  * @memctxt: Memory context
1504  * @cacheable: Coherent memory or cacheable memory
1505  *
1506  * This function allocates a large amount of memory across multiple pages.
1507  * Large contiguous allocations fail frequently, so instead of allocating the
1508  * whole buffer in one shot, the memory is allocated as multiple non-contiguous
1509  * pages that are combined at the point of use.
1510  *
1511  * Return: None
1512  */
1513 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1514 			       struct qdf_mem_multi_page_t *pages,
1515 			       size_t element_size, uint16_t element_num,
1516 			       qdf_dma_context_t memctxt, bool cacheable)
1517 {
1518 	uint16_t page_idx;
1519 	struct qdf_mem_dma_page_t *dma_pages;
1520 	void **cacheable_pages = NULL;
1521 	uint16_t i;
1522 
1523 	pages->num_element_per_page = PAGE_SIZE / element_size;
1524 	if (!pages->num_element_per_page) {
1525 		qdf_print("Invalid page %d or element size %d",
1526 			  (int)PAGE_SIZE, (int)element_size);
1527 		goto out_fail;
1528 	}
1529 
1530 	pages->num_pages = element_num / pages->num_element_per_page;
1531 	if (element_num % pages->num_element_per_page)
1532 		pages->num_pages++;
1533 
1534 	if (cacheable) {
1535 		/* Pages information storage */
1536 		pages->cacheable_pages = qdf_mem_malloc(
1537 			pages->num_pages * sizeof(pages->cacheable_pages));
1538 		if (!pages->cacheable_pages)
1539 			goto out_fail;
1540 
1541 		cacheable_pages = pages->cacheable_pages;
1542 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1543 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1544 			if (!cacheable_pages[page_idx])
1545 				goto page_alloc_fail;
1546 		}
1547 		pages->dma_pages = NULL;
1548 	} else {
1549 		pages->dma_pages = qdf_mem_malloc(
1550 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1551 		if (!pages->dma_pages)
1552 			goto out_fail;
1553 
1554 		dma_pages = pages->dma_pages;
1555 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1556 			dma_pages->page_v_addr_start =
1557 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1558 					 PAGE_SIZE,
1559 					&dma_pages->page_p_addr);
1560 			if (!dma_pages->page_v_addr_start) {
1561 				qdf_print("dmaable page alloc fail pi %d",
1562 					page_idx);
1563 				goto page_alloc_fail;
1564 			}
1565 			dma_pages->page_v_addr_end =
1566 				dma_pages->page_v_addr_start + PAGE_SIZE;
1567 			dma_pages++;
1568 		}
1569 		pages->cacheable_pages = NULL;
1570 	}
1571 	return;
1572 
1573 page_alloc_fail:
1574 	if (cacheable) {
1575 		for (i = 0; i < page_idx; i++)
1576 			qdf_mem_free(pages->cacheable_pages[i]);
1577 		qdf_mem_free(pages->cacheable_pages);
1578 	} else {
1579 		dma_pages = pages->dma_pages;
1580 		for (i = 0; i < page_idx; i++) {
1581 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1582 				dma_pages->page_v_addr_start,
1583 				dma_pages->page_p_addr, memctxt);
1584 			dma_pages++;
1585 		}
1586 		qdf_mem_free(pages->dma_pages);
1587 	}
1588 
1589 out_fail:
1590 	pages->cacheable_pages = NULL;
1591 	pages->dma_pages = NULL;
1592 	pages->num_pages = 0;
1593 	return;
1594 }
1595 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1596 
1597 /**
1598  * qdf_mem_multi_pages_free() - free large size of kernel memory
1599  * @osdev: OS device handle pointer
1600  * @pages: Multi page information storage
1601  * @memctxt: Memory context
1602  * @cacheable: Coherent memory or cacheable memory
1603  *
1604  * This function frees memory that was allocated across multiple pages.
1605  *
1606  * Return: None
1607  */
1608 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1609 			      struct qdf_mem_multi_page_t *pages,
1610 			      qdf_dma_context_t memctxt, bool cacheable)
1611 {
1612 	unsigned int page_idx;
1613 	struct qdf_mem_dma_page_t *dma_pages;
1614 
1615 	if (cacheable) {
1616 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1617 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1618 		qdf_mem_free(pages->cacheable_pages);
1619 	} else {
1620 		dma_pages = pages->dma_pages;
1621 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1622 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1623 				dma_pages->page_v_addr_start,
1624 				dma_pages->page_p_addr, memctxt);
1625 			dma_pages++;
1626 		}
1627 		qdf_mem_free(pages->dma_pages);
1628 	}
1629 
1630 	pages->cacheable_pages = NULL;
1631 	pages->dma_pages = NULL;
1632 	pages->num_pages = 0;
1633 	return;
1634 }
1635 qdf_export_symbol(qdf_mem_multi_pages_free);
1636 #endif
1637 
1638 void __qdf_mem_free(void *ptr)
1639 {
1640 	if (!ptr)
1641 		return;
1642 
1643 	if (qdf_mem_prealloc_put(ptr))
1644 		return;
1645 
1646 	qdf_mem_kmalloc_dec(ksize(ptr));
1647 
1648 	kfree(ptr);
1649 }
1650 
1651 qdf_export_symbol(__qdf_mem_free);
1652 
1653 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
1654 {
1655 	void *ptr;
1656 
1657 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1658 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1659 			     line);
1660 		return NULL;
1661 	}
1662 
1663 	ptr = qdf_mem_prealloc_get(size);
1664 	if (ptr)
1665 		return ptr;
1666 
1667 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1668 	if (!ptr)
1669 		return NULL;
1670 
1671 	qdf_mem_kmalloc_inc(ksize(ptr));
1672 
1673 	return ptr;
1674 }
1675 
1676 qdf_export_symbol(__qdf_mem_malloc);
1677 
1678 void *qdf_aligned_malloc_fl(uint32_t *size,
1679 			    void **vaddr_unaligned,
1680 				qdf_dma_addr_t *paddr_unaligned,
1681 				qdf_dma_addr_t *paddr_aligned,
1682 				uint32_t align,
1683 			    const char *func, uint32_t line)
1684 {
1685 	void *vaddr_aligned;
1686 	uint32_t align_alloc_size;
1687 
1688 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
1689 			line);
1690 	if (!*vaddr_unaligned) {
1691 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
1692 		return NULL;
1693 	}
1694 
1695 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1696 
1697 	/* Re-allocate with additional bytes to align the base address only if
1698 	 * the allocation above returns an unaligned address. The reason for
1699 	 * trying an exact-size allocation first is that the OS allocates
1700 	 * blocks in power-of-2 pages and then frees the extra pages.
1701 	 * e.g., for a ring of size 1 MB, the allocation below will request
1702 	 * 1 MB plus 7 bytes for alignment, which causes a 2 MB block
1703 	 * allocation, and that sometimes fails due to memory
1704 	 * fragmentation.
1705 	 */
1706 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1707 		align_alloc_size = *size + align - 1;
1708 
1709 		qdf_mem_free(*vaddr_unaligned);
1710 		*vaddr_unaligned = qdf_mem_malloc_fl(
1711 				(qdf_size_t)align_alloc_size, func, line);
1712 		if (!*vaddr_unaligned) {
1713 			qdf_warn("Failed to alloc %uB @ %s:%d",
1714 				 align_alloc_size, func, line);
1715 			return NULL;
1716 		}
1717 
1718 		*paddr_unaligned = qdf_mem_virt_to_phys(
1719 				*vaddr_unaligned);
1720 		*size = align_alloc_size;
1721 	}
1722 
1723 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
1724 		((unsigned long)(*paddr_unaligned), align);
1725 
1726 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1727 			((unsigned long)(*paddr_aligned) -
1728 			 (unsigned long)(*paddr_unaligned)));
1729 
1730 	return vaddr_aligned;
1731 }
1732 
1733 qdf_export_symbol(qdf_aligned_malloc_fl);
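
/*
 * Worked example of the alignment math above (illustrative numbers): with
 * align = 8 and an unaligned physical address of 0x1003, the buffer is
 * re-allocated with size + 7 extra bytes, paddr_aligned becomes 0x1008, and
 * the returned virtual address is vaddr_unaligned + (0x1008 - 0x1003), i.e.
 * shifted by the same 5 bytes, so the physical and virtual views stay in sync.
 */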
1734 
1735 /**
1736  * qdf_mem_multi_page_link() - Make links for multi page elements
1737  * @osdev: OS device handle pointer
1738  * @pages: Multi page information storage
1739  * @elem_size: Single element size
1740  * @elem_count: number of elements to be linked
1741  * @cacheable: Coherent memory or cacheable memory
1742  *
1743  * This function links the elements of a multi-page allocation into a chain.
1744  *
1745  * Return: 0 on success
1746  */
1747 int qdf_mem_multi_page_link(qdf_device_t osdev,
1748 		struct qdf_mem_multi_page_t *pages,
1749 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1750 {
1751 	uint16_t i, i_int;
1752 	void *page_info;
1753 	void **c_elem = NULL;
1754 	uint32_t num_link = 0;
1755 
1756 	for (i = 0; i < pages->num_pages; i++) {
1757 		if (cacheable)
1758 			page_info = pages->cacheable_pages[i];
1759 		else
1760 			page_info = pages->dma_pages[i].page_v_addr_start;
1761 
1762 		if (!page_info)
1763 			return -ENOMEM;
1764 
1765 		c_elem = (void **)page_info;
1766 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1767 			if (i_int == (pages->num_element_per_page - 1)) {
1768 				if (cacheable)
1769 					*c_elem = pages->
1770 						cacheable_pages[i + 1];
1771 				else
1772 					*c_elem = pages->
1773 						dma_pages[i + 1].
1774 							page_v_addr_start;
1775 				num_link++;
1776 				break;
1777 			} else {
1778 				*c_elem =
1779 					(void *)(((char *)c_elem) + elem_size);
1780 			}
1781 			num_link++;
1782 			c_elem = (void **)*c_elem;
1783 
1784 			/* Exit once the last link has been established */
1785 			if (num_link == (elem_count - 1))
1786 				break;
1787 		}
1788 	}
1789 
1790 	if (c_elem)
1791 		*c_elem = NULL;
1792 
1793 	return 0;
1794 }
1795 qdf_export_symbol(qdf_mem_multi_page_link);
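
/*
 * After qdf_mem_multi_page_link(), the first pointer-sized word of each
 * element points at the next element, crossing page boundaries as needed,
 * and the final element is terminated with NULL. A caller can therefore walk
 * the chain like a singly linked free list (illustrative sketch for the
 * cacheable case):
 *
 *   void **elem = pages->cacheable_pages[0];
 *
 *   while (elem) {
 *           // hand out or initialize the element here
 *           elem = (void **)*elem;
 *   }
 */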
1796 
1797 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1798 {
1799 	/* special case where dst_addr or src_addr can be NULL */
1800 	if (!num_bytes)
1801 		return;
1802 
1803 	QDF_BUG(dst_addr);
1804 	QDF_BUG(src_addr);
1805 	if (!dst_addr || !src_addr)
1806 		return;
1807 
1808 	memcpy(dst_addr, src_addr, num_bytes);
1809 }
1810 qdf_export_symbol(qdf_mem_copy);
1811 
1812 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
1813 {
1814 	qdf_shared_mem_t *shared_mem;
1815 	qdf_dma_addr_t dma_addr, paddr;
1816 	int ret;
1817 
1818 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
1819 	if (!shared_mem)
1820 		return NULL;
1821 
1822 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
1823 				size, qdf_mem_get_dma_addr_ptr(osdev,
1824 						&shared_mem->mem_info));
1825 	if (!shared_mem->vaddr) {
1826 		qdf_err("Unable to allocate DMA memory for shared resource");
1827 		qdf_mem_free(shared_mem);
1828 		return NULL;
1829 	}
1830 
1831 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
1832 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
1833 
1834 	qdf_mem_zero(shared_mem->vaddr, size);
1835 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
1836 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1837 
1838 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
1839 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
1840 				      shared_mem->vaddr, dma_addr, size);
1841 	if (ret) {
1842 		qdf_err("Unable to get DMA sgtable");
1843 		qdf_mem_free_consistent(osdev, osdev->dev,
1844 					shared_mem->mem_info.size,
1845 					shared_mem->vaddr,
1846 					dma_addr,
1847 					qdf_get_dma_mem_context(shared_mem,
1848 								memctx));
1849 		qdf_mem_free(shared_mem);
1850 		return NULL;
1851 	}
1852 
1853 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
1854 
1855 	return shared_mem;
1856 }
1857 
1858 qdf_export_symbol(qdf_mem_shared_mem_alloc);
1859 
1860 /**
1861  * qdf_mem_copy_toio() - copy memory
1862  * @dst_addr: Pointer to destination memory location (to copy to)
1863  * @src_addr: Pointer to source memory location (to copy from)
1864  * @num_bytes: Number of bytes to copy.
1865  *
1866  * Return: none
1867  */
1868 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1869 {
1870 	if (0 == num_bytes) {
1871 		/* special case where dst_addr or src_addr can be NULL */
1872 		return;
1873 	}
1874 
1875 	if ((!dst_addr) || (!src_addr)) {
1876 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1877 			  "%s called with NULL parameter, source:%pK destination:%pK",
1878 			  __func__, src_addr, dst_addr);
1879 		QDF_ASSERT(0);
1880 		return;
1881 	}
1882 	memcpy_toio(dst_addr, src_addr, num_bytes);
1883 }
1884 
1885 qdf_export_symbol(qdf_mem_copy_toio);
1886 
1887 /**
1888  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1889  * @ptr: Pointer to memory that will be set
1890  * @num_bytes: Number of bytes to be set
1891  * @value: Byte value to fill the memory with
1892  *
1893  * Return: None
1894  */
1895 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1896 {
1897 	if (!ptr) {
1898 		qdf_print("%s called with NULL parameter ptr", __func__);
1899 		return;
1900 	}
1901 	memset_io(ptr, value, num_bytes);
1902 }
1903 
1904 qdf_export_symbol(qdf_mem_set_io);
1905 
1906 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1907 {
1908 	QDF_BUG(ptr);
1909 	if (!ptr)
1910 		return;
1911 
1912 	memset(ptr, value, num_bytes);
1913 }
1914 qdf_export_symbol(qdf_mem_set);
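/*
 * Added commentary: unlike memset(ptr, value, num_bytes), the QDF set
 * wrappers above take the length before the fill value, i.e.
 *
 *	qdf_mem_set(buf, sizeof(buf), 0);	 fill buffer with zeros
 *	qdf_mem_set_io(io_buf, len, 0xff);	 fill I/O memory with 0xff
 *
 * Passing the arguments in memset() order would instead fill 'value'
 * bytes with the byte 'num_bytes'.
 */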
1915 
1916 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1917 {
1918 	/* special case where dst_addr or src_addr can be NULL */
1919 	if (!num_bytes)
1920 		return;
1921 
1922 	QDF_BUG(dst_addr);
1923 	QDF_BUG(src_addr);
1924 	if (!dst_addr || !src_addr)
1925 		return;
1926 
1927 	memmove(dst_addr, src_addr, num_bytes);
1928 }
1929 qdf_export_symbol(qdf_mem_move);
1930 
1931 int qdf_mem_cmp(const void *left, const void *right, size_t size)
1932 {
1933 	QDF_BUG(left);
1934 	QDF_BUG(right);
1935 
1936 	return memcmp(left, right, size);
1937 }
1938 qdf_export_symbol(qdf_mem_cmp);
1939 
1940 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1941 /**
1942  * qdf_mem_dma_alloc() - allocates memory for dma
1943  * @osdev: OS device handle
1944  * @dev: Pointer to device handle
1945  * @size: Size to be allocated
1946  * @phy_addr: Physical address
1947  *
1948  * Return: pointer to allocated memory, or NULL if the allocation fails
1949  */
1950 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1951 				      qdf_size_t size,
1952 				      qdf_dma_addr_t *phy_addr)
1953 {
1954 	void *vaddr;
1955 
1956 	vaddr = qdf_mem_malloc(size);
1957 	*phy_addr = ((uintptr_t) vaddr);
1958 	/* using this type conversion to suppress "cast from pointer to integer
1959 	 * of different size" warning on some platforms
1960 	 */
1961 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1962 	return vaddr;
1963 }
1964 
1965 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
1966 	!defined(QCA_WIFI_QCN9000)
1967 
1968 #define QCA8074_RAM_BASE 0x50000000
1969 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1970 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1971 			qdf_dma_addr_t *phy_addr)
1972 {
1973 	void *vaddr = NULL;
1974 	int i;
1975 
1976 	*phy_addr = 0;
1977 
1978 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1979 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1980 					   qdf_mem_malloc_flags());
1981 
1982 		if (!vaddr) {
1983 			qdf_err("%s failed, size: %zu!", __func__, size);
1984 			return NULL;
1985 		}
1986 
1987 		if (*phy_addr >= QCA8074_RAM_BASE)
1988 			return vaddr;
1989 
1990 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1991 	}
1992 
1993 	return NULL;
1994 }
1995 
1996 #else
1997 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1998 				      qdf_size_t size, qdf_dma_addr_t *paddr)
1999 {
2000 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2001 }
2002 #endif
2003 
2004 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2005 static inline void
2006 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2007 {
2008 	qdf_mem_free(vaddr);
2009 }
2010 #else
2011 
2012 static inline void
2013 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2014 {
2015 	dma_free_coherent(dev, size, vaddr, paddr);
2016 }
2017 #endif
2018 
2019 #ifdef MEMORY_DEBUG
2020 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2021 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2022 				     const char *func, uint32_t line,
2023 				     void *caller)
2024 {
2025 	QDF_STATUS status;
2026 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2027 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2028 	struct qdf_mem_header *header;
2029 	void *vaddr;
2030 
2031 	if (is_initial_mem_debug_disabled)
2032 		return __qdf_mem_alloc_consistent(osdev, dev,
2033 						  size, paddr,
2034 						  func, line);
2035 
2036 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2037 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2038 		return NULL;
2039 	}
2040 
2041 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2042 				   paddr);
2043 
2044 	if (!vaddr) {
2045 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2046 		return NULL;
2047 	}
2048 
2049 	header = qdf_mem_dma_get_header(vaddr, size);
2050 	/* For DMA buffers we only add trailers; this function initializes
2051 	 * the header structure at the tail.
2052 	 * Prefixing the header to the DMA buffer causes SMMU faults, so
2053 	 * do not prefix a header to DMA buffers.
2054 	 */
2055 	qdf_mem_header_init(header, size, func, line, caller);
2056 
2057 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2058 	status = qdf_list_insert_front(mem_list, &header->node);
2059 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2060 	if (QDF_IS_STATUS_ERROR(status))
2061 		qdf_err("Failed to insert memory header; status %d", status);
2062 
2063 	qdf_mem_dma_inc(size);
2064 
2065 	return vaddr;
2066 }
2067 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
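/*
 * Added commentary: with MEMORY_DEBUG enabled, the allocation above requests
 * size + QDF_DMA_MEM_DEBUG_SIZE bytes from qdf_mem_dma_alloc() and keeps the
 * debug header as a trailer behind the caller-visible buffer (prefixing it
 * would trigger SMMU faults, per the comment above):
 *
 *	vaddr                                vaddr + size
 *	|<-------- caller's buffer -------->|<-- debug header trailer -->|
 *
 * qdf_mem_dma_get_header(vaddr, size) locates that trailer so it can be
 * validated again on free.
 */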
2068 
2069 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2070 				   qdf_size_t size, void *vaddr,
2071 				   qdf_dma_addr_t paddr,
2072 				   qdf_dma_context_t memctx,
2073 				   const char *func, uint32_t line)
2074 {
2075 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2076 	struct qdf_mem_header *header;
2077 	enum qdf_mem_validation_bitmap error_bitmap;
2078 
2079 	if (is_initial_mem_debug_disabled) {
2080 		__qdf_mem_free_consistent(
2081 					  osdev, dev,
2082 					  size, vaddr,
2083 					  paddr, memctx);
2084 		return;
2085 	}
2086 
2087 	/* freeing a null pointer is valid */
2088 	if (qdf_unlikely(!vaddr))
2089 		return;
2090 
2091 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2092 
2093 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2094 	/* For DMA buffers we only add trailers; this function retrieves
2095 	 * the header structure from the tail.
2096 	 * Prefixing the header to the DMA buffer causes SMMU faults, so
2097 	 * do not prefix a header to DMA buffers.
2098 	 */
2099 	header = qdf_mem_dma_get_header(vaddr, size);
2100 	error_bitmap = qdf_mem_header_validate(header, domain);
2101 	if (!error_bitmap) {
2102 		header->freed = true;
2103 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2104 				     &header->node);
2105 	}
2106 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2107 
2108 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2109 
2110 	qdf_mem_dma_dec(header->size);
2111 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2112 }
2113 qdf_export_symbol(qdf_mem_free_consistent_debug);
2114 #endif /* MEMORY_DEBUG */
2115 
2116 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2117 			       qdf_size_t size, void *vaddr,
2118 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2119 {
2120 	qdf_mem_dma_dec(size);
2121 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2122 }
2123 
2124 qdf_export_symbol(__qdf_mem_free_consistent);
2125 
2126 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2127 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2128 				 const char *func, uint32_t line)
2129 {
2130 	void *vaddr;
2131 
2132 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2133 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2134 			     size, func, line);
2135 		return NULL;
2136 	}
2137 
2138 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2139 
2140 	if (vaddr)
2141 		qdf_mem_dma_inc(size);
2142 
2143 	return vaddr;
2144 }
2145 
2146 qdf_export_symbol(__qdf_mem_alloc_consistent);
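/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file), using the qdf_mem_alloc_consistent()/qdf_mem_free_consistent()
 * front-end macros that are also used later in this file:
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, len, &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, len, vaddr, paddr, 0);
 */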
2147 
2148 void *qdf_aligned_mem_alloc_consistent_fl(
2149 	qdf_device_t osdev, uint32_t *size,
2150 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2151 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2152 	const char *func, uint32_t line)
2153 {
2154 	void *vaddr_aligned;
2155 	uint32_t align_alloc_size;
2156 
2157 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2158 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2159 	if (!*vaddr_unaligned) {
2160 		qdf_warn("Failed to alloc %uB @ %s:%d",
2161 			 *size, func, line);
2162 		return NULL;
2163 	}
2164 
2165 	/* Re-allocate with additional bytes to align the base address only
2166 	 * if the allocation above returns an unaligned address. The reason
2167 	 * for trying the exact-size allocation first is that the OS
2168 	 * allocates blocks in power-of-2 pages and then frees the extra
2169 	 * pages. E.g., for a ring size of 1MB, the allocation below will
2170 	 * request 1MB plus 7 bytes for alignment, which will cause a
2171 	 * 2MB block allocation, and that sometimes fails due to
2172 	 * memory fragmentation.
2173 	 */
2174 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2175 		align_alloc_size = *size + align - 1;
2176 
2177 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2178 					*vaddr_unaligned,
2179 					*paddr_unaligned, 0);
2180 
2181 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2182 				osdev, osdev->dev, align_alloc_size,
2183 				paddr_unaligned);
2184 		if (!*vaddr_unaligned) {
2185 			qdf_warn("Failed to alloc %uB @ %s:%d",
2186 				 align_alloc_size, func, line);
2187 			return NULL;
2188 		}
2189 
2190 		*size = align_alloc_size;
2191 	}
2192 
2193 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2194 			(unsigned long)(*paddr_unaligned), align);
2195 
2196 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2197 				 ((unsigned long)(*paddr_aligned) -
2198 				  (unsigned long)(*paddr_unaligned)));
2199 
2200 	return vaddr_aligned;
2201 }
2202 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
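/*
 * Added commentary: a worked example of the alignment fix-up above.
 * With align = 8 and an unaligned physical address of 0x1003, the first
 * allocation is freed and re-requested with align - 1 = 7 extra bytes;
 * qdf_align(0x1003, 8) then yields paddr_aligned = 0x1008, and the virtual
 * pointer is advanced by the same 5-byte delta so that the returned
 * vaddr_aligned maps exactly to paddr_aligned.
 */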
2203 
2204 /**
2205  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2206  * @osdev: OS device handle
2207  * @bus_addr: dma address to give to the device
2208  * @size: Size of the memory block
2209  * @direction: direction data will be DMAed
2210  *
2211  * Assign memory to the remote device.
2212  * The cache lines are flushed to RAM or invalidated as needed.
2213  *
2214  * Return: none
2215  */
2216 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2217 					qdf_dma_addr_t bus_addr,
2218 					qdf_size_t size,
2219 					enum dma_data_direction direction)
2220 {
2221 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2222 }
2223 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2224 
2225 /**
2226  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2227  * @osdev: OS device handle
2228  * @bus_addr: dma address to give to the cpu
2229  * @size: Size of the memory block
2230  * @direction: direction data will be DMAed
2231  *
2232  * Assign memory to the CPU.
2233  *
2234  * Return: none
2235  */
2236 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2237 				     qdf_dma_addr_t bus_addr,
2238 				     qdf_size_t size,
2239 				     enum dma_data_direction direction)
2240 {
2241 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2242 }
2243 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
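/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): the usual streaming-DMA ownership hand-off with the two sync
 * helpers above; desc_paddr/buf_paddr and desc_len/buf_len are placeholder
 * names.
 *
 *	CPU fills a TX descriptor, then hands it to the device:
 *	qdf_mem_dma_sync_single_for_device(osdev, desc_paddr, desc_len,
 *					   DMA_TO_DEVICE);
 *
 *	Before the CPU parses an RX buffer the device has written:
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len,
 *					DMA_FROM_DEVICE);
 */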
2244 
2245 void qdf_mem_init(void)
2246 {
2247 	qdf_mem_debug_init();
2248 	qdf_net_buf_debug_init();
2249 	qdf_mem_debugfs_init();
2250 	qdf_mem_debug_debugfs_init();
2251 }
2252 qdf_export_symbol(qdf_mem_init);
2253 
2254 void qdf_mem_exit(void)
2255 {
2256 	qdf_mem_debug_debugfs_exit();
2257 	qdf_mem_debugfs_exit();
2258 	qdf_net_buf_debug_exit();
2259 	qdf_mem_debug_exit();
2260 }
2261 qdf_export_symbol(qdf_mem_exit);
2262 
2263 /**
2264  * qdf_ether_addr_copy() - copy an Ethernet address
2265  *
2266  * @dst_addr: A six-byte array Ethernet address destination
2267  * @src_addr: A six-byte array Ethernet address source
2268  *
2269  * Please note: dst & src must both be aligned to u16.
2270  *
2271  * Return: none
2272  */
2273 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2274 {
2275 	if ((!dst_addr) || (!src_addr)) {
2276 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2277 			  "%s called with NULL parameter, source:%pK destination:%pK",
2278 			  __func__, src_addr, dst_addr);
2279 		QDF_ASSERT(0);
2280 		return;
2281 	}
2282 	ether_addr_copy(dst_addr, src_addr);
2283 }
2284 qdf_export_symbol(qdf_ether_addr_copy);
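/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): both buffers must be six bytes and u16-aligned, as noted above;
 * e.g. copying a peer MAC address between two qdf_mac_addr structures
 * (assuming the qdf_mac_addr type from qdf_types.h):
 *
 *	struct qdf_mac_addr dst_mac;
 *
 *	qdf_ether_addr_copy(dst_mac.bytes, peer_mac.bytes);
 */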
2285 
2286