1 /*
2  * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #if defined(CONFIG_CNSS)
40 #include <net/cnss.h>
41 #endif
42 
43 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
44 #include <net/cnss_prealloc.h>
45 #endif
46 
47 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
48 static bool mem_debug_disabled;
49 qdf_declare_param(mem_debug_disabled, bool);
50 qdf_export_symbol(mem_debug_disabled);
51 #endif
52 
53 #ifdef MEMORY_DEBUG
54 static bool is_initial_mem_debug_disabled;
55 #endif
56 
57 /* Preprocessor Definitions and Constants */
58 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MiB */
59 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
60 #define QDF_DEBUG_STRING_SIZE 512
61 
62 /**
63  * struct __qdf_mem_stat - qdf memory statistics
64  * @kmalloc: total kmalloc allocations
65  * @dma: total dma allocations
66  * @skb: total skb allocations
67  */
68 static struct __qdf_mem_stat {
69 	qdf_atomic_t kmalloc;
70 	qdf_atomic_t dma;
71 	qdf_atomic_t skb;
72 } qdf_mem_stat;
73 
74 #ifdef MEMORY_DEBUG
75 #include "qdf_debug_domain.h"
76 
77 enum list_type {
78 	LIST_TYPE_MEM = 0,
79 	LIST_TYPE_DMA = 1,
80 	LIST_TYPE_MAX,
81 };
82 
83 /**
84  * struct major_alloc_priv - private data registered to the debugfs entry
85  *                   created to list the major allocations
86  * @type:            type of the list to be parsed
87  * @threshold:       configured by the user by overwriting the respective
88  *                   debugfs sys entry, to list the functions which requested
89  *                   memory/dma allocations more than threshold number of times
90  */
91 struct major_alloc_priv {
92 	enum list_type type;
93 	uint32_t threshold;
94 };
95 
96 static struct major_alloc_priv mem_priv = {
97 	/* List type set to mem */
98 	LIST_TYPE_MEM,
99 	/* initial threshold to list APIs which allocates mem >= 50 times */
100 	50
101 };
102 
103 static struct major_alloc_priv dma_priv = {
104 	/* List type set to DMA */
105 	LIST_TYPE_DMA,
106 	/* initial threshold to list APIs which allocates dma >= 50 times */
107 	50
108 };
109 
110 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
111 static qdf_spinlock_t qdf_mem_list_lock;
112 
113 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
114 static qdf_spinlock_t qdf_mem_dma_list_lock;
115 
116 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
117 {
118 	return &qdf_mem_domains[domain];
119 }
120 
121 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
122 {
123 	return &qdf_mem_dma_domains[domain];
124 }
125 
126 /**
127  * struct qdf_mem_header - memory object to debug
128  * @node: list node for the debug-domain allocation list
129  * @domain: the active memory domain at time of allocation
130  * @freed: flag set during free, used to detect double frees
131  *	Use uint8_t so we can detect corruption
132  * @func: name of the function the allocation was made from
133  * @line: line number of the file the allocation was made from
134  * @size: size of the allocation in bytes
135  * @caller: Caller of the function for which memory is allocated
136  * @header: a known value, used to detect out-of-bounds access
137  * @time: timestamp at which allocation was made
138  */
139 struct qdf_mem_header {
140 	qdf_list_node_t node;
141 	enum qdf_debug_domain domain;
142 	uint8_t freed;
143 	char func[QDF_MEM_FUNC_NAME_SIZE];
144 	uint32_t line;
145 	uint32_t size;
146 	void *caller;
147 	uint64_t header;
148 	uint64_t time;
149 };
150 
151 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
152 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
153 
154 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
155 {
156 	return (struct qdf_mem_header *)ptr - 1;
157 }
158 
159 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
160 							    qdf_size_t size)
161 {
162 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
163 }
164 
165 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
166 {
167 	return (uint64_t *)((void *)(header + 1) + header->size);
168 }
169 
170 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
171 {
172 	return (void *)(header + 1);
173 }
174 
175 /* number of bytes needed for the qdf memory debug information */
176 #define QDF_MEM_DEBUG_SIZE \
177 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
178 
179 /* number of bytes needed for the qdf dma memory debug information */
180 #define QDF_DMA_MEM_DEBUG_SIZE \
181 	(sizeof(struct qdf_mem_header))
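/*
 * Resulting allocation layouts (derived from the accessors above):
 *
 *   normal:  [ struct qdf_mem_header | user buffer (size) | 64-bit trailer ]
 *            qdf_mem_get_header() steps back one header from the user
 *            pointer; qdf_mem_get_trailer() lands just past the buffer.
 *
 *   dma:     [ user buffer (size) | struct qdf_mem_header ]
 *            the header trails the buffer (qdf_mem_dma_get_header()) and no
 *            trailer canary is used, hence the smaller QDF_DMA_MEM_DEBUG_SIZE.
 */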
182 
183 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
184 {
185 	QDF_BUG(header);
186 	if (!header)
187 		return;
188 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
189 }
190 
191 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
192 				const char *func, uint32_t line, void *caller)
193 {
194 	QDF_BUG(header);
195 	if (!header)
196 		return;
197 
198 	header->domain = qdf_debug_domain_get();
199 	header->freed = false;
200 
201 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
202 
203 	header->line = line;
204 	header->size = size;
205 	header->caller = caller;
206 	header->header = WLAN_MEM_HEADER;
207 	header->time = qdf_get_log_timestamp();
208 }
209 
210 enum qdf_mem_validation_bitmap {
211 	QDF_MEM_BAD_HEADER = 1 << 0,
212 	QDF_MEM_BAD_TRAILER = 1 << 1,
213 	QDF_MEM_BAD_SIZE = 1 << 2,
214 	QDF_MEM_DOUBLE_FREE = 1 << 3,
215 	QDF_MEM_BAD_FREED = 1 << 4,
216 	QDF_MEM_BAD_NODE = 1 << 5,
217 	QDF_MEM_BAD_DOMAIN = 1 << 6,
218 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
219 };
220 
221 static enum qdf_mem_validation_bitmap
222 qdf_mem_trailer_validate(struct qdf_mem_header *header)
223 {
224 	enum qdf_mem_validation_bitmap error_bitmap = 0;
225 
226 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
227 		error_bitmap |= QDF_MEM_BAD_TRAILER;
228 	return error_bitmap;
229 }
230 
231 static enum qdf_mem_validation_bitmap
232 qdf_mem_header_validate(struct qdf_mem_header *header,
233 			enum qdf_debug_domain domain)
234 {
235 	enum qdf_mem_validation_bitmap error_bitmap = 0;
236 
237 	if (header->header != WLAN_MEM_HEADER)
238 		error_bitmap |= QDF_MEM_BAD_HEADER;
239 
240 	if (header->size > QDF_MEM_MAX_MALLOC)
241 		error_bitmap |= QDF_MEM_BAD_SIZE;
242 
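	/* freed is a uint8_t rather than a bool: exactly 1 (true) means a
	 * clean double free, while any other non-zero value means the flag
	 * itself was corrupted (see the two checks below)
	 */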
243 	if (header->freed == true)
244 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
245 	else if (header->freed)
246 		error_bitmap |= QDF_MEM_BAD_FREED;
247 
248 	if (!qdf_list_node_in_any_list(&header->node))
249 		error_bitmap |= QDF_MEM_BAD_NODE;
250 
251 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
252 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
253 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
254 	else if (header->domain != domain)
255 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
256 
257 	return error_bitmap;
258 }
259 
260 static void
261 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
262 			    enum qdf_debug_domain current_domain,
263 			    enum qdf_mem_validation_bitmap error_bitmap,
264 			    const char *func,
265 			    uint32_t line)
266 {
267 	if (!error_bitmap)
268 		return;
269 
270 	if (error_bitmap & QDF_MEM_BAD_HEADER)
271 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
272 			header->header, WLAN_MEM_HEADER);
273 
274 	if (error_bitmap & QDF_MEM_BAD_SIZE)
275 		qdf_err("Corrupted memory size %u (expected < %d)",
276 			header->size, QDF_MEM_MAX_MALLOC);
277 
278 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
279 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
280 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
281 
282 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
283 		qdf_err("Memory has previously been freed");
284 
285 	if (error_bitmap & QDF_MEM_BAD_FREED)
286 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
287 
288 	if (error_bitmap & QDF_MEM_BAD_NODE)
289 		qdf_err("Corrupted memory header node or double free");
290 
291 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
292 		qdf_err("Corrupted memory domain 0x%x", header->domain);
293 
294 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
295 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
296 			qdf_debug_domain_name(header->domain), header->domain,
297 			qdf_debug_domain_name(current_domain), current_domain);
298 
299 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
300 }
301 #endif /* MEMORY_DEBUG */
302 
303 u_int8_t prealloc_disabled = 1;
304 qdf_declare_param(prealloc_disabled, byte);
305 qdf_export_symbol(prealloc_disabled);
306 
307 #if defined WLAN_DEBUGFS
308 
309 /* Debugfs root directory for qdf_mem */
310 static struct dentry *qdf_mem_debugfs_root;
311 
312 #ifdef MEMORY_DEBUG
313 static int qdf_err_printer(void *priv, const char *fmt, ...)
314 {
315 	va_list args;
316 
317 	va_start(args, fmt);
318 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
319 	va_end(args);
320 
321 	return 0;
322 }
323 
324 static int seq_printf_printer(void *priv, const char *fmt, ...)
325 {
326 	struct seq_file *file = priv;
327 	va_list args;
328 
329 	va_start(args, fmt);
330 	seq_vprintf(file, fmt, args);
331 	seq_puts(file, "\n");
332 	va_end(args);
333 
334 	return 0;
335 }
336 
337 /**
338  * struct __qdf_mem_info - memory statistics
339  * @func: the function which allocated memory
340  * @line: the line at which allocation happened
341  * @size: the size of allocation
342  * @caller: Address of the caller function
343  * @count: how many allocations of same type
344  * @time: timestamp at which allocation happened
345  */
346 struct __qdf_mem_info {
347 	char func[QDF_MEM_FUNC_NAME_SIZE];
348 	uint32_t line;
349 	uint32_t size;
350 	void *caller;
351 	uint32_t count;
352 	uint64_t time;
353 };
354 
355 /*
356  * The table depth defines the de-duplication proximity scope.
357  * A deeper table takes more time, so choose an optimal value.
358  */
359 #define QDF_MEM_STAT_TABLE_SIZE 8
360 
361 /**
362  * qdf_mem_debug_print_header() - memory debug header print logic
363  * @print: the print adapter function
364  * @print_priv: the private data to be consumed by @print
365  * @threshold: the threshold value set by user to list top allocations
366  *
367  * Return: None
368  */
369 static void qdf_mem_debug_print_header(qdf_abstract_print print,
370 				       void *print_priv,
371 				       uint32_t threshold)
372 {
373 	if (threshold)
374 		print(print_priv, "APIs requested allocations >= %u times",
375 		      threshold);
376 	print(print_priv,
377 	      "--------------------------------------------------------------");
378 	print(print_priv,
379 	      " count    size     total    filename     caller    timestamp");
380 	print(print_priv,
381 	      "--------------------------------------------------------------");
382 }
383 
384 /**
385  * qdf_mem_meta_table_print() - memory metadata table print logic
386  * @table: the memory metadata table to print
387  * @print: the print adapter function
388  * @print_priv: the private data to be consumed by @print
389  * @threshold: the threshold value set by user to list top allocations
390  *
391  * Return: None
392  */
393 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
394 				     qdf_abstract_print print,
395 				     void *print_priv,
396 				     uint32_t threshold)
397 {
398 	int i;
399 	char debug_str[QDF_DEBUG_STRING_SIZE];
400 	size_t len = 0;
401 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
402 
403 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
404 			     "%s", debug_prefix);
405 
406 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
407 		if (!table[i].count)
408 			break;
409 
410 		print(print_priv,
411 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
412 		      table[i].count,
413 		      table[i].size,
414 		      table[i].count * table[i].size,
415 		      table[i].func,
416 		      table[i].line, table[i].caller,
417 		      table[i].time);
418 		len += qdf_scnprintf(debug_str + len,
419 				     sizeof(debug_str) - len,
420 				     " @ %s:%u %pS",
421 				     table[i].func,
422 				     table[i].line,
423 				     table[i].caller);
424 	}
425 	print(print_priv, "%s", debug_str);
426 }
427 
428 /**
429  * qdf_print_major_alloc() - memory metadata table print logic
430  * @table: the memory metadata table to print
431  * @print: the print adapter function
432  * @print_priv: the private data to be consumed by @print
433  * @threshold: the threshold value set by user to list top allocations
434  *
435  * Return: None
436  */
437 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
438 				  qdf_abstract_print print,
439 				  void *print_priv,
440 				  uint32_t threshold)
441 {
442 	int i;
443 
444 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
445 		if (!table[i].count)
446 			break;
447 		if (table[i].count >= threshold)
448 			print(print_priv,
449 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
450 			      table[i].count,
451 			      table[i].size,
452 			      table[i].count * table[i].size,
453 			      table[i].func,
454 			      table[i].line, table[i].caller,
455 			      table[i].time);
456 	}
457 }
458 
459 /**
460  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
461  * @table: the memory metadata table to insert into
462  * @meta: the memory metadata to insert
463  *
464  * Return: true if the table is full after inserting, false otherwise
465  */
466 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
467 				      struct qdf_mem_header *meta)
468 {
469 	int i;
470 
471 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 		if (!table[i].count) {
473 			qdf_str_lcopy(table[i].func, meta->func,
474 				      QDF_MEM_FUNC_NAME_SIZE);
475 			table[i].line = meta->line;
476 			table[i].size = meta->size;
477 			table[i].count = 1;
478 			table[i].caller = meta->caller;
479 			table[i].time = meta->time;
480 			break;
481 		}
482 
483 		if (qdf_str_eq(table[i].func, meta->func) &&
484 		    table[i].line == meta->line &&
485 		    table[i].size == meta->size &&
486 		    table[i].caller == meta->caller) {
487 			table[i].count++;
488 			break;
489 		}
490 	}
491 
492 	/* return true if the table is now full */
493 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
494 }
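/*
 * For illustration: three live allocations that share the same
 * func/line/size/caller tuple plus one from elsewhere collapse into just
 * two table rows when printed, e.g. (function names here are hypothetical):
 *
 *      3 x   128 =    384B @ foo_attach:210   <caller> <timestamp>
 *      1 x    64 =     64B @ bar_init:88      <caller> <timestamp>
 */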
495 
496 /**
497  * qdf_mem_domain_print() - output agnostic memory domain print logic
498  * @domain: the memory domain to print
499  * @print: the print adapter function
500  * @print_priv: the private data to be consumed by @print
501  * @threshold: the threshold value set by user to list top allocations
502  * @mem_print: pointer to function which prints the memory allocation data
503  *
504  * Return: None
505  */
506 static void qdf_mem_domain_print(qdf_list_t *domain,
507 				 qdf_abstract_print print,
508 				 void *print_priv,
509 				 uint32_t threshold,
510 				 void (*mem_print)(struct __qdf_mem_info *,
511 						   qdf_abstract_print,
512 						   void *, uint32_t))
513 {
514 	QDF_STATUS status;
515 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
516 	qdf_list_node_t *node;
517 
518 	qdf_mem_zero(table, sizeof(table));
519 	qdf_mem_debug_print_header(print, print_priv, threshold);
520 
521 	/* hold lock while inserting to avoid use-after-free of the metadata */
522 	qdf_spin_lock(&qdf_mem_list_lock);
523 	status = qdf_list_peek_front(domain, &node);
524 	while (QDF_IS_STATUS_SUCCESS(status)) {
525 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
526 		bool is_full = qdf_mem_meta_table_insert(table, meta);
527 
528 		qdf_spin_unlock(&qdf_mem_list_lock);
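		/* drop the lock before printing; the print adapter may block */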
529 
530 		if (is_full) {
531 			(*mem_print)(table, print, print_priv, threshold);
532 			qdf_mem_zero(table, sizeof(table));
533 		}
534 
535 		qdf_spin_lock(&qdf_mem_list_lock);
536 		status = qdf_list_peek_next(domain, node, &node);
537 	}
538 	qdf_spin_unlock(&qdf_mem_list_lock);
539 
540 	(*mem_print)(table, print, print_priv, threshold);
541 }
542 
543 /**
544  * qdf_mem_seq_start() - sequential callback to start
545  * @seq: seq_file handle
546  * @pos: The start position of the sequence
547  *
548  * Return: iterator pointer, or NULL if iteration is complete
549  */
550 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
551 {
552 	enum qdf_debug_domain domain = *pos;
553 
554 	if (!qdf_debug_domain_valid(domain))
555 		return NULL;
556 
557 	/* just use the current position as our iterator */
558 	return pos;
559 }
560 
561 /**
562  * qdf_mem_seq_next() - next sequential callback
563  * @seq: seq_file handle
564  * @v: the current iterator
565  * @pos: the current position
566  *
567  * Get the next node and release previous node.
568  *
569  * Return: iterator pointer, or NULL if iteration is complete
570  */
571 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
572 {
573 	++*pos;
574 
575 	return qdf_mem_seq_start(seq, pos);
576 }
577 
578 /**
579  * qdf_mem_seq_stop() - stop sequential callback
580  * @seq: seq_file handle
581  * @v: current iterator
582  *
583  * Return: None
584  */
585 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
586 
587 /**
588  * qdf_mem_seq_show() - print sequential callback
589  * @seq: seq_file handle
590  * @v: current iterator
591  *
592  * Return: 0 - success
593  */
594 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
595 {
596 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
597 
598 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
599 		   qdf_debug_domain_name(domain_id), domain_id);
600 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
601 			     seq_printf_printer,
602 			     seq,
603 			     0,
604 			     qdf_mem_meta_table_print);
605 
606 	return 0;
607 }
608 
609 /* sequential file operation table */
610 static const struct seq_operations qdf_mem_seq_ops = {
611 	.start = qdf_mem_seq_start,
612 	.next  = qdf_mem_seq_next,
613 	.stop  = qdf_mem_seq_stop,
614 	.show  = qdf_mem_seq_show,
615 };
616 
617 
618 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
619 {
620 	return seq_open(file, &qdf_mem_seq_ops);
621 }
622 
623 /**
624  * qdf_major_alloc_show() - print sequential callback
625  * @seq: seq_file handle
626  * @v: current iterator
627  *
628  * Return: 0 - success
629  */
630 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
631 {
632 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
633 	struct major_alloc_priv *priv;
634 	qdf_list_t *list;
635 
636 	priv = (struct major_alloc_priv *)seq->private;
637 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
638 		   qdf_debug_domain_name(domain_id), domain_id);
639 
640 	switch (priv->type) {
641 	case LIST_TYPE_MEM:
642 		list = qdf_mem_list_get(domain_id);
643 		break;
644 	case LIST_TYPE_DMA:
645 		list = qdf_mem_dma_list(domain_id);
646 		break;
647 	default:
648 		list = NULL;
649 		break;
650 	}
651 
652 	if (list)
653 		qdf_mem_domain_print(list,
654 				     seq_printf_printer,
655 				     seq,
656 				     priv->threshold,
657 				     qdf_print_major_alloc);
658 
659 	return 0;
660 }
661 
662 /* sequential file operation table created to track major allocs */
663 static const struct seq_operations qdf_major_allocs_seq_ops = {
664 	.start = qdf_mem_seq_start,
665 	.next = qdf_mem_seq_next,
666 	.stop = qdf_mem_seq_stop,
667 	.show = qdf_major_alloc_show,
668 };
669 
670 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
671 {
672 	void *private = inode->i_private;
673 	struct seq_file *seq;
674 	int rc;
675 
676 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
677 	if (rc == 0) {
678 		seq = file->private_data;
679 		seq->private = private;
680 	}
681 	return rc;
682 }
683 
684 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
685 					     const char __user *user_buf,
686 					     size_t count,
687 					     loff_t *pos)
688 {
689 	char buf[32];
690 	ssize_t buf_size;
691 	uint32_t threshold;
692 	struct seq_file *seq = file->private_data;
693 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
694 
695 	buf_size = min(count, (sizeof(buf) - 1));
696 	if (buf_size <= 0)
697 		return 0;
698 	if (copy_from_user(buf, user_buf, buf_size))
699 		return -EFAULT;
700 	buf[buf_size] = '\0';
701 	if (!kstrtou32(buf, 10, &threshold))
702 		priv->threshold = threshold;
703 	return buf_size;
704 }
705 
706 /* file operation table for listing major allocs */
707 static const struct file_operations fops_qdf_major_allocs = {
708 	.owner = THIS_MODULE,
709 	.open = qdf_major_allocs_open,
710 	.read = seq_read,
711 	.llseek = seq_lseek,
712 	.release = seq_release,
713 	.write = qdf_major_alloc_set_threshold,
714 };
715 
716 /* debugfs file operation table */
717 static const struct file_operations fops_qdf_mem_debugfs = {
718 	.owner = THIS_MODULE,
719 	.open = qdf_mem_debugfs_open,
720 	.read = seq_read,
721 	.llseek = seq_lseek,
722 	.release = seq_release,
723 };
724 
725 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
726 {
727 	if (is_initial_mem_debug_disabled)
728 		return QDF_STATUS_SUCCESS;
729 
730 	if (!qdf_mem_debugfs_root)
731 		return QDF_STATUS_E_FAILURE;
732 
733 	debugfs_create_file("list",
734 			    S_IRUSR,
735 			    qdf_mem_debugfs_root,
736 			    NULL,
737 			    &fops_qdf_mem_debugfs);
738 
739 	debugfs_create_file("major_mem_allocs",
740 			    0600,
741 			    qdf_mem_debugfs_root,
742 			    &mem_priv,
743 			    &fops_qdf_major_allocs);
744 
745 	debugfs_create_file("major_dma_allocs",
746 			    0600,
747 			    qdf_mem_debugfs_root,
748 			    &dma_priv,
749 			    &fops_qdf_major_allocs);
750 
751 	return QDF_STATUS_SUCCESS;
752 }
753 
754 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
755 {
756 	return QDF_STATUS_SUCCESS;
757 }
758 
759 #else /* MEMORY_DEBUG */
760 
761 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
762 {
763 	return QDF_STATUS_E_NOSUPPORT;
764 }
765 
766 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
767 {
768 	return QDF_STATUS_E_NOSUPPORT;
769 }
770 
771 #endif /* MEMORY_DEBUG */
772 
773 
774 static void qdf_mem_debugfs_exit(void)
775 {
776 	debugfs_remove_recursive(qdf_mem_debugfs_root);
777 	qdf_mem_debugfs_root = NULL;
778 }
779 
780 static QDF_STATUS qdf_mem_debugfs_init(void)
781 {
782 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
783 
784 	if (!qdf_debugfs_root)
785 		return QDF_STATUS_E_FAILURE;
786 
787 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
788 
789 	if (!qdf_mem_debugfs_root)
790 		return QDF_STATUS_E_FAILURE;
791 
792 
793 	debugfs_create_atomic_t("kmalloc",
794 				S_IRUSR,
795 				qdf_mem_debugfs_root,
796 				&qdf_mem_stat.kmalloc);
797 
798 	debugfs_create_atomic_t("dma",
799 				S_IRUSR,
800 				qdf_mem_debugfs_root,
801 				&qdf_mem_stat.dma);
802 
803 	debugfs_create_atomic_t("skb",
804 				S_IRUSR,
805 				qdf_mem_debugfs_root,
806 				&qdf_mem_stat.skb);
807 
808 	return QDF_STATUS_SUCCESS;
809 }
810 
811 #else /* WLAN_DEBUGFS */
812 
813 static QDF_STATUS qdf_mem_debugfs_init(void)
814 {
815 	return QDF_STATUS_E_NOSUPPORT;
816 }
817 static void qdf_mem_debugfs_exit(void) {}
818 
819 
820 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
821 {
822 	return QDF_STATUS_E_NOSUPPORT;
823 }
824 
825 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
826 {
827 	return QDF_STATUS_E_NOSUPPORT;
828 }
829 
830 #endif /* WLAN_DEBUGFS */
831 
832 void qdf_mem_kmalloc_inc(qdf_size_t size)
833 {
834 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
835 }
836 
837 static void qdf_mem_dma_inc(qdf_size_t size)
838 {
839 	qdf_atomic_add(size, &qdf_mem_stat.dma);
840 }
841 
842 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
843 void qdf_mem_skb_inc(qdf_size_t size)
844 {
845 	qdf_atomic_add(size, &qdf_mem_stat.skb);
846 }
847 
848 void qdf_mem_skb_dec(qdf_size_t size)
849 {
850 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
851 }
852 #endif
853 
854 void qdf_mem_kmalloc_dec(qdf_size_t size)
855 {
856 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
857 }
858 
859 static inline void qdf_mem_dma_dec(qdf_size_t size)
860 {
861 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
862 }
863 
864 /**
865  * __qdf_mempool_init() - Create and initialize memory pool
866  *
867  * @osdev: platform device object
868  * @pool_addr: address of the pool created
869  * @elem_cnt: no. of elements in pool
870  * @elem_size: size of each pool element in bytes
871  * @flags: pool creation flags
872  *
873  * Return: 0 on success, -ENOMEM on failure
874  */
875 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
876 		       int elem_cnt, size_t elem_size, u_int32_t flags)
877 {
878 	__qdf_mempool_ctxt_t *new_pool = NULL;
879 	u_int32_t align = L1_CACHE_BYTES;
880 	unsigned long aligned_pool_mem;
881 	int pool_id;
882 	int i;
883 
884 	if (prealloc_disabled) {
885 		/* TBD: We can maintain a list of pools in qdf_device_t
886 		 * to help debugging
887 		 * when pre-allocation is not enabled
888 		 */
889 		new_pool = (__qdf_mempool_ctxt_t *)
890 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
891 		if (!new_pool)
892 			return -ENOMEM;
893 
894 		memset(new_pool, 0, sizeof(*new_pool));
895 		/* TBD: define flags for zeroing buffers etc */
896 		new_pool->flags = flags;
897 		new_pool->elem_size = elem_size;
898 		new_pool->max_elem = elem_cnt;
899 		*pool_addr = new_pool;
900 		return 0;
901 	}
902 
903 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
904 		if (!osdev->mem_pool[pool_id])
905 			break;
906 	}
907 
908 	if (pool_id == MAX_MEM_POOLS)
909 		return -ENOMEM;
910 
911 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
912 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
913 	if (!new_pool)
914 		return -ENOMEM;
915 
916 	memset(new_pool, 0, sizeof(*new_pool));
917 	/* TBD: define flags for zeroing buffers etc */
918 	new_pool->flags = flags;
919 	new_pool->pool_id = pool_id;
920 
921 	/* Round up the element size to cacheline */
922 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
923 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
924 				((align)?(align - 1):0);
925 
926 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
927 	if (!new_pool->pool_mem) {
928 			/* TBD: Check if we need get_free_pages above */
929 		kfree(new_pool);
930 		osdev->mem_pool[pool_id] = NULL;
931 		return -ENOMEM;
932 	}
933 
934 	spin_lock_init(&new_pool->lock);
935 
936 	/* Initialize free list */
937 	/* round the pool base address up to a multiple of align */
938 	aligned_pool_mem = roundup((unsigned long)new_pool->pool_mem, align);
939 	STAILQ_INIT(&new_pool->free_list);
940 
941 	for (i = 0; i < elem_cnt; i++)
942 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
943 			(mempool_elem_t *)(aligned_pool_mem +
944 			(new_pool->elem_size * i)), mempool_entry);
945 
946 
947 	new_pool->free_cnt = elem_cnt;
948 	*pool_addr = new_pool;
949 	return 0;
950 }
951 qdf_export_symbol(__qdf_mempool_init);
952 
953 /**
954  * __qdf_mempool_destroy() - Destroy memory pool
955  * @osdev: platform device object
956  * @pool: handle to memory pool
957  *
958  * Returns: none
959  */
960 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
961 {
962 	int pool_id = 0;
963 
964 	if (!pool)
965 		return;
966 
967 	if (prealloc_disabled) {
968 		kfree(pool);
969 		return;
970 	}
971 
972 	pool_id = pool->pool_id;
973 
974 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
975 	kfree(pool->pool_mem);
976 	kfree(pool);
977 	osdev->mem_pool[pool_id] = NULL;
978 }
979 qdf_export_symbol(__qdf_mempool_destroy);
980 
981 /**
982  * __qdf_mempool_alloc() - Allocate an element from the memory pool
983  *
984  * @osdev: platform device object
985  * @pool: handle to memory pool
986  *
987  * Return: Pointer to the allocated element or NULL if the pool is empty
988  */
989 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
990 {
991 	void *buf = NULL;
992 
993 	if (!pool)
994 		return NULL;
995 
996 	if (prealloc_disabled)
997 		return  qdf_mem_malloc(pool->elem_size);
998 
999 	spin_lock_bh(&pool->lock);
1000 
1001 	buf = STAILQ_FIRST(&pool->free_list);
1002 	if (buf) {
1003 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1004 		pool->free_cnt--;
1005 	}
1006 
1007 	/* TBD: Update free count if debug is enabled */
1008 	spin_unlock_bh(&pool->lock);
1009 
1010 	return buf;
1011 }
1012 qdf_export_symbol(__qdf_mempool_alloc);
1013 
1014 /**
1015  * __qdf_mempool_free() - Free a memory pool element
1016  * @osdev: Platform device object
1017  * @pool: Handle to memory pool
1018  * @buf: Element to be freed
1019  *
1020  * Returns: none
1021  */
1022 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1023 {
1024 	if (!pool)
1025 		return;
1026 
1027 
1028 	if (prealloc_disabled)
1029 		return qdf_mem_free(buf);
1030 
1031 	spin_lock_bh(&pool->lock);
1032 	pool->free_cnt++;
1033 
1034 	STAILQ_INSERT_TAIL
1035 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1036 	spin_unlock_bh(&pool->lock);
1037 }
1038 qdf_export_symbol(__qdf_mempool_free);
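/*
 * Typical pool lifecycle, as a minimal sketch (error handling trimmed;
 * "my_elem_t" and the osdev handle are assumptions supplied by the caller):
 *
 *	__qdf_mempool_t pool;
 *	my_elem_t *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(my_elem_t), 0))
 *		return;				// pool creation failed
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		// ... use elem ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * With prealloc_disabled set (the default), alloc/free degenerate to plain
 * qdf_mem_malloc()/qdf_mem_free() and no backing pool memory is reserved.
 */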
1039 
1040 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
1041 /**
1042  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1043  * @size: the number of bytes to allocate
1044  *
1045  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1046  * a chunk of pre-allocated memory. If size is less than or equal to
1047  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1048  *
1049  * Return: NULL on failure, non-NULL on success
1050  */
1051 static void *qdf_mem_prealloc_get(size_t size)
1052 {
1053 	void *ptr;
1054 
1055 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1056 		return NULL;
1057 
1058 	ptr = wcnss_prealloc_get(size);
1059 	if (!ptr)
1060 		return NULL;
1061 
1062 	memset(ptr, 0, size);
1063 
1064 	return ptr;
1065 }
1066 
1067 static inline bool qdf_mem_prealloc_put(void *ptr)
1068 {
1069 	return wcnss_prealloc_put(ptr);
1070 }
1071 #else
1072 static inline void *qdf_mem_prealloc_get(size_t size)
1073 {
1074 	return NULL;
1075 }
1076 
1077 static inline bool qdf_mem_prealloc_put(void *ptr)
1078 {
1079 	return false;
1080 }
1081 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
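/*
 * Net effect, assuming a hypothetical threshold of 8 KiB (the real value of
 * WCNSS_PRE_ALLOC_GET_THRESHOLD is owned by the CNSS prealloc driver): a
 * 32 KiB request is first tried against the pre-allocated slabs, while a
 * 4 KiB request returns NULL here and falls through to the regular kzalloc
 * paths in the callers below.
 */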
1082 
1083 static int qdf_mem_malloc_flags(void)
1084 {
1085 	if (in_interrupt() || irqs_disabled() || in_atomic())
1086 		return GFP_ATOMIC;
1087 
1088 	return GFP_KERNEL;
1089 }
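/*
 * For instance, an allocation made from an IRQ handler, a tasklet, or with
 * interrupts disabled must not sleep, so GFP_ATOMIC is selected; ordinary
 * process context gets the sleepable GFP_KERNEL path. Note that in_atomic()
 * cannot detect every non-sleepable context (e.g. spinlock-held sections on
 * non-preemptible kernels), so callers in doubt should pass an explicit
 * atomic flag instead of relying on this heuristic.
 */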
1090 
1091 /* External Function implementation */
1092 #ifdef MEMORY_DEBUG
1093 /**
1094  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1095  *
1096  * Return: value of mem_debug_disabled qdf module argument
1097  */
1098 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1099 bool qdf_mem_debug_config_get(void)
1100 {
1101 	/* Return false if the DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1102 	return false;
1103 }
1104 #else
1105 bool qdf_mem_debug_config_get(void)
1106 {
1107 	return mem_debug_disabled;
1108 }
1109 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1110 
1111 /**
1112  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1113  *
1114  * Return: none
1115  */
1116 static void qdf_mem_debug_init(void)
1117 {
1118 	int i;
1119 
1120 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1121 
1122 	if (is_initial_mem_debug_disabled)
1123 		return;
1124 
1125 	/* Initializing each debug domain list with a maximum size of 60000 */
1126 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1127 		qdf_list_create(&qdf_mem_domains[i], 60000);
1128 	qdf_spinlock_create(&qdf_mem_list_lock);
1129 
1130 	/* dma */
1131 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1132 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1133 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1134 }
1135 
1136 static uint32_t
1137 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1138 			       qdf_list_t *mem_list)
1139 {
1140 	if (is_initial_mem_debug_disabled)
1141 		return 0;
1142 
1143 	if (qdf_list_empty(mem_list))
1144 		return 0;
1145 
1146 	qdf_err("Memory leaks detected in %s domain!",
1147 		qdf_debug_domain_name(domain));
1148 	qdf_mem_domain_print(mem_list,
1149 			     qdf_err_printer,
1150 			     NULL,
1151 			     0,
1152 			     qdf_mem_meta_table_print);
1153 
1154 	return mem_list->count;
1155 }
1156 
1157 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1158 {
1159 	uint32_t leak_count = 0;
1160 	int i;
1161 
1162 	if (is_initial_mem_debug_disabled)
1163 		return;
1164 
1165 	/* detect and print leaks */
1166 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1167 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1168 
1169 	if (leak_count)
1170 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1171 				   leak_count);
1172 }
1173 
1174 /**
1175  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1176  *
1177  * Return: none
1178  */
1179 static void qdf_mem_debug_exit(void)
1180 {
1181 	int i;
1182 
1183 	if (is_initial_mem_debug_disabled)
1184 		return;
1185 
1186 	/* mem */
1187 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1188 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1189 		qdf_list_destroy(qdf_mem_list_get(i));
1190 
1191 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1192 
1193 	/* dma */
1194 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1195 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1196 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1197 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1198 }
1199 
1200 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1201 			   void *caller, uint32_t flag)
1202 {
1203 	QDF_STATUS status;
1204 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1205 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1206 	struct qdf_mem_header *header;
1207 	void *ptr;
1208 	unsigned long start, duration;
1209 
1210 	if (is_initial_mem_debug_disabled)
1211 		return __qdf_mem_malloc(size, func, line);
1212 
1213 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1214 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1215 		return NULL;
1216 	}
1217 
1218 	ptr = qdf_mem_prealloc_get(size);
1219 	if (ptr)
1220 		return ptr;
1221 
1222 	if (!flag)
1223 		flag = qdf_mem_malloc_flags();
1224 
1225 	start = qdf_mc_timer_get_system_time();
1226 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1227 	duration = qdf_mc_timer_get_system_time() - start;
1228 
1229 	if (duration > QDF_MEM_WARN_THRESHOLD)
1230 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1231 			 duration, size, func, line);
1232 
1233 	if (!header) {
1234 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1235 		return NULL;
1236 	}
1237 
1238 	qdf_mem_header_init(header, size, func, line, caller);
1239 	qdf_mem_trailer_init(header);
1240 	ptr = qdf_mem_get_ptr(header);
1241 
1242 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1243 	status = qdf_list_insert_front(mem_list, &header->node);
1244 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1245 	if (QDF_IS_STATUS_ERROR(status))
1246 		qdf_err("Failed to insert memory header; status %d", status);
1247 
1248 	qdf_mem_kmalloc_inc(ksize(header));
1249 
1250 	return ptr;
1251 }
1252 qdf_export_symbol(qdf_mem_malloc_debug);
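/*
 * Note: callers do not normally invoke qdf_mem_malloc_debug() directly; the
 * public qdf_mem_malloc() macro in qdf_mem.h is expected to expand roughly
 * as follows when MEMORY_DEBUG is set (a sketch, not the verbatim header
 * definition):
 *
 *	#define qdf_mem_malloc(size) \
 *		qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
 *
 * which is how the func/line/caller metadata lands in struct qdf_mem_header.
 */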
1253 
1254 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1255 {
1256 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1257 	struct qdf_mem_header *header;
1258 	enum qdf_mem_validation_bitmap error_bitmap;
1259 
1260 	if (is_initial_mem_debug_disabled) {
1261 		__qdf_mem_free(ptr);
1262 		return;
1263 	}
1264 
1265 	/* freeing a null pointer is valid */
1266 	if (qdf_unlikely(!ptr))
1267 		return;
1268 
1269 	if (qdf_mem_prealloc_put(ptr))
1270 		return;
1271 
1272 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1273 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1274 				   ptr);
1275 
1276 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1277 
1278 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1279 	header = qdf_mem_get_header(ptr);
1280 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1281 	error_bitmap |= qdf_mem_trailer_validate(header);
1282 
1283 	if (!error_bitmap) {
1284 		header->freed = true;
1285 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1286 				     &header->node);
1287 	}
1288 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1289 
1290 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1291 				    func, line);
1292 
1293 	qdf_mem_kmalloc_dec(ksize(header));
1294 	kfree(header);
1295 }
1296 qdf_export_symbol(qdf_mem_free_debug);
1297 
1298 void qdf_mem_check_for_leaks(void)
1299 {
1300 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1301 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1302 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1303 	uint32_t leaks_count = 0;
1304 
1305 	if (is_initial_mem_debug_disabled)
1306 		return;
1307 
1308 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1309 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1310 
1311 	if (leaks_count)
1312 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1313 				   leaks_count);
1314 }
1315 
1316 /**
1317  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1318  * qdf_mem_multi_pages_alloc
1319  * @osdev: OS device handle pointer
1320  * @pages: Multi page information storage
1321  * @element_size: Each element size
1322  * @element_num: Total number of elements should be allocated
1323  * @memctxt: Memory context
1324  * @cacheable: Coherent memory or cacheable memory
1325  * @func: Caller of this allocator
1326  * @line: Line number of the caller
1327  * @caller: Return address of the caller
1328  *
1329  * This function will allocate large size of memory over multiple pages.
1330  * Large size of contiguous memory allocation will fail frequently, then
1331  * instead of allocate large memory by one shot, allocate through multiple, non
1332  * contiguous memory and combine pages when actual usage
1333  *
1334  * Return: None
1335  */
1336 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1337 				     struct qdf_mem_multi_page_t *pages,
1338 				     size_t element_size, uint16_t element_num,
1339 				     qdf_dma_context_t memctxt, bool cacheable,
1340 				     const char *func, uint32_t line,
1341 				     void *caller)
1342 {
1343 	uint16_t page_idx;
1344 	struct qdf_mem_dma_page_t *dma_pages;
1345 	void **cacheable_pages = NULL;
1346 	uint16_t i;
1347 
1348 	if (!pages->page_size)
1349 		pages->page_size = qdf_page_size;
1350 
1351 	pages->num_element_per_page = pages->page_size / element_size;
1352 	if (!pages->num_element_per_page) {
1353 		qdf_print("Invalid page %d or element size %d",
1354 			  (int)pages->page_size, (int)element_size);
1355 		goto out_fail;
1356 	}
1357 
1358 	pages->num_pages = element_num / pages->num_element_per_page;
1359 	if (element_num % pages->num_element_per_page)
1360 		pages->num_pages++;
1361 
1362 	if (cacheable) {
1363 		/* Pages information storage */
1364 		pages->cacheable_pages = qdf_mem_malloc_debug(
1365 			pages->num_pages * sizeof(pages->cacheable_pages),
1366 			func, line, caller, 0);
1367 		if (!pages->cacheable_pages)
1368 			goto out_fail;
1369 
1370 		cacheable_pages = pages->cacheable_pages;
1371 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1372 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1373 				pages->page_size, func, line, caller, 0);
1374 			if (!cacheable_pages[page_idx])
1375 				goto page_alloc_fail;
1376 		}
1377 		pages->dma_pages = NULL;
1378 	} else {
1379 		pages->dma_pages = qdf_mem_malloc_debug(
1380 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1381 			func, line, caller, 0);
1382 		if (!pages->dma_pages)
1383 			goto out_fail;
1384 
1385 		dma_pages = pages->dma_pages;
1386 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1387 			dma_pages->page_v_addr_start =
1388 				qdf_mem_alloc_consistent_debug(
1389 					osdev, osdev->dev, pages->page_size,
1390 					&dma_pages->page_p_addr,
1391 					func, line, caller);
1392 			if (!dma_pages->page_v_addr_start) {
1393 				qdf_print("dmaable page alloc fail pi %d",
1394 					  page_idx);
1395 				goto page_alloc_fail;
1396 			}
1397 			dma_pages->page_v_addr_end =
1398 				dma_pages->page_v_addr_start + pages->page_size;
1399 			dma_pages++;
1400 		}
1401 		pages->cacheable_pages = NULL;
1402 	}
1403 	return;
1404 
1405 page_alloc_fail:
1406 	if (cacheable) {
1407 		for (i = 0; i < page_idx; i++)
1408 			qdf_mem_free_debug(pages->cacheable_pages[i],
1409 					   func, line);
1410 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1411 	} else {
1412 		dma_pages = pages->dma_pages;
1413 		for (i = 0; i < page_idx; i++) {
1414 			qdf_mem_free_consistent_debug(
1415 				osdev, osdev->dev,
1416 				pages->page_size, dma_pages->page_v_addr_start,
1417 				dma_pages->page_p_addr, memctxt, func, line);
1418 			dma_pages++;
1419 		}
1420 		qdf_mem_free_debug(pages->dma_pages, func, line);
1421 	}
1422 
1423 out_fail:
1424 	pages->cacheable_pages = NULL;
1425 	pages->dma_pages = NULL;
1426 	pages->num_pages = 0;
1427 }
1428 
1429 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1430 
1431 /**
1432  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1433  * @osdev: OS device handle pointer
1434  * @pages: Multi page information storage
1435  * @memctxt: Memory context
1436  * @cacheable: Coherent memory or cacheable memory
1437  * @func: Caller of this allocator
1438  * @line: Line number of the caller
1439  *
1440  * This function frees memory that was allocated over multiple pages.
1441  *
1442  * Return: None
1443  */
1444 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1445 				    struct qdf_mem_multi_page_t *pages,
1446 				    qdf_dma_context_t memctxt, bool cacheable,
1447 				    const char *func, uint32_t line)
1448 {
1449 	unsigned int page_idx;
1450 	struct qdf_mem_dma_page_t *dma_pages;
1451 
1452 	if (!pages->page_size)
1453 		pages->page_size = qdf_page_size;
1454 
1455 	if (cacheable) {
1456 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1457 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1458 					   func, line);
1459 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1460 	} else {
1461 		dma_pages = pages->dma_pages;
1462 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1463 			qdf_mem_free_consistent_debug(
1464 				osdev, osdev->dev, pages->page_size,
1465 				dma_pages->page_v_addr_start,
1466 				dma_pages->page_p_addr, memctxt, func, line);
1467 			dma_pages++;
1468 		}
1469 		qdf_mem_free_debug(pages->dma_pages, func, line);
1470 	}
1471 
1472 	pages->cacheable_pages = NULL;
1473 	pages->dma_pages = NULL;
1474 	pages->num_pages = 0;
1475 }
1476 
1477 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1478 
1479 #else
1480 static void qdf_mem_debug_init(void) {}
1481 
1482 static void qdf_mem_debug_exit(void) {}
1483 
1484 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1485 {
1486 	void *ptr;
1487 
1488 	ptr = qdf_mem_prealloc_get(size);
1489 	if (ptr)
1490 		return ptr;
1491 
1492 	ptr = kzalloc(size, GFP_ATOMIC);
1493 	if (!ptr) {
1494 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1495 			      size, func, line);
1496 		return NULL;
1497 	}
1498 
1499 	qdf_mem_kmalloc_inc(ksize(ptr));
1500 
1501 	return ptr;
1502 }
1503 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1504 
1505 /**
1506  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1507  * @osdev: OS device handle pointer
1508  * @pages: Multi page information storage
1509  * @element_size: Each element size
1510  * @element_num: Total number of elements should be allocated
1511  * @memctxt: Memory context
1512  * @cacheable: Coherent memory or cacheable memory
1513  *
1514  * This function allocates a large amount of memory over multiple pages.
1515  * Large contiguous allocations fail frequently, so instead of allocating
1516  * the full buffer in one shot, allocate multiple non-contiguous pages and
1517  * combine them at the point of actual usage.
1518  *
1519  * Return: None
1520  */
1521 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1522 			       struct qdf_mem_multi_page_t *pages,
1523 			       size_t element_size, uint16_t element_num,
1524 			       qdf_dma_context_t memctxt, bool cacheable)
1525 {
1526 	uint16_t page_idx;
1527 	struct qdf_mem_dma_page_t *dma_pages;
1528 	void **cacheable_pages = NULL;
1529 	uint16_t i;
1530 
1531 	if (!pages->page_size)
1532 		pages->page_size = qdf_page_size;
1533 
1534 	pages->num_element_per_page = pages->page_size / element_size;
1535 	if (!pages->num_element_per_page) {
1536 		qdf_print("Invalid page %d or element size %d",
1537 			  (int)pages->page_size, (int)element_size);
1538 		goto out_fail;
1539 	}
1540 
1541 	pages->num_pages = element_num / pages->num_element_per_page;
1542 	if (element_num % pages->num_element_per_page)
1543 		pages->num_pages++;
1544 
1545 	if (cacheable) {
1546 		/* Pages information storage */
1547 		pages->cacheable_pages = qdf_mem_malloc(
1548 			pages->num_pages * sizeof(pages->cacheable_pages));
1549 		if (!pages->cacheable_pages)
1550 			goto out_fail;
1551 
1552 		cacheable_pages = pages->cacheable_pages;
1553 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1554 			cacheable_pages[page_idx] =
1555 				qdf_mem_malloc(pages->page_size);
1556 			if (!cacheable_pages[page_idx])
1557 				goto page_alloc_fail;
1558 		}
1559 		pages->dma_pages = NULL;
1560 	} else {
1561 		pages->dma_pages = qdf_mem_malloc(
1562 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1563 		if (!pages->dma_pages)
1564 			goto out_fail;
1565 
1566 		dma_pages = pages->dma_pages;
1567 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1568 			dma_pages->page_v_addr_start =
1569 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1570 					 pages->page_size,
1571 					&dma_pages->page_p_addr);
1572 			if (!dma_pages->page_v_addr_start) {
1573 				qdf_print("dmaable page alloc fail pi %d",
1574 					page_idx);
1575 				goto page_alloc_fail;
1576 			}
1577 			dma_pages->page_v_addr_end =
1578 				dma_pages->page_v_addr_start + pages->page_size;
1579 			dma_pages++;
1580 		}
1581 		pages->cacheable_pages = NULL;
1582 	}
1583 	return;
1584 
1585 page_alloc_fail:
1586 	if (cacheable) {
1587 		for (i = 0; i < page_idx; i++)
1588 			qdf_mem_free(pages->cacheable_pages[i]);
1589 		qdf_mem_free(pages->cacheable_pages);
1590 	} else {
1591 		dma_pages = pages->dma_pages;
1592 		for (i = 0; i < page_idx; i++) {
1593 			qdf_mem_free_consistent(
1594 				osdev, osdev->dev, pages->page_size,
1595 				dma_pages->page_v_addr_start,
1596 				dma_pages->page_p_addr, memctxt);
1597 			dma_pages++;
1598 		}
1599 		qdf_mem_free(pages->dma_pages);
1600 	}
1601 
1602 out_fail:
1603 	pages->cacheable_pages = NULL;
1604 	pages->dma_pages = NULL;
1605 	pages->num_pages = 0;
1606 	return;
1607 }
1608 qdf_export_symbol(qdf_mem_multi_pages_alloc);
1609 
1610 /**
1611  * qdf_mem_multi_pages_free() - free large size of kernel memory
1612  * @osdev: OS device handle pointer
1613  * @pages: Multi page information storage
1614  * @memctxt: Memory context
1615  * @cacheable: Coherent memory or cacheable memory
1616  *
1617  * This function frees memory that was allocated over multiple pages.
1618  *
1619  * Return: None
1620  */
1621 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1622 			      struct qdf_mem_multi_page_t *pages,
1623 			      qdf_dma_context_t memctxt, bool cacheable)
1624 {
1625 	unsigned int page_idx;
1626 	struct qdf_mem_dma_page_t *dma_pages;
1627 
1628 	if (!pages->page_size)
1629 		pages->page_size = qdf_page_size;
1630 
1631 	if (cacheable) {
1632 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1633 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1634 		qdf_mem_free(pages->cacheable_pages);
1635 	} else {
1636 		dma_pages = pages->dma_pages;
1637 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1638 			qdf_mem_free_consistent(
1639 				osdev, osdev->dev, pages->page_size,
1640 				dma_pages->page_v_addr_start,
1641 				dma_pages->page_p_addr, memctxt);
1642 			dma_pages++;
1643 		}
1644 		qdf_mem_free(pages->dma_pages);
1645 	}
1646 
1647 	pages->cacheable_pages = NULL;
1648 	pages->dma_pages = NULL;
1649 	pages->num_pages = 0;
1650 	return;
1651 }
1652 qdf_export_symbol(qdf_mem_multi_pages_free);
1653 #endif
1654 
1655 void __qdf_mem_free(void *ptr)
1656 {
1657 	if (!ptr)
1658 		return;
1659 
1660 	if (qdf_mem_prealloc_put(ptr))
1661 		return;
1662 
1663 	qdf_mem_kmalloc_dec(ksize(ptr));
1664 
1665 	kfree(ptr);
1666 }
1667 
1668 qdf_export_symbol(__qdf_mem_free);
1669 
1670 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
1671 {
1672 	void *ptr;
1673 
1674 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1675 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1676 			     line);
1677 		return NULL;
1678 	}
1679 
1680 	ptr = qdf_mem_prealloc_get(size);
1681 	if (ptr)
1682 		return ptr;
1683 
1684 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1685 	if (!ptr)
1686 		return NULL;
1687 
1688 	qdf_mem_kmalloc_inc(ksize(ptr));
1689 
1690 	return ptr;
1691 }
1692 
1693 qdf_export_symbol(__qdf_mem_malloc);
1694 
1695 void *qdf_aligned_malloc_fl(uint32_t *size,
1696 			    void **vaddr_unaligned,
1697 				qdf_dma_addr_t *paddr_unaligned,
1698 				qdf_dma_addr_t *paddr_aligned,
1699 				uint32_t align,
1700 			    const char *func, uint32_t line)
1701 {
1702 	void *vaddr_aligned;
1703 	uint32_t align_alloc_size;
1704 
1705 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
1706 			line);
1707 	if (!*vaddr_unaligned) {
1708 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
1709 		return NULL;
1710 	}
1711 
1712 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1713 
1714 	/* Re-allocate additional bytes to align the base address only if
1715 	 * the above allocation returns an unaligned address. The reason for
1716 	 * trying the exact-size allocation above is that the OS allocates
1717 	 * blocks of power-of-2 pages and then frees the extra pages.
1718 	 * e.g., for a ring size of 1MB, the allocation below will
1719 	 * request 1MB plus 7 bytes for alignment, which will cause a
1720 	 * 2MB block allocation, and that sometimes fails due to
1721 	 * memory fragmentation.
1722 	 */
1723 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1724 		align_alloc_size = *size + align - 1;
1725 
1726 		qdf_mem_free(*vaddr_unaligned);
1727 		*vaddr_unaligned = qdf_mem_malloc_fl(
1728 				(qdf_size_t)align_alloc_size, func, line);
1729 		if (!*vaddr_unaligned) {
1730 			qdf_warn("Failed to alloc %uB @ %s:%d",
1731 				 align_alloc_size, func, line);
1732 			return NULL;
1733 		}
1734 
1735 		*paddr_unaligned = qdf_mem_virt_to_phys(
1736 				*vaddr_unaligned);
1737 		*size = align_alloc_size;
1738 	}
1739 
1740 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
1741 		((unsigned long)(*paddr_unaligned), align);
1742 
1743 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1744 			((unsigned long)(*paddr_aligned) -
1745 			 (unsigned long)(*paddr_unaligned)));
1746 
1747 	return vaddr_aligned;
1748 }
1749 
1750 qdf_export_symbol(qdf_aligned_malloc_fl);
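/*
 * Worked example of the arithmetic above: with align = 8 and an unaligned
 * physical address of 0x1005, qdf_align() rounds the paddr up to 0x1008 and
 * the same +3 byte offset is applied to the virtual address, keeping the
 * returned vaddr/paddr pair in lockstep. The align - 1 slack bytes allocated
 * on the retry path guarantee the rounded address still falls inside the
 * buffer.
 */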
1751 
1752 /**
1753  * qdf_mem_multi_page_link() - Make links for multi page elements
1754  * @osdev: OS device handle pointer
1755  * @pages: Multi page information storage
1756  * @elem_size: Single element size
1757  * @elem_count: number of elements to be linked
1758  * @cacheable: Coherent memory or cacheable memory
1759  *
1760  * This function links the elements of a multi-page allocation into a list
1761  *
1762  * Return: 0 on success, -ENOMEM if a page pointer is NULL
1763  */
1764 int qdf_mem_multi_page_link(qdf_device_t osdev,
1765 		struct qdf_mem_multi_page_t *pages,
1766 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1767 {
1768 	uint16_t i, i_int;
1769 	void *page_info;
1770 	void **c_elem = NULL;
1771 	uint32_t num_link = 0;
1772 
1773 	for (i = 0; i < pages->num_pages; i++) {
1774 		if (cacheable)
1775 			page_info = pages->cacheable_pages[i];
1776 		else
1777 			page_info = pages->dma_pages[i].page_v_addr_start;
1778 
1779 		if (!page_info)
1780 			return -ENOMEM;
1781 
1782 		c_elem = (void **)page_info;
1783 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1784 			if (i_int == (pages->num_element_per_page - 1)) {
1785 				if (cacheable)
1786 					*c_elem = pages->
1787 						cacheable_pages[i + 1];
1788 				else
1789 					*c_elem = pages->
1790 						dma_pages[i + 1].
1791 							page_v_addr_start;
1792 				num_link++;
1793 				break;
1794 			} else {
1795 				*c_elem =
1796 					(void *)(((char *)c_elem) + elem_size);
1797 			}
1798 			num_link++;
1799 			c_elem = (void **)*c_elem;
1800 
1801 			/* Last link established exit */
1802 			if (num_link == (elem_count - 1))
1803 				break;
1804 		}
1805 	}
1806 
1807 	if (c_elem)
1808 		*c_elem = NULL;
1809 
1810 	return 0;
1811 }
1812 qdf_export_symbol(qdf_mem_multi_page_link);
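/*
 * How the multi-page helpers fit together, as a hedged sketch ("struct
 * my_desc" and memctxt are placeholders supplied by the caller):
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_desc),
 *				  1024, memctxt, true);
 *	if (!pages.num_pages)
 *		return;			// allocation failed
 *	// chain all elements into an intrusive list: the first pointer-sized
 *	// word of each element stores the address of the next element
 *	if (qdf_mem_multi_page_link(osdev, &pages, sizeof(struct my_desc),
 *				    1024, true))
 *		goto free_pages;
 *	// ... carve elements off the chain ...
 * free_pages:
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */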
1813 
1814 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1815 {
1816 	/* special case where dst_addr or src_addr can be NULL */
1817 	if (!num_bytes)
1818 		return;
1819 
1820 	QDF_BUG(dst_addr);
1821 	QDF_BUG(src_addr);
1822 	if (!dst_addr || !src_addr)
1823 		return;
1824 
1825 	memcpy(dst_addr, src_addr, num_bytes);
1826 }
1827 qdf_export_symbol(qdf_mem_copy);
1828 
1829 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
1830 {
1831 	qdf_shared_mem_t *shared_mem;
1832 	qdf_dma_addr_t dma_addr, paddr;
1833 	int ret;
1834 
1835 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
1836 	if (!shared_mem)
1837 		return NULL;
1838 
1839 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
1840 				size, qdf_mem_get_dma_addr_ptr(osdev,
1841 						&shared_mem->mem_info));
1842 	if (!shared_mem->vaddr) {
1843 		qdf_err("Unable to allocate DMA memory for shared resource");
1844 		qdf_mem_free(shared_mem);
1845 		return NULL;
1846 	}
1847 
1848 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
1849 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
1850 
1851 	qdf_mem_zero(shared_mem->vaddr, size);
1852 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
1853 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1854 
1855 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
1856 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
1857 				      shared_mem->vaddr, dma_addr, size);
1858 	if (ret) {
1859 		qdf_err("Unable to get DMA sgtable");
1860 		qdf_mem_free_consistent(osdev, osdev->dev,
1861 					shared_mem->mem_info.size,
1862 					shared_mem->vaddr,
1863 					dma_addr,
1864 					qdf_get_dma_mem_context(shared_mem,
1865 								memctx));
1866 		qdf_mem_free(shared_mem);
1867 		return NULL;
1868 	}
1869 
1870 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
1871 
1872 	return shared_mem;
1873 }
1874 
1875 qdf_export_symbol(qdf_mem_shared_mem_alloc);
1876 
1877 /**
1878  * qdf_mem_copy_toio() - copy memory
1879  * @dst_addr: Pointer to destination memory location (to copy to)
1880  * @src_addr: Pointer to source memory location (to copy from)
1881  * @num_bytes: Number of bytes to copy.
1882  *
1883  * Return: none
1884  */
1885 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1886 {
1887 	if (0 == num_bytes) {
1888 		/* special case where dst_addr or src_addr can be NULL */
1889 		return;
1890 	}
1891 
1892 	if ((!dst_addr) || (!src_addr)) {
1893 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1894 			  "%s called with NULL parameter, source:%pK destination:%pK",
1895 			  __func__, src_addr, dst_addr);
1896 		QDF_ASSERT(0);
1897 		return;
1898 	}
1899 	memcpy_toio(dst_addr, src_addr, num_bytes);
1900 }
1901 
1902 qdf_export_symbol(qdf_mem_copy_toio);
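
A short usage sketch, assuming an already ioremap()ed scratch region; write_msg_toio() and the 16-byte message are illustrative only.

/* illustrative MMIO copy into a device-mapped scratch area */
static void write_msg_toio(void *mmio_scratch)
{
	uint8_t msg[16] = { 0xde, 0xad, 0xbe, 0xef };

	qdf_mem_copy_toio(mmio_scratch, msg, sizeof(msg));
}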
1903 
1904 /**
1905  * qdf_mem_set_io() - set (fill) memory with a specified byte value
1906  * @ptr: Pointer to memory that will be set
1907  * @num_bytes: Number of bytes to be set
1908  * @value: Byte value to set in memory
1909  *
1910  * Return: None
1911  */
1912 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1913 {
1914 	if (!ptr) {
1915 		qdf_print("%s called with NULL parameter ptr", __func__);
1916 		return;
1917 	}
1918 	memset_io(ptr, value, num_bytes);
1919 }
1920 
1921 qdf_export_symbol(qdf_mem_set_io);
1922 
1923 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1924 {
1925 	QDF_BUG(ptr);
1926 	if (!ptr)
1927 		return;
1928 
1929 	memset(ptr, value, num_bytes);
1930 }
1931 qdf_export_symbol(qdf_mem_set);
1932 
1933 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1934 {
1935 	/* special case where dst_addr or src_addr can be NULL */
1936 	if (!num_bytes)
1937 		return;
1938 
1939 	QDF_BUG(dst_addr);
1940 	QDF_BUG(src_addr);
1941 	if (!dst_addr || !src_addr)
1942 		return;
1943 
1944 	memmove(dst_addr, src_addr, num_bytes);
1945 }
1946 qdf_export_symbol(qdf_mem_move);
1947 
1948 int qdf_mem_cmp(const void *left, const void *right, size_t size)
1949 {
1950 	QDF_BUG(left);
1951 	QDF_BUG(right);
1952 
1953 	return memcmp(left, right, size);
1954 }
1955 qdf_export_symbol(qdf_mem_cmp);
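
Note that qdf_mem_cmp() keeps memcmp() semantics, where zero means equal, which is easy to misread at call sites. A minimal wrapper sketch, assuming QDF_MAC_ADDR_SIZE from qdf_types.h:

/* returns true when two MAC addresses match byte-for-byte */
static bool is_same_mac(const uint8_t *a, const uint8_t *b)
{
	return qdf_mem_cmp(a, b, QDF_MAC_ADDR_SIZE) == 0;
}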
1956 
1957 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1958 /**
1959  * qdf_mem_dma_alloc() - allocate memory for DMA
1960  * @osdev: OS device handle
1961  * @dev: Pointer to device handle
1962  * @size: Size to be allocated
1963  * @phy_addr: Physical address
1964  *
1965  * Return: pointer to allocated memory, or NULL if the allocation fails
1966  */
1967 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1968 				      qdf_size_t size,
1969 				      qdf_dma_addr_t *phy_addr)
1970 {
1971 	void *vaddr;
1972 
1973 	vaddr = qdf_mem_malloc(size);
1974 	/* this type conversion suppresses the "cast from pointer to integer
1975 	 * of different size" warning seen on some platforms
1976 	 */
1977 	*phy_addr = (uintptr_t)vaddr;
1978 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1979 	return vaddr;
1980 }
1981 
1982 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
1983 	!defined(QCA_WIFI_QCN9000)
1984 
1985 #define QCA8074_RAM_BASE 0x50000000
1986 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1987 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1988 			qdf_dma_addr_t *phy_addr)
1989 {
1990 	void *vaddr = NULL;
1991 	int i;
1992 
1993 	*phy_addr = 0;
1994 
1995 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1996 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1997 					   qdf_mem_malloc_flags());
1998 
1999 		if (!vaddr) {
2000 			qdf_err("%s failed, size: %zu!", __func__, size);
2001 			return NULL;
2002 		}
2003 
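		/* keep the buffer only if its physical address lands at or
		 * above the QCA8074 RAM base; otherwise free it and retry
		 */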
2004 		if (*phy_addr >= QCA8074_RAM_BASE)
2005 			return vaddr;
2006 
2007 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2008 	}
2009 
2010 	return NULL;
2011 }
2012 
2013 #else
2014 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2015 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2016 {
2017 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2018 }
2019 #endif
2020 
2021 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2022 static inline void
2023 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2024 {
2025 	qdf_mem_free(vaddr);
2026 }
2027 #else
2028 
2029 static inline void
2030 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2031 {
2032 	dma_free_coherent(dev, size, vaddr, paddr);
2033 }
2034 #endif
2035 
2036 #ifdef MEMORY_DEBUG
2037 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2038 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2039 				     const char *func, uint32_t line,
2040 				     void *caller)
2041 {
2042 	QDF_STATUS status;
2043 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2044 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2045 	struct qdf_mem_header *header;
2046 	void *vaddr;
2047 
2048 	if (is_initial_mem_debug_disabled)
2049 		return __qdf_mem_alloc_consistent(osdev, dev,
2050 						  size, paddr,
2051 						  func, line);
2052 
2053 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2054 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2055 		return NULL;
2056 	}
2057 
2058 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2059 				   paddr);
2060 
2061 	if (!vaddr) {
2062 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2063 		return NULL;
2064 	}
2065 
2066 	header = qdf_mem_dma_get_header(vaddr, size);
2067 	/* For DMA buffers we only add trailers; this function initializes
2068 	 * the header structure at the tail.
2069 	 * Prefixing the header into a DMA buffer causes SMMU faults, so
2070 	 * do not prefix headers into DMA buffers.
2071 	 */
2072 	qdf_mem_header_init(header, size, func, line, caller);
2073 
2074 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2075 	status = qdf_list_insert_front(mem_list, &header->node);
2076 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2077 	if (QDF_IS_STATUS_ERROR(status))
2078 		qdf_err("Failed to insert memory header; status %d", status);
2079 
2080 	qdf_mem_dma_inc(size);
2081 
2082 	return vaddr;
2083 }
2084 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
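
The resulting layout is sketched below, with a hypothetical restatement of qdf_mem_dma_get_header() (the real helper is defined elsewhere in QDF) showing that the tracking header is a trailer appended after the caller's region:

/*
 *   vaddr                             vaddr + size
 *   |<------------ user data ------------>|<- struct qdf_mem_header ->|
 */
static inline struct qdf_mem_header *dma_trailer_of(void *vaddr,
						    qdf_size_t size)
{
	/* hypothetical; assumes qdf_mem_dma_get_header() skips past the
	 * user region to reach the trailer
	 */
	return (struct qdf_mem_header *)((uint8_t *)vaddr + size);
}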
2085 
2086 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2087 				   qdf_size_t size, void *vaddr,
2088 				   qdf_dma_addr_t paddr,
2089 				   qdf_dma_context_t memctx,
2090 				   const char *func, uint32_t line)
2091 {
2092 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2093 	struct qdf_mem_header *header;
2094 	enum qdf_mem_validation_bitmap error_bitmap;
2095 
2096 	if (is_initial_mem_debug_disabled) {
2097 		__qdf_mem_free_consistent(
2098 					  osdev, dev,
2099 					  size, vaddr,
2100 					  paddr, memctx);
2101 		return;
2102 	}
2103 
2104 	/* freeing a null pointer is valid */
2105 	if (qdf_unlikely(!vaddr))
2106 		return;
2107 
2108 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2109 
2110 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2111 	/* For DMA buffers we only add trailers; this function retrieves
2112 	 * the header structure from the tail.
2113 	 * Prefixing the header into a DMA buffer causes SMMU faults, so
2114 	 * do not prefix headers into DMA buffers.
2115 	 */
2116 	header = qdf_mem_dma_get_header(vaddr, size);
2117 	error_bitmap = qdf_mem_header_validate(header, domain);
2118 	if (!error_bitmap) {
2119 		header->freed = true;
2120 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2121 				     &header->node);
2122 	}
2123 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2124 
2125 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2126 
2127 	qdf_mem_dma_dec(header->size);
2128 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2129 }
2130 qdf_export_symbol(qdf_mem_free_consistent_debug);
2131 #endif /* MEMORY_DEBUG */
2132 
2133 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2134 			       qdf_size_t size, void *vaddr,
2135 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2136 {
2137 	qdf_mem_dma_dec(size);
2138 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2139 }
2140 
2141 qdf_export_symbol(__qdf_mem_free_consistent);
2142 
2143 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2144 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2145 				 const char *func, uint32_t line)
2146 {
2147 	void *vaddr;
2148 
2149 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2150 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2151 			     size, func, line);
2152 		return NULL;
2153 	}
2154 
2155 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2156 
2157 	if (vaddr)
2158 		qdf_mem_dma_inc(size);
2159 
2160 	return vaddr;
2161 }
2162 
2163 qdf_export_symbol(__qdf_mem_alloc_consistent);
2164 
2165 void *qdf_aligned_mem_alloc_consistent_fl(
2166 	qdf_device_t osdev, uint32_t *size,
2167 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2168 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2169 	const char *func, uint32_t line)
2170 {
2171 	void *vaddr_aligned;
2172 	uint32_t align_alloc_size;
2173 
2174 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2175 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2176 	if (!*vaddr_unaligned) {
2177 		qdf_warn("Failed to alloc %uB @ %s:%d",
2178 			 *size, func, line);
2179 		return NULL;
2180 	}
2181 
2182 	/* Re-allocate with additional bytes to align the base address only
2183 	 * if the above allocation returns an unaligned address. The reason
2184 	 * for trying an exact-size allocation first is that the OS tries to
2185 	 * allocate blocks of power-of-2 pages and then frees the extra
2186 	 * pages. E.g., for a ring size of 1MB, the allocation below would
2187 	 * request 1MB plus 7 bytes for alignment, which causes a 2MB block
2188 	 * allocation, and that sometimes fails due to memory
2189 	 * fragmentation.
2190 	 */
2191 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2192 		align_alloc_size = *size + align - 1;
2193 
2194 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2195 					*vaddr_unaligned,
2196 					*paddr_unaligned, 0);
2197 
2198 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2199 				osdev, osdev->dev, align_alloc_size,
2200 				paddr_unaligned);
2201 		if (!*vaddr_unaligned) {
2202 			qdf_warn("Failed to alloc %uB @ %s:%d",
2203 				 align_alloc_size, func, line);
2204 			return NULL;
2205 		}
2206 
2207 		*size = align_alloc_size;
2208 	}
2209 
2210 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2211 			(unsigned long)(*paddr_unaligned), align);
2212 
2213 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2214 				 ((unsigned long)(*paddr_aligned) -
2215 				  (unsigned long)(*paddr_unaligned)));
2216 
2217 	return vaddr_aligned;
2218 }
2219 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
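
A worked example of the alignment math above: with align = 8 and *paddr_unaligned = 0x1003, qdf_align() rounds the physical address up to paddr_aligned = 0x1008, and the virtual pointer is advanced by the same 5-byte delta so the two stay in step. A call sketch, assuming the usual qdf_aligned_mem_alloc_consistent() wrapper macro supplies func/line:

/* illustrative ring-base setup; all names are local to this sketch */
static void *setup_ring_example(qdf_device_t osdev)
{
	uint32_t ring_size = 1024 * 1024;	/* may grow by align - 1 */
	void *vaddr_unaligned;
	qdf_dma_addr_t paddr_unaligned, paddr_aligned;

	return qdf_aligned_mem_alloc_consistent(osdev, &ring_size,
						&vaddr_unaligned,
						&paddr_unaligned,
						&paddr_aligned, 8);
}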
2220 
2221 /**
2222  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2223  * @osdev: OS device handle
2224  * @bus_addr: dma address to give to the device
2225  * @size: Size of the memory block
2226  * @direction: direction data will be DMAed
2227  *
2228  * Assign memory to the remote device.
2229  * The cache lines are flushed to ram or invalidated as needed.
2230  * The cache lines are flushed to RAM or invalidated as needed.
2231  * Return: none
2232  */
2233 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2234 					qdf_dma_addr_t bus_addr,
2235 					qdf_size_t size,
2236 					enum dma_data_direction direction)
2237 {
2238 	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
2239 }
2240 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2241 
2242 /**
2243  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2244  * @osdev: OS device handle
2245  * @bus_addr: dma address to give to the cpu
2246  * @size: Size of the memory block
2247  * @direction: direction data will be DMAed
2248  *
2249  * Assign memory to the CPU.
2250  *
2251  * Return: none
2252  */
2253 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2254 				     qdf_dma_addr_t bus_addr,
2255 				     qdf_size_t size,
2256 				     enum dma_data_direction direction)
2257 {
2258 	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
2259 }
2260 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
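
A sketch of how the two sync helpers pair up around a streaming RX buffer; rx_buffer_handoff() and its parameters are illustrative, and DMA_FROM_DEVICE comes from the kernel's dma-direction definitions.

static void rx_buffer_handoff(qdf_device_t osdev, qdf_dma_addr_t bus_addr,
			      qdf_size_t len)
{
	/* hand the buffer to the device before it DMAs the frame in */
	qdf_mem_dma_sync_single_for_device(osdev, bus_addr, len,
					   DMA_FROM_DEVICE);

	/* ... device completes the DMA (e.g. RX completion interrupt) ... */

	/* reclaim the buffer for the CPU before parsing the payload */
	qdf_mem_dma_sync_single_for_cpu(osdev, bus_addr, len,
					DMA_FROM_DEVICE);
}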
2261 
2262 void qdf_mem_init(void)
2263 {
2264 	qdf_mem_debug_init();
2265 	qdf_net_buf_debug_init();
2266 	qdf_mem_debugfs_init();
2267 	qdf_mem_debug_debugfs_init();
2268 }
2269 qdf_export_symbol(qdf_mem_init);
2270 
2271 void qdf_mem_exit(void)
2272 {
2273 	qdf_mem_debug_debugfs_exit();
2274 	qdf_mem_debugfs_exit();
2275 	qdf_net_buf_debug_exit();
2276 	qdf_mem_debug_exit();
2277 }
2278 qdf_export_symbol(qdf_mem_exit);
2279 
2280 /**
2281  * qdf_ether_addr_copy() - copy an Ethernet address
2282  *
2283  * @dst_addr: A six-byte array Ethernet address destination
2284  * @src_addr: A six-byte array Ethernet address source
2285  *
2286  * Please note: dst & src must both be aligned to u16.
2287  *
2288  * Return: none
2289  */
2290 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2291 {
2292 	if ((!dst_addr) || (!src_addr)) {
2293 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2294 			  "%s called with NULL parameter, source:%pK destination:%pK",
2295 			  __func__, src_addr, dst_addr);
2296 		QDF_ASSERT(0);
2297 		return;
2298 	}
2299 	ether_addr_copy(dst_addr, src_addr);
2300 }
2301 qdf_export_symbol(qdf_ether_addr_copy);
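
The u16-alignment requirement is inherited from ether_addr_copy(); a minimal sketch where the alignment holds by construction (names illustrative):

static void copy_mac_example(void)
{
	/* uint16_t arrays guarantee the required 2-byte alignment */
	uint16_t src[3] = { 0x0200, 0x0000, 0x0100 };
	uint16_t dst[3];

	qdf_ether_addr_copy(dst, src);
}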
2302 
2303 int32_t qdf_dma_mem_stats_read(void)
2304 {
2305 	return qdf_atomic_read(&qdf_mem_stat.dma);
2306 }
2307 
2308 qdf_export_symbol(qdf_dma_mem_stats_read);
2309 
2310 int32_t qdf_heap_mem_stats_read(void)
2311 {
2312 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2313 }
2314 
2315 qdf_export_symbol(qdf_heap_mem_stats_read);
2316 
2317 int32_t qdf_skb_mem_stats_read(void)
2318 {
2319 	return qdf_atomic_read(&qdf_mem_stat.skb);
2320 }
2321 
2322 qdf_export_symbol(qdf_skb_mem_stats_read);
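
Together the three counters give a coarse live-memory snapshot, e.g. for periodic watermark logging. log_mem_watermarks() is illustrative, and qdf_nofl_info() is assumed available alongside the qdf_nofl_err() used earlier in this file.

static void log_mem_watermarks(void)
{
	qdf_nofl_info("mem stats: heap=%d dma=%d skb=%d",
		      qdf_heap_mem_stats_read(),
		      qdf_dma_mem_stats_read(),
		      qdf_skb_mem_stats_read());
}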
2323 
2324