1 /*
2  * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_mem
21  * This file provides OS-dependent memory management APIs
22  */
23 
24 #include "qdf_debugfs.h"
25 #include "qdf_mem.h"
26 #include "qdf_nbuf.h"
27 #include "qdf_lock.h"
28 #include "qdf_mc_timer.h"
29 #include "qdf_module.h"
30 #include <qdf_trace.h>
31 #include "qdf_atomic.h"
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 
38 #if defined(CONFIG_CNSS)
39 #include <net/cnss.h>
40 #endif
41 
42 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
43 #include <net/cnss_prealloc.h>
44 #endif
45 
46 #ifdef MEMORY_DEBUG
47 #include "qdf_debug_domain.h"
48 #include <qdf_list.h>
49 
50 /* Preprocessor Definitions and Constants */
51 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 MB */
52 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
53 #define QDF_DEBUG_STRING_SIZE 512
54 
55 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
56 static qdf_spinlock_t qdf_mem_list_lock;
57 
58 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
59 static qdf_spinlock_t qdf_mem_dma_list_lock;
60 
61 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
62 {
63 	return &qdf_mem_domains[domain];
64 }
65 
66 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
67 {
68 	return &qdf_mem_dma_domains[domain];
69 }
70 
71 /**
72  * struct qdf_mem_header - memory object to debug
73  * @node: list node used to track the allocation in its debug domain
74  * @domain: the active memory domain at time of allocation
75  * @freed: flag set during free, used to detect double frees
76  *	Use uint8_t so we can detect corruption
77  * @func: name of the function the allocation was made from
78  * @line: line number of the file the allocation was made from
79  * @size: size of the allocation in bytes
80  * @caller: Caller of the function for which memory is allocated
81  * @header: a known value, used to detect out-of-bounds access
82  * @time: timestamp at which allocation was made
83  */
84 struct qdf_mem_header {
85 	qdf_list_node_t node;
86 	enum qdf_debug_domain domain;
87 	uint8_t freed;
88 	char func[QDF_MEM_FUNC_NAME_SIZE];
89 	uint32_t line;
90 	uint32_t size;
91 	void *caller;
92 	uint64_t header;
93 	uint64_t time;
94 };
95 
96 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
97 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
98 
99 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
100 {
101 	return (struct qdf_mem_header *)ptr - 1;
102 }
103 
104 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
105 							    qdf_size_t size)
106 {
107 	return (struct qdf_mem_header *) ((uint8_t *) ptr + size);
108 }
109 
110 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
111 {
112 	return (uint64_t *)((void *)(header + 1) + header->size);
113 }
114 
115 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
116 {
117 	return (void *)(header + 1);
118 }
119 
120 /* number of bytes needed for the qdf memory debug information */
121 #define QDF_MEM_DEBUG_SIZE \
122 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
123 
124 /* number of bytes needed for the qdf dma memory debug information */
125 #define QDF_DMA_MEM_DEBUG_SIZE \
126 	(sizeof(struct qdf_mem_header))
127 
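/*
 * Illustrative layout (editor's sketch, not from the original source):
 * a debug allocation of @size user bytes is carved from one kzalloc
 * block as
 *
 *	+-----------------------+------------------+------------------+
 *	| struct qdf_mem_header | size user bytes  | WLAN_MEM_TRAILER |
 *	+-----------------------+------------------+------------------+
 *	^ header                ^ qdf_mem_get_ptr()  ^ qdf_mem_get_trailer()
 *
 * DMA allocations instead place the header *after* the user bytes (see
 * qdf_mem_dma_get_header()) and carry no trailer, since prefixing a
 * header into a DMA buffer can cause SMMU faults.
 */
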
128 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
129 {
130 	QDF_BUG(header);
131 	if (!header)
132 		return;
133 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
134 }
135 
136 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
137 				const char *func, uint32_t line, void *caller)
138 {
139 	QDF_BUG(header);
140 	if (!header)
141 		return;
142 
143 	header->domain = qdf_debug_domain_get();
144 	header->freed = false;
145 
146 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
147 
148 	header->line = line;
149 	header->size = size;
150 	header->caller = caller;
151 	header->header = WLAN_MEM_HEADER;
152 	header->time = qdf_get_log_timestamp();
153 }
154 
155 enum qdf_mem_validation_bitmap {
156 	QDF_MEM_BAD_HEADER = 1 << 0,
157 	QDF_MEM_BAD_TRAILER = 1 << 1,
158 	QDF_MEM_BAD_SIZE = 1 << 2,
159 	QDF_MEM_DOUBLE_FREE = 1 << 3,
160 	QDF_MEM_BAD_FREED = 1 << 4,
161 	QDF_MEM_BAD_NODE = 1 << 5,
162 	QDF_MEM_BAD_DOMAIN = 1 << 6,
163 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
164 };
165 
166 static enum qdf_mem_validation_bitmap
167 qdf_mem_trailer_validate(struct qdf_mem_header *header)
168 {
169 	enum qdf_mem_validation_bitmap error_bitmap = 0;
170 
171 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
172 		error_bitmap |= QDF_MEM_BAD_TRAILER;
173 	return error_bitmap;
174 }
175 
176 static enum qdf_mem_validation_bitmap
177 qdf_mem_header_validate(struct qdf_mem_header *header,
178 			enum qdf_debug_domain domain)
179 {
180 	enum qdf_mem_validation_bitmap error_bitmap = 0;
181 
182 	if (header->header != WLAN_MEM_HEADER)
183 		error_bitmap |= QDF_MEM_BAD_HEADER;
184 
185 	if (header->size > QDF_MEM_MAX_MALLOC)
186 		error_bitmap |= QDF_MEM_BAD_SIZE;
187 
188 	if (header->freed == true)
189 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
190 	else if (header->freed)
191 		error_bitmap |= QDF_MEM_BAD_FREED;
192 
193 	if (!qdf_list_node_in_any_list(&header->node))
194 		error_bitmap |= QDF_MEM_BAD_NODE;
195 
196 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
197 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
198 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
199 	else if (header->domain != domain)
200 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
201 
202 	return error_bitmap;
203 }
204 
205 static void
206 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
207 			    enum qdf_debug_domain current_domain,
208 			    enum qdf_mem_validation_bitmap error_bitmap,
209 			    const char *func,
210 			    uint32_t line)
211 {
212 	if (!error_bitmap)
213 		return;
214 
215 	if (error_bitmap & QDF_MEM_BAD_HEADER)
216 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
217 			header->header, WLAN_MEM_HEADER);
218 
219 	if (error_bitmap & QDF_MEM_BAD_SIZE)
220 		qdf_err("Corrupted memory size %u (expected < %d)",
221 			header->size, QDF_MEM_MAX_MALLOC);
222 
223 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
224 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
225 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
226 
227 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
228 		qdf_err("Memory has previously been freed");
229 
230 	if (error_bitmap & QDF_MEM_BAD_FREED)
231 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
232 
233 	if (error_bitmap & QDF_MEM_BAD_NODE)
234 		qdf_err("Corrupted memory header node or double free");
235 
236 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
237 		qdf_err("Corrupted memory domain 0x%x", header->domain);
238 
239 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
240 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
241 			qdf_debug_domain_name(header->domain), header->domain,
242 			qdf_debug_domain_name(current_domain), current_domain);
243 
244 	QDF_DEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
245 }
246 #endif /* MEMORY_DEBUG */
247 
248 u_int8_t prealloc_disabled = 1;
249 qdf_declare_param(prealloc_disabled, byte);
250 qdf_export_symbol(prealloc_disabled);
251 
252 #if defined WLAN_DEBUGFS
253 
254 /* Debugfs root directory for qdf_mem */
255 static struct dentry *qdf_mem_debugfs_root;
256 
257 /**
258  * struct __qdf_mem_stat - qdf memory statistics
259  * @kmalloc:	total kmalloc allocations
260  * @dma:	total dma allocations
261  * @skb:	total skb allocations
262  */
263 static struct __qdf_mem_stat {
264 	qdf_atomic_t kmalloc;
265 	qdf_atomic_t dma;
266 	qdf_atomic_t skb;
267 } qdf_mem_stat;
268 
269 void qdf_mem_kmalloc_inc(qdf_size_t size)
270 {
271 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
272 }
273 
274 static void qdf_mem_dma_inc(qdf_size_t size)
275 {
276 	qdf_atomic_add(size, &qdf_mem_stat.dma);
277 }
278 
279 void qdf_mem_skb_inc(qdf_size_t size)
280 {
281 	qdf_atomic_add(size, &qdf_mem_stat.skb);
282 }
283 
284 void qdf_mem_kmalloc_dec(qdf_size_t size)
285 {
286 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
287 }
288 
289 static inline void qdf_mem_dma_dec(qdf_size_t size)
290 {
291 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
292 }
293 
294 void qdf_mem_skb_dec(qdf_size_t size)
295 {
296 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
297 }
298 
299 #ifdef MEMORY_DEBUG
300 static int qdf_err_printer(void *priv, const char *fmt, ...)
301 {
302 	va_list args;
303 
304 	va_start(args, fmt);
305 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
306 	va_end(args);
307 
308 	return 0;
309 }
310 
311 static int seq_printf_printer(void *priv, const char *fmt, ...)
312 {
313 	struct seq_file *file = priv;
314 	va_list args;
315 
316 	va_start(args, fmt);
317 	seq_vprintf(file, fmt, args);
318 	seq_puts(file, "\n");
319 	va_end(args);
320 
321 	return 0;
322 }
323 
324 /**
325  * struct __qdf_mem_info - memory statistics
326  * @func: the function which allocated memory
327  * @line: the line at which allocation happened
328  * @size: the size of allocation
329  * @caller: Address of the caller function
330  * @count: how many allocations of same type
331  * @time: timestamp at which allocation happened
332  */
333 struct __qdf_mem_info {
334 	char func[QDF_MEM_FUNC_NAME_SIZE];
335 	uint32_t line;
336 	uint32_t size;
337 	void *caller;
338 	uint32_t count;
339 	uint64_t time;
340 };
341 
342 /*
343  * The table depth defines the de-duplication proximity scope.
344  * A deeper table takes longer to search, so choose a balanced value.
345  */
346 #define QDF_MEM_STAT_TABLE_SIZE 8
347 
348 /**
349  * qdf_mem_domain_print_header() - memory domain header print logic
350  * @print: the print adapter function
351  * @print_priv: the private data to be consumed by @print
352  *
353  * Return: None
354  */
355 static void qdf_mem_domain_print_header(qdf_abstract_print print,
356 					void *print_priv)
357 {
358 	print(print_priv,
359 	      "--------------------------------------------------------------");
360 	print(print_priv,
361 	      " count    size     total    filename     caller    timestamp");
362 	print(print_priv,
363 	      "--------------------------------------------------------------");
364 }
365 
366 /**
367  * qdf_mem_meta_table_print() - memory metadata table print logic
368  * @table: the memory metadata table to print
369  * @print: the print adapter function
370  * @print_priv: the private data to be consumed by @print
371  *
372  * Return: None
373  */
374 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
375 				     qdf_abstract_print print,
376 				     void *print_priv)
377 {
378 	int i;
379 	char debug_str[QDF_DEBUG_STRING_SIZE];
380 	size_t len = 0;
381 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
382 
383 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
384 			     "%s", debug_prefix);
385 
386 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
387 		if (!table[i].count)
388 			break;
389 
390 		print(print_priv,
391 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
392 		      table[i].count,
393 		      table[i].size,
394 		      table[i].count * table[i].size,
395 		      table[i].func,
396 		      table[i].line, table[i].caller,
397 		      table[i].time);
398 		len += qdf_scnprintf(debug_str + len,
399 				     sizeof(debug_str) - len,
400 				     " @ %s:%u %pS",
401 				     table[i].func,
402 				     table[i].line,
403 				     table[i].caller);
404 	}
405 	print(print_priv, "%s", debug_str);
406 }
407 
408 /**
409  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
410  * @table: the memory metadata table to insert into
411  * @meta: the memory metadata to insert
412  *
413  * Return: true if the table is full after inserting, false otherwise
414  */
415 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
416 				      struct qdf_mem_header *meta)
417 {
418 	int i;
419 
420 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
421 		if (!table[i].count) {
422 			qdf_str_lcopy(table[i].func, meta->func,
423 				      QDF_MEM_FUNC_NAME_SIZE);
424 			table[i].line = meta->line;
425 			table[i].size = meta->size;
426 			table[i].count = 1;
427 			table[i].caller = meta->caller;
428 			table[i].time = meta->time;
429 			break;
430 		}
431 
432 		if (qdf_str_eq(table[i].func, meta->func) &&
433 		    table[i].line == meta->line &&
434 		    table[i].size == meta->size &&
435 		    table[i].caller == meta->caller) {
436 			table[i].count++;
437 			break;
438 		}
439 	}
440 
441 	/* return true if the table is now full */
442 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
443 }
444 
445 /**
446  * qdf_mem_domain_print() - output agnostic memory domain print logic
447  * @domain: the memory domain to print
448  * @print: the print adapter function
449  * @print_priv: the private data to be consumed by @print
450  *
451  * Return: None
452  */
453 static void qdf_mem_domain_print(qdf_list_t *domain,
454 				 qdf_abstract_print print,
455 				 void *print_priv)
456 {
457 	QDF_STATUS status;
458 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
459 	qdf_list_node_t *node;
460 
461 	qdf_mem_zero(table, sizeof(table));
462 	qdf_mem_domain_print_header(print, print_priv);
463 
464 	/* hold lock while inserting to avoid use-after-free of the metadata */
465 	qdf_spin_lock(&qdf_mem_list_lock);
466 	status = qdf_list_peek_front(domain, &node);
467 	while (QDF_IS_STATUS_SUCCESS(status)) {
468 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
469 		bool is_full = qdf_mem_meta_table_insert(table, meta);
470 
471 		qdf_spin_unlock(&qdf_mem_list_lock);
472 
473 		if (is_full) {
474 			qdf_mem_meta_table_print(table, print, print_priv);
475 			qdf_mem_zero(table, sizeof(table));
476 		}
477 
478 		qdf_spin_lock(&qdf_mem_list_lock);
479 		status = qdf_list_peek_next(domain, node, &node);
480 	}
481 	qdf_spin_unlock(&qdf_mem_list_lock);
482 
483 	qdf_mem_meta_table_print(table, print, print_priv);
484 }
485 
486 /**
487  * qdf_mem_seq_start() - sequential callback to start
488  * @seq: seq_file handle
489  * @pos: The start position of the sequence
490  *
491  * Return: iterator pointer, or NULL if iteration is complete
492  */
493 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
494 {
495 	enum qdf_debug_domain domain = *pos;
496 
497 	if (!qdf_debug_domain_valid(domain))
498 		return NULL;
499 
500 	/* just use the current position as our iterator */
501 	return pos;
502 }
503 
504 /**
505  * qdf_mem_seq_next() - next sequential callback
506  * @seq: seq_file handle
507  * @v: the current iterator
508  * @pos: the current position
509  *
510  * Advances the position to the next memory debug domain.
511  *
512  * Return: iterator pointer, or NULL if iteration is complete
513  */
514 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
515 {
516 	++*pos;
517 
518 	return qdf_mem_seq_start(seq, pos);
519 }
520 
521 /**
522  * qdf_mem_seq_stop() - stop sequential callback
523  * @seq: seq_file handle
524  * @v: current iterator
525  *
526  * Return: None
527  */
528 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
529 
530 /**
531  * qdf_mem_seq_show() - print sequential callback
532  * @seq: seq_file handle
533  * @v: current iterator
534  *
535  * Return: 0 - success
536  */
537 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
538 {
539 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
540 
541 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
542 		   qdf_debug_domain_name(domain_id), domain_id);
543 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
544 			     seq_printf_printer, seq);
545 
546 	return 0;
547 }
548 
549 /* sequential file operation table */
550 static const struct seq_operations qdf_mem_seq_ops = {
551 	.start = qdf_mem_seq_start,
552 	.next  = qdf_mem_seq_next,
553 	.stop  = qdf_mem_seq_stop,
554 	.show  = qdf_mem_seq_show,
555 };
556 
557 
558 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
559 {
560 	return seq_open(file, &qdf_mem_seq_ops);
561 }
562 
563 /* debugfs file operation table */
564 static const struct file_operations fops_qdf_mem_debugfs = {
565 	.owner = THIS_MODULE,
566 	.open = qdf_mem_debugfs_open,
567 	.read = seq_read,
568 	.llseek = seq_lseek,
569 	.release = seq_release,
570 };
571 
572 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
573 {
574 	if (!qdf_mem_debugfs_root)
575 		return QDF_STATUS_E_FAILURE;
576 
577 	debugfs_create_file("list",
578 			    S_IRUSR,
579 			    qdf_mem_debugfs_root,
580 			    NULL,
581 			    &fops_qdf_mem_debugfs);
582 
583 	return QDF_STATUS_SUCCESS;
584 }
585 
586 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
587 {
588 	return QDF_STATUS_SUCCESS;
589 }
590 
591 #else /* MEMORY_DEBUG */
592 
593 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
594 {
595 	return QDF_STATUS_E_NOSUPPORT;
596 }
597 
598 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
599 {
600 	return QDF_STATUS_E_NOSUPPORT;
601 }
602 
603 #endif /* MEMORY_DEBUG */
604 
605 
606 static void qdf_mem_debugfs_exit(void)
607 {
608 	debugfs_remove_recursive(qdf_mem_debugfs_root);
609 	qdf_mem_debugfs_root = NULL;
610 }
611 
612 static QDF_STATUS qdf_mem_debugfs_init(void)
613 {
614 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
615 
616 	if (!qdf_debugfs_root)
617 		return QDF_STATUS_E_FAILURE;
618 
619 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
620 
621 	if (!qdf_mem_debugfs_root)
622 		return QDF_STATUS_E_FAILURE;
623 
624 
625 	debugfs_create_atomic_t("kmalloc",
626 				S_IRUSR,
627 				qdf_mem_debugfs_root,
628 				&qdf_mem_stat.kmalloc);
629 
630 	debugfs_create_atomic_t("dma",
631 				S_IRUSR,
632 				qdf_mem_debugfs_root,
633 				&qdf_mem_stat.dma);
634 
635 	debugfs_create_atomic_t("skb",
636 				S_IRUSR,
637 				qdf_mem_debugfs_root,
638 				&qdf_mem_stat.skb);
639 
640 	return QDF_STATUS_SUCCESS;
641 }
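
/*
 * Editor's note (illustrative, not from the original source): with
 * WLAN_DEBUGFS enabled, the nodes created above surface under the qdf
 * debugfs root returned by qdf_debugfs_get_root(); the exact mount
 * point is platform dependent. A hypothetical session:
 *
 *	$ cd /sys/kernel/debug/<qdf-root>/mem
 *	$ cat kmalloc		# outstanding kmalloc bytes
 *	$ cat dma		# outstanding DMA bytes
 *	$ cat skb		# outstanding skb bytes
 *	$ cat list		# per-domain allocations (MEMORY_DEBUG only)
 */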
642 
643 #else /* WLAN_DEBUGFS */
644 
645 static inline void qdf_mem_dma_inc(qdf_size_t size) {}
646 static inline void qdf_mem_dma_dec(qdf_size_t size) {}
647 
648 static QDF_STATUS qdf_mem_debugfs_init(void)
649 {
650 	return QDF_STATUS_E_NOSUPPORT;
651 }
652 static void qdf_mem_debugfs_exit(void) {}
653 
654 
655 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
656 {
657 	return QDF_STATUS_E_NOSUPPORT;
658 }
659 
660 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
661 {
662 	return QDF_STATUS_E_NOSUPPORT;
663 }
664 
665 #endif /* WLAN_DEBUGFS */
666 
667 /**
668  * __qdf_mempool_init() - Create and initialize memory pool
669  *
670  * @osdev: platform device object
671  * @pool_addr: address of the pool created
672  * @elem_cnt: no. of elements in pool
673  * @elem_size: size of each pool element in bytes
674  * @flags: flags
675  *
676  * Return: 0 on success, negative error code on failure
677  */
678 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
679 		       int elem_cnt, size_t elem_size, u_int32_t flags)
680 {
681 	__qdf_mempool_ctxt_t *new_pool = NULL;
682 	u_int32_t align = L1_CACHE_BYTES;
683 	unsigned long aligned_pool_mem;
684 	int pool_id;
685 	int i;
686 
687 	if (prealloc_disabled) {
688 		/* TBD: We can maintain a list of pools in qdf_device_t
689 		 * to help debugging
690 		 * when pre-allocation is not enabled
691 		 */
692 		new_pool = (__qdf_mempool_ctxt_t *)
693 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
694 		if (!new_pool)
695 			return -ENOMEM;
696 
697 		memset(new_pool, 0, sizeof(*new_pool));
698 		/* TBD: define flags for zeroing buffers etc */
699 		new_pool->flags = flags;
700 		new_pool->elem_size = elem_size;
701 		new_pool->max_elem = elem_cnt;
702 		*pool_addr = new_pool;
703 		return 0;
704 	}
705 
706 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
707 		if (!osdev->mem_pool[pool_id])
708 			break;
709 	}
710 
711 	if (pool_id == MAX_MEM_POOLS)
712 		return -ENOMEM;
713 
714 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
715 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
716 	if (!new_pool)
717 		return -ENOMEM;
718 
719 	memset(new_pool, 0, sizeof(*new_pool));
720 	/* TBD: define flags for zeroing buffers etc */
721 	new_pool->flags = flags;
722 	new_pool->pool_id = pool_id;
723 
724 	/* Round up the element size to cacheline */
725 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
726 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
727 				((align)?(align - 1):0);
728 
729 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
730 	if (!new_pool->pool_mem) {
731 		/* TBD: Check if we need get_free_pages above */
732 		kfree(new_pool);
733 		osdev->mem_pool[pool_id] = NULL;
734 		return -ENOMEM;
735 	}
736 
737 	spin_lock_init(&new_pool->lock);
738 
739 	/* Initialize free list */
740 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
741 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
742 	STAILQ_INIT(&new_pool->free_list);
743 
744 	for (i = 0; i < elem_cnt; i++)
745 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
746 			(mempool_elem_t *)(aligned_pool_mem +
747 			(new_pool->elem_size * i)), mempool_entry);
748 
749 
750 	new_pool->free_cnt = elem_cnt;
751 	*pool_addr = new_pool;
752 	return 0;
753 }
754 qdf_export_symbol(__qdf_mempool_init);
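
/*
 * Illustrative usage sketch (editor's addition; the element count and
 * size below are made-up values). A caller creates a pool once, then
 * allocates and frees elements from it:
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 1024, 512, 0))
 *		return -ENOMEM;
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		// ... use the 512-byte element ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * Note that when prealloc_disabled is set (the default above), alloc
 * and free simply fall back to qdf_mem_malloc()/qdf_mem_free() instead
 * of carving elements from the pre-allocated pool memory.
 */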
755 
756 /**
757  * __qdf_mempool_destroy() - Destroy memory pool
758  * @osdev: platform device object
759  * @pool: Handle to the memory pool
760  *
761  * Return: None
762  */
763 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
764 {
765 	int pool_id = 0;
766 
767 	if (!pool)
768 		return;
769 
770 	if (prealloc_disabled) {
771 		kfree(pool);
772 		return;
773 	}
774 
775 	pool_id = pool->pool_id;
776 
777 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
778 	kfree(pool->pool_mem);
779 	kfree(pool);
780 	osdev->mem_pool[pool_id] = NULL;
781 }
782 qdf_export_symbol(__qdf_mempool_destroy);
783 
784 /**
785  * __qdf_mempool_alloc() - Allocate an element from the memory pool
786  *
787  * @osdev: platform device object
788  * @pool: Handle to the memory pool
789  *
790  * Return: Pointer to the allocated element or NULL if the pool is empty
791  */
792 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
793 {
794 	void *buf = NULL;
795 
796 	if (!pool)
797 		return NULL;
798 
799 	if (prealloc_disabled)
800 		return  qdf_mem_malloc(pool->elem_size);
801 
802 	spin_lock_bh(&pool->lock);
803 
804 	buf = STAILQ_FIRST(&pool->free_list);
805 	if (buf) {
806 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
807 		pool->free_cnt--;
808 	}
809 
810 	/* TBD: Update free count if debug is enabled */
811 	spin_unlock_bh(&pool->lock);
812 
813 	return buf;
814 }
815 qdf_export_symbol(__qdf_mempool_alloc);
816 
817 /**
818  * __qdf_mempool_free() - Free a memory pool element
819  * @osdev: Platform device object
820  * @pool: Handle to memory pool
821  * @buf: Element to be freed
822  *
823  * Return: None
824  */
825 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
826 {
827 	if (!pool)
828 		return;
829 
830 
831 	if (prealloc_disabled)
832 		return qdf_mem_free(buf);
833 
834 	spin_lock_bh(&pool->lock);
835 	pool->free_cnt++;
836 
837 	STAILQ_INSERT_TAIL
838 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
839 	spin_unlock_bh(&pool->lock);
840 }
841 qdf_export_symbol(__qdf_mempool_free);
842 
843 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
844 /**
845  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
846  * @size: the number of bytes to allocate
847  *
848  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
849  * a chunk of pre-allocated memory. If size is less than or equal to
850  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
851  *
852  * Return: NULL on failure, non-NULL on success
853  */
854 static void *qdf_mem_prealloc_get(size_t size)
855 {
856 	void *ptr;
857 
858 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
859 		return NULL;
860 
861 	ptr = wcnss_prealloc_get(size);
862 	if (!ptr)
863 		return NULL;
864 
865 	memset(ptr, 0, size);
866 
867 	return ptr;
868 }
869 
870 static inline bool qdf_mem_prealloc_put(void *ptr)
871 {
872 	return wcnss_prealloc_put(ptr);
873 }
874 #else
875 static inline void *qdf_mem_prealloc_get(size_t size)
876 {
877 	return NULL;
878 }
879 
880 static inline bool qdf_mem_prealloc_put(void *ptr)
881 {
882 	return false;
883 }
884 #endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
885 
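/*
 * qdf_mem_malloc_flags() - pick GFP flags for the current context
 *
 * Editor's summary of the logic below: sleeping is not allowed in
 * interrupt or atomic context, so GFP_ATOMIC is used there and
 * GFP_KERNEL everywhere else.
 *
 * Return: GFP_ATOMIC or GFP_KERNEL
 */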
886 static int qdf_mem_malloc_flags(void)
887 {
888 	if (in_interrupt() || irqs_disabled() || in_atomic())
889 		return GFP_ATOMIC;
890 
891 	return GFP_KERNEL;
892 }
893 
894 /* External Function implementation */
895 #ifdef MEMORY_DEBUG
896 
897 /**
898  * qdf_mem_debug_init() - initialize qdf memory debug functionality
899  *
900  * Return: none
901  */
902 static void qdf_mem_debug_init(void)
903 {
904 	int i;
905 
906 	/* Initializing each list with a maximum size of 60000 */
907 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
908 		qdf_list_create(&qdf_mem_domains[i], 60000);
909 	qdf_spinlock_create(&qdf_mem_list_lock);
910 
911 	/* dma */
912 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
913 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
914 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
915 }
916 
917 static uint32_t
918 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
919 			       qdf_list_t *mem_list)
920 {
921 	if (qdf_list_empty(mem_list))
922 		return 0;
923 
924 	qdf_err("Memory leaks detected in %s domain!",
925 		qdf_debug_domain_name(domain));
926 	qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);
927 
928 	return mem_list->count;
929 }
930 
931 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
932 {
933 	uint32_t leak_count = 0;
934 	int i;
935 
936 	/* detect and print leaks */
937 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
938 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
939 
940 	if (leak_count)
941 		QDF_DEBUG_PANIC("%u fatal memory leaks detected!",
942 				leak_count);
943 }
944 
945 /**
946  * qdf_mem_debug_exit() - exit qdf memory debug functionality
947  *
948  * Return: none
949  */
950 static void qdf_mem_debug_exit(void)
951 {
952 	int i;
953 
954 	/* mem */
955 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
956 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
957 		qdf_list_destroy(qdf_mem_list_get(i));
958 
959 	qdf_spinlock_destroy(&qdf_mem_list_lock);
960 
961 	/* dma */
962 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
963 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
964 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
965 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
966 }
967 
968 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
969 			   void *caller, uint32_t flag)
970 {
971 	QDF_STATUS status;
972 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
973 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
974 	struct qdf_mem_header *header;
975 	void *ptr;
976 	unsigned long start, duration;
977 
978 	if (!size || size > QDF_MEM_MAX_MALLOC) {
979 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
980 		return NULL;
981 	}
982 
983 	ptr = qdf_mem_prealloc_get(size);
984 	if (ptr)
985 		return ptr;
986 
987 	if (!flag)
988 		flag = qdf_mem_malloc_flags();
989 
990 	start = qdf_mc_timer_get_system_time();
991 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
992 	duration = qdf_mc_timer_get_system_time() - start;
993 
994 	if (duration > QDF_MEM_WARN_THRESHOLD)
995 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
996 			 duration, size, func, line);
997 
998 	if (!header) {
999 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1000 		return NULL;
1001 	}
1002 
1003 	qdf_mem_header_init(header, size, func, line, caller);
1004 	qdf_mem_trailer_init(header);
1005 	ptr = qdf_mem_get_ptr(header);
1006 
1007 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1008 	status = qdf_list_insert_front(mem_list, &header->node);
1009 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1010 	if (QDF_IS_STATUS_ERROR(status))
1011 		qdf_err("Failed to insert memory header; status %d", status);
1012 
1013 	qdf_mem_kmalloc_inc(ksize(header));
1014 
1015 	return ptr;
1016 }
1017 qdf_export_symbol(qdf_mem_malloc_debug);
1018 
1019 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1020 {
1021 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1022 	struct qdf_mem_header *header;
1023 	enum qdf_mem_validation_bitmap error_bitmap;
1024 
1025 	/* freeing a null pointer is valid */
1026 	if (qdf_unlikely(!ptr))
1027 		return;
1028 
1029 	if (qdf_mem_prealloc_put(ptr))
1030 		return;
1031 
1032 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1033 		QDF_DEBUG_PANIC("Failed to free invalid memory location %pK",
1034 				ptr);
1035 
1036 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1037 
1038 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1039 	header = qdf_mem_get_header(ptr);
1040 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1041 	error_bitmap |= qdf_mem_trailer_validate(header);
1042 
1043 	if (!error_bitmap) {
1044 		header->freed = true;
1045 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1046 				     &header->node);
1047 	}
1048 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1049 
1050 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1051 				    func, line);
1052 
1053 	qdf_mem_kmalloc_dec(ksize(header));
1054 	kfree(header);
1055 }
1056 qdf_export_symbol(qdf_mem_free_debug);
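
/*
 * Illustrative call flow (editor's sketch): callers normally reach the
 * debug allocator through the qdf_mem_malloc()/qdf_mem_free() wrappers,
 * which are assumed (per qdf_mem.h) to forward __func__, __LINE__ and
 * the caller's return address:
 *
 *	struct foo *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	// ... the allocation is now tracked in the current debug domain
 *	qdf_mem_free(ctx);	// validates header/trailer, then kfree()s
 */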
1057 
1058 void qdf_mem_check_for_leaks(void)
1059 {
1060 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1061 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1062 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1063 	uint32_t leaks_count = 0;
1064 
1065 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1066 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1067 
1068 	if (leaks_count)
1069 		QDF_DEBUG_PANIC("%u fatal memory leaks detected!",
1070 				leaks_count);
1071 }
1072 
1073 /**
1074  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1075  * qdf_mem_multi_pages_alloc
1076  * @osdev: OS device handle pointer
1077  * @pages: Multi page information storage
1078  * @element_size: Each element size
1079  * @element_num: Total number of elements should be allocated
1080  * @memctxt: Memory context
1081  * @cacheable: Coherent memory or cacheable memory
1082  * @func: Caller of this allocator
1083  * @line: Line number of the caller
1084  * @caller: Return address of the caller
1085  *
1086  * This function allocates a large amount of memory across multiple pages.
1087  * Large contiguous allocations fail frequently, so instead of allocating
1088  * one large block in a single shot, it allocates multiple non-contiguous
1089  * pages and combines them at the point of actual use.
1090  *
1091  * Return: None
1092  */
1093 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1094 				     struct qdf_mem_multi_page_t *pages,
1095 				     size_t element_size, uint16_t element_num,
1096 				     qdf_dma_context_t memctxt, bool cacheable,
1097 				     const char *func, uint32_t line,
1098 				     void *caller)
1099 {
1100 	uint16_t page_idx;
1101 	struct qdf_mem_dma_page_t *dma_pages;
1102 	void **cacheable_pages = NULL;
1103 	uint16_t i;
1104 
1105 	pages->num_element_per_page = PAGE_SIZE / element_size;
1106 	if (!pages->num_element_per_page) {
1107 		qdf_print("Invalid page %d or element size %d",
1108 			  (int)PAGE_SIZE, (int)element_size);
1109 		goto out_fail;
1110 	}
1111 
1112 	pages->num_pages = element_num / pages->num_element_per_page;
1113 	if (element_num % pages->num_element_per_page)
1114 		pages->num_pages++;
1115 
1116 	if (cacheable) {
1117 		/* Pages information storage */
1118 		pages->cacheable_pages = qdf_mem_malloc_debug(
1119 			pages->num_pages * sizeof(pages->cacheable_pages),
1120 			func, line, caller, 0);
1121 		if (!pages->cacheable_pages)
1122 			goto out_fail;
1123 
1124 		cacheable_pages = pages->cacheable_pages;
1125 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1126 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1127 					PAGE_SIZE, func, line, caller, 0);
1128 			if (!cacheable_pages[page_idx])
1129 				goto page_alloc_fail;
1130 		}
1131 		pages->dma_pages = NULL;
1132 	} else {
1133 		pages->dma_pages = qdf_mem_malloc_debug(
1134 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1135 			func, line, caller, 0);
1136 		if (!pages->dma_pages)
1137 			goto out_fail;
1138 
1139 		dma_pages = pages->dma_pages;
1140 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1141 			dma_pages->page_v_addr_start =
1142 				qdf_mem_alloc_consistent_debug(
1143 					osdev, osdev->dev, PAGE_SIZE,
1144 					&dma_pages->page_p_addr,
1145 					func, line, caller);
1146 			if (!dma_pages->page_v_addr_start) {
1147 				qdf_print("dmaable page alloc fail pi %d",
1148 					  page_idx);
1149 				goto page_alloc_fail;
1150 			}
1151 			dma_pages->page_v_addr_end =
1152 				dma_pages->page_v_addr_start + PAGE_SIZE;
1153 			dma_pages++;
1154 		}
1155 		pages->cacheable_pages = NULL;
1156 	}
1157 	return;
1158 
1159 page_alloc_fail:
1160 	if (cacheable) {
1161 		for (i = 0; i < page_idx; i++)
1162 			qdf_mem_free_debug(pages->cacheable_pages[i],
1163 					   func, line);
1164 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1165 	} else {
1166 		dma_pages = pages->dma_pages;
1167 		for (i = 0; i < page_idx; i++) {
1168 			qdf_mem_free_consistent_debug(
1169 				osdev, osdev->dev,
1170 				PAGE_SIZE, dma_pages->page_v_addr_start,
1171 				dma_pages->page_p_addr, memctxt, func, line);
1172 			dma_pages++;
1173 		}
1174 		qdf_mem_free_debug(pages->dma_pages, func, line);
1175 	}
1176 
1177 out_fail:
1178 	pages->cacheable_pages = NULL;
1179 	pages->dma_pages = NULL;
1180 	pages->num_pages = 0;
1181 }
1182 
1183 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1184 
1185 /**
1186  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1187  * @osdev: OS device handle pointer
1188  * @pages: Multi page information storage
1189  * @memctxt: Memory context
1190  * @cacheable: Coherent memory or cacheable memory
1191  * @func: Caller of this allocator
1192  * @line: Line number of the caller
1193  *
1194  * This function will free large size of memory over multiple pages.
1195  *
1196  * Return: None
1197  */
1198 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1199 				    struct qdf_mem_multi_page_t *pages,
1200 				    qdf_dma_context_t memctxt, bool cacheable,
1201 				    const char *func, uint32_t line)
1202 {
1203 	unsigned int page_idx;
1204 	struct qdf_mem_dma_page_t *dma_pages;
1205 
1206 	if (cacheable) {
1207 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1208 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1209 					   func, line);
1210 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1211 	} else {
1212 		dma_pages = pages->dma_pages;
1213 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1214 			qdf_mem_free_consistent_debug(
1215 				osdev, osdev->dev, PAGE_SIZE,
1216 				dma_pages->page_v_addr_start,
1217 				dma_pages->page_p_addr, memctxt, func, line);
1218 			dma_pages++;
1219 		}
1220 		qdf_mem_free_debug(pages->dma_pages, func, line);
1221 	}
1222 
1223 	pages->cacheable_pages = NULL;
1224 	pages->dma_pages = NULL;
1225 	pages->num_pages = 0;
1226 }
1227 
1228 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1229 
1230 #else
1231 static void qdf_mem_debug_init(void) {}
1232 
1233 static void qdf_mem_debug_exit(void) {}
1234 
1235 void *qdf_mem_malloc_fl(size_t size, const char *func, uint32_t line)
1236 {
1237 	void *ptr;
1238 
1239 	ptr = qdf_mem_prealloc_get(size);
1240 	if (ptr)
1241 		return ptr;
1242 
1243 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1244 	if (!ptr) {
1245 		qdf_nofl_err("Failed to malloc %zuB @ %s:%d",
1246 			     size, func, line);
1247 		return NULL;
1248 	}
1249 
1250 	qdf_mem_kmalloc_inc(ksize(ptr));
1251 
1252 	return ptr;
1253 }
1254 qdf_export_symbol(qdf_mem_malloc_fl);
1255 
1256 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1257 {
1258 	void *ptr;
1259 
1260 	ptr = qdf_mem_prealloc_get(size);
1261 	if (ptr)
1262 		return ptr;
1263 
1264 	ptr = kzalloc(size, GFP_ATOMIC);
1265 	if (!ptr) {
1266 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1267 			      size, func, line);
1268 		return NULL;
1269 	}
1270 
1271 	qdf_mem_kmalloc_inc(ksize(ptr));
1272 
1273 	return ptr;
1274 }
1275 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1276 
1277 /**
1278  * qdf_mem_free() - free QDF memory
1279  * @ptr: Pointer to the starting address of the memory to be freed.
1280  *
1281  * This function will free the memory pointed to by 'ptr'.
1282  *
1283  * Return: None
1284  */
1285 void qdf_mem_free(void *ptr)
1286 {
1287 	if (!ptr)
1288 		return;
1289 
1290 	if (qdf_mem_prealloc_put(ptr))
1291 		return;
1292 
1293 	qdf_mem_kmalloc_dec(ksize(ptr));
1294 
1295 	kfree(ptr);
1296 }
1297 
1298 qdf_export_symbol(qdf_mem_free);
1299 
1300 /**
1301  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1302  * @osdev: OS device handle pointer
1303  * @pages: Multi page information storage
1304  * @element_size: Each element size
1305  * @element_num: Total number of elements should be allocated
1306  * @memctxt: Memory context
1307  * @cacheable: Coherent memory or cacheable memory
1308  *
1309  * This function allocates a large amount of memory across multiple pages.
1310  * Large contiguous allocations fail frequently, so instead of allocating
1311  * one large block in a single shot, it allocates multiple non-contiguous
1312  * pages and combines them at the point of actual use.
1313  *
1314  * Return: None
1315  */
1316 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1317 			       struct qdf_mem_multi_page_t *pages,
1318 			       size_t element_size, uint16_t element_num,
1319 			       qdf_dma_context_t memctxt, bool cacheable)
1320 {
1321 	uint16_t page_idx;
1322 	struct qdf_mem_dma_page_t *dma_pages;
1323 	void **cacheable_pages = NULL;
1324 	uint16_t i;
1325 
1326 	pages->num_element_per_page = PAGE_SIZE / element_size;
1327 	if (!pages->num_element_per_page) {
1328 		qdf_print("Invalid page %d or element size %d",
1329 			  (int)PAGE_SIZE, (int)element_size);
1330 		goto out_fail;
1331 	}
1332 
1333 	pages->num_pages = element_num / pages->num_element_per_page;
1334 	if (element_num % pages->num_element_per_page)
1335 		pages->num_pages++;
1336 
1337 	if (cacheable) {
1338 		/* Pages information storage */
1339 		pages->cacheable_pages = qdf_mem_malloc(
1340 			pages->num_pages * sizeof(pages->cacheable_pages));
1341 		if (!pages->cacheable_pages)
1342 			goto out_fail;
1343 
1344 		cacheable_pages = pages->cacheable_pages;
1345 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1346 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1347 			if (!cacheable_pages[page_idx])
1348 				goto page_alloc_fail;
1349 		}
1350 		pages->dma_pages = NULL;
1351 	} else {
1352 		pages->dma_pages = qdf_mem_malloc(
1353 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1354 		if (!pages->dma_pages)
1355 			goto out_fail;
1356 
1357 		dma_pages = pages->dma_pages;
1358 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1359 			dma_pages->page_v_addr_start =
1360 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1361 					 PAGE_SIZE,
1362 					&dma_pages->page_p_addr);
1363 			if (!dma_pages->page_v_addr_start) {
1364 				qdf_print("dmaable page alloc fail pi %d",
1365 					page_idx);
1366 				goto page_alloc_fail;
1367 			}
1368 			dma_pages->page_v_addr_end =
1369 				dma_pages->page_v_addr_start + PAGE_SIZE;
1370 			dma_pages++;
1371 		}
1372 		pages->cacheable_pages = NULL;
1373 	}
1374 	return;
1375 
1376 page_alloc_fail:
1377 	if (cacheable) {
1378 		for (i = 0; i < page_idx; i++)
1379 			qdf_mem_free(pages->cacheable_pages[i]);
1380 		qdf_mem_free(pages->cacheable_pages);
1381 	} else {
1382 		dma_pages = pages->dma_pages;
1383 		for (i = 0; i < page_idx; i++) {
1384 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1385 				dma_pages->page_v_addr_start,
1386 				dma_pages->page_p_addr, memctxt);
1387 			dma_pages++;
1388 		}
1389 		qdf_mem_free(pages->dma_pages);
1390 	}
1391 
1392 out_fail:
1393 	pages->cacheable_pages = NULL;
1394 	pages->dma_pages = NULL;
1395 	pages->num_pages = 0;
1396 	return;
1397 }
1398 qdf_export_symbol(qdf_mem_multi_pages_alloc);
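
/*
 * Worked example (editor's addition, assuming 4KB pages): with
 * element_size = 128 and element_num = 100,
 *
 *	num_element_per_page = 4096 / 128 = 32
 *	num_pages            = 100 / 32 (= 3) + 1 for the remainder = 4
 *
 * so the 100 elements are carved out of four non-contiguous pages.
 */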
1399 
1400 /**
1401  * qdf_mem_multi_pages_free() - free large size of kernel memory
1402  * @osdev: OS device handle pointer
1403  * @pages: Multi page information storage
1404  * @memctxt: Memory context
1405  * @cacheable: Coherent memory or cacheable memory
1406  *
1407  * This function will free large size of memory over multiple pages.
1408  *
1409  * Return: None
1410  */
1411 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1412 			      struct qdf_mem_multi_page_t *pages,
1413 			      qdf_dma_context_t memctxt, bool cacheable)
1414 {
1415 	unsigned int page_idx;
1416 	struct qdf_mem_dma_page_t *dma_pages;
1417 
1418 	if (cacheable) {
1419 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1420 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1421 		qdf_mem_free(pages->cacheable_pages);
1422 	} else {
1423 		dma_pages = pages->dma_pages;
1424 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1425 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1426 				dma_pages->page_v_addr_start,
1427 				dma_pages->page_p_addr, memctxt);
1428 			dma_pages++;
1429 		}
1430 		qdf_mem_free(pages->dma_pages);
1431 	}
1432 
1433 	pages->cacheable_pages = NULL;
1434 	pages->dma_pages = NULL;
1435 	pages->num_pages = 0;
1436 	return;
1437 }
1438 qdf_export_symbol(qdf_mem_multi_pages_free);
1439 #endif
1440 
1441 void *qdf_aligned_malloc_fl(uint32_t *size,
1442 			    void **vaddr_unaligned,
1443 			    qdf_dma_addr_t *paddr_unaligned,
1444 			    qdf_dma_addr_t *paddr_aligned,
1445 			    uint32_t align,
1446 			    const char *func, uint32_t line)
1447 {
1448 	void *vaddr_aligned;
1449 	uint32_t align_alloc_size;
1450 
1451 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
1452 			line);
1453 	if (!*vaddr_unaligned) {
1454 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
1455 		return NULL;
1456 	}
1457 
1458 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
1459 
1460 	/* Re-allocate with additional bytes to align the base address
1461 	 * only if the allocation above returns an unaligned address.
1462 	 * The reason for trying the exact size first is that the OS
1463 	 * allocates blocks of power-of-2 pages and then frees the extra
1464 	 * pages. E.g., for a ring size of 1MB, the allocation below will
1465 	 * request 1MB plus 7 bytes for alignment, which will cause a 2MB
1466 	 * block allocation, and that sometimes fails due to memory
1467 	 * fragmentation.
1468 	 */
1469 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1470 		align_alloc_size = *size + align - 1;
1471 
1472 		qdf_mem_free(*vaddr_unaligned);
1473 		*vaddr_unaligned = qdf_mem_malloc_fl(
1474 				(qdf_size_t)align_alloc_size, func, line);
1475 		if (!*vaddr_unaligned) {
1476 			qdf_warn("Failed to alloc %uB @ %s:%d",
1477 				 align_alloc_size, func, line);
1478 			return NULL;
1479 		}
1480 
1481 		*paddr_unaligned = qdf_mem_virt_to_phys(
1482 				*vaddr_unaligned);
1483 		*size = align_alloc_size;
1484 	}
1485 
1486 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
1487 		((unsigned long)(*paddr_unaligned), align);
1488 
1489 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1490 			((unsigned long)(*paddr_aligned) -
1491 			 (unsigned long)(*paddr_unaligned)));
1492 
1493 	return vaddr_aligned;
1494 }
1495 
1496 qdf_export_symbol(qdf_aligned_malloc_fl);
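
/*
 * Worked example (editor's addition, made-up addresses): with
 * align = 8 and *paddr_unaligned = 0x1003, (0x1003 & 0x7) != 0, so the
 * buffer is re-allocated with size + 7 extra bytes and
 *
 *	*paddr_aligned = qdf_align(0x1003, 8) = 0x1008
 *	vaddr_aligned  = *vaddr_unaligned + (0x1008 - 0x1003)
 *
 * i.e. the virtual pointer is advanced by the same offset that aligns
 * the physical address.
 */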
1497 
1498 /**
1499  * qdf_mem_multi_page_link() - Make links for multi page elements
1500  * @osdev: OS device handle pointer
1501  * @pages: Multi page information storage
1502  * @elem_size: Single element size
1503  * @elem_count: elements count should be linked
1504  * @cacheable: Coherent memory or cacheable memory
1505  *
1506  * This function will make links for multi page allocated structure
1507  *
1508  * Return: 0 success
1509  */
1510 int qdf_mem_multi_page_link(qdf_device_t osdev,
1511 		struct qdf_mem_multi_page_t *pages,
1512 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1513 {
1514 	uint16_t i, i_int;
1515 	void *page_info;
1516 	void **c_elem = NULL;
1517 	uint32_t num_link = 0;
1518 
1519 	for (i = 0; i < pages->num_pages; i++) {
1520 		if (cacheable)
1521 			page_info = pages->cacheable_pages[i];
1522 		else
1523 			page_info = pages->dma_pages[i].page_v_addr_start;
1524 
1525 		if (!page_info)
1526 			return -ENOMEM;
1527 
1528 		c_elem = (void **)page_info;
1529 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1530 			if (i_int == (pages->num_element_per_page - 1)) {
1531 				if (cacheable)
1532 					*c_elem = pages->
1533 						cacheable_pages[i + 1];
1534 				else
1535 					*c_elem = pages->
1536 						dma_pages[i + 1].
1537 							page_v_addr_start;
1538 				num_link++;
1539 				break;
1540 			} else {
1541 				*c_elem =
1542 					(void *)(((char *)c_elem) + elem_size);
1543 			}
1544 			num_link++;
1545 			c_elem = (void **)*c_elem;
1546 
1547 			/* Last link established exit */
1548 			if (num_link == (elem_count - 1))
1549 				break;
1550 		}
1551 	}
1552 
1553 	if (c_elem)
1554 		*c_elem = NULL;
1555 
1556 	return 0;
1557 }
1558 qdf_export_symbol(qdf_mem_multi_page_link);
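
/*
 * Illustrative result (editor's sketch): after linking, the first
 * pointer-sized word of each element points to the next element, and
 * the last element of each page points to the first element of the
 * next page:
 *
 *	page 0: [e0] -> [e1] -> ... -> [eN-1] --+
 *	page 1: [e0] -> [e1] -> ...           <-+
 *
 * The link of the final element is set to NULL, yielding one singly
 * linked free list spanning all pages.
 */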
1559 
1560 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1561 {
1562 	/* special case where dst_addr or src_addr can be NULL */
1563 	if (!num_bytes)
1564 		return;
1565 
1566 	QDF_BUG(dst_addr);
1567 	QDF_BUG(src_addr);
1568 	if (!dst_addr || !src_addr)
1569 		return;
1570 
1571 	memcpy(dst_addr, src_addr, num_bytes);
1572 }
1573 qdf_export_symbol(qdf_mem_copy);
1574 
1575 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
1576 {
1577 	qdf_shared_mem_t *shared_mem;
1578 	qdf_dma_addr_t dma_addr, paddr;
1579 	int ret;
1580 
1581 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
1582 	if (!shared_mem)
1583 		return NULL;
1584 
1585 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
1586 				size, qdf_mem_get_dma_addr_ptr(osdev,
1587 						&shared_mem->mem_info));
1588 	if (!shared_mem->vaddr) {
1589 		qdf_err("Unable to allocate DMA memory for shared resource");
1590 		qdf_mem_free(shared_mem);
1591 		return NULL;
1592 	}
1593 
1594 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
1595 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
1596 
1597 	qdf_mem_zero(shared_mem->vaddr, size);
1598 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
1599 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1600 
1601 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
1602 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
1603 				      shared_mem->vaddr, dma_addr, size);
1604 	if (ret) {
1605 		qdf_err("Unable to get DMA sgtable");
1606 		qdf_mem_free_consistent(osdev, osdev->dev,
1607 					shared_mem->mem_info.size,
1608 					shared_mem->vaddr,
1609 					dma_addr,
1610 					qdf_get_dma_mem_context(shared_mem,
1611 								memctx));
1612 		qdf_mem_free(shared_mem);
1613 		return NULL;
1614 	}
1615 
1616 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
1617 
1618 	return shared_mem;
1619 }
1620 
1621 qdf_export_symbol(qdf_mem_shared_mem_alloc);
1622 
1623 /**
1624  * qdf_mem_copy_toio() - copy memory
1625  * @dst_addr: Pointer to destination memory location (to copy to)
1626  * @src_addr: Pointer to source memory location (to copy from)
1627  * @num_bytes: Number of bytes to copy.
1628  *
1629  * Return: none
1630  */
1631 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1632 {
1633 	if (0 == num_bytes) {
1634 		/* special case where dst_addr or src_addr can be NULL */
1635 		return;
1636 	}
1637 
1638 	if ((!dst_addr) || (!src_addr)) {
1639 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1640 			  "%s called with NULL parameter, source:%pK destination:%pK",
1641 			  __func__, src_addr, dst_addr);
1642 		QDF_ASSERT(0);
1643 		return;
1644 	}
1645 	memcpy_toio(dst_addr, src_addr, num_bytes);
1646 }
1647 
1648 qdf_export_symbol(qdf_mem_copy_toio);
1649 
1650 /**
1651  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
1652  * @ptr: Pointer to memory that will be set
1653  * @value: Byte set in memory
1654  * @num_bytes: Number of bytes to be set
1655  *
1656  * Return: None
1657  */
1658 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
1659 {
1660 	if (!ptr) {
1661 		qdf_print("%s called with NULL parameter ptr", __func__);
1662 		return;
1663 	}
1664 	memset_io(ptr, value, num_bytes);
1665 }
1666 
1667 qdf_export_symbol(qdf_mem_set_io);
1668 
1669 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1670 {
1671 	QDF_BUG(ptr);
1672 	if (!ptr)
1673 		return;
1674 
1675 	memset(ptr, value, num_bytes);
1676 }
1677 qdf_export_symbol(qdf_mem_set);
1678 
1679 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1680 {
1681 	/* special case where dst_addr or src_addr can be NULL */
1682 	if (!num_bytes)
1683 		return;
1684 
1685 	QDF_BUG(dst_addr);
1686 	QDF_BUG(src_addr);
1687 	if (!dst_addr || !src_addr)
1688 		return;
1689 
1690 	memmove(dst_addr, src_addr, num_bytes);
1691 }
1692 qdf_export_symbol(qdf_mem_move);
1693 
1694 int qdf_mem_cmp(const void *left, const void *right, size_t size)
1695 {
1696 	QDF_BUG(left);
1697 	QDF_BUG(right);
1698 
1699 	return memcmp(left, right, size);
1700 }
1701 qdf_export_symbol(qdf_mem_cmp);
1702 
1703 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1704 /**
1705  * qdf_mem_dma_alloc() - allocates memory for dma
1706  * @osdev: OS device handle
1707  * @dev: Pointer to device handle
1708  * @size: Size to be allocated
1709  * @phy_addr: Physical address
1710  *
1711  * Return: pointer of allocated memory or null if memory alloc fails
1712  */
1713 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1714 				      qdf_size_t size,
1715 				      qdf_dma_addr_t *phy_addr)
1716 {
1717 	void *vaddr;
1718 
1719 	vaddr = qdf_mem_malloc(size);
1720 	*phy_addr = ((uintptr_t) vaddr);
1721 	/* using this type conversion to suppress "cast from pointer to integer
1722 	 * of different size" warning on some platforms
1723 	 */
1724 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1725 	return vaddr;
1726 }
1727 
1728 #elif defined(QCA_WIFI_QCA8074_VP) && defined(BUILD_X86) && \
1729 	!defined(QCA_WIFI_QCN9000)
1730 
1731 #define QCA8074_RAM_BASE 0x50000000
1732 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1733 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
1734 			qdf_dma_addr_t *phy_addr)
1735 {
1736 	void *vaddr = NULL;
1737 	int i;
1738 
1739 	*phy_addr = 0;
1740 
1741 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1742 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1743 					   qdf_mem_malloc_flags());
1744 
1745 		if (!vaddr) {
1746 			qdf_err("%s failed, size: %zu!", __func__, size);
1747 			return NULL;
1748 		}
1749 
1750 		if (*phy_addr >= QCA8074_RAM_BASE)
1751 			return vaddr;
1752 
1753 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1754 	}
1755 
1756 	return NULL;
1757 }
1758 
1759 #else
1760 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
1761 				      qdf_size_t size, qdf_dma_addr_t *paddr)
1762 {
1763 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
1764 }
1765 #endif
1766 
1767 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1768 static inline void
1769 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
1770 {
1771 	qdf_mem_free(vaddr);
1772 }
1773 #else
1774 
1775 static inline void
1776 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
1777 {
1778 	dma_free_coherent(dev, size, vaddr, paddr);
1779 }
1780 #endif
1781 
1782 #ifdef MEMORY_DEBUG
1783 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
1784 				     qdf_size_t size, qdf_dma_addr_t *paddr,
1785 				     const char *func, uint32_t line,
1786 				     void *caller)
1787 {
1788 	QDF_STATUS status;
1789 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1790 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
1791 	struct qdf_mem_header *header;
1792 	void *vaddr;
1793 
1794 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1795 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1796 		return NULL;
1797 	}
1798 
1799 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
1800 				   paddr);
1801 
1802 	if (!vaddr) {
1803 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1804 		return NULL;
1805 	}
1806 
1807 	header = qdf_mem_dma_get_header(vaddr, size);
1808 	/* For DMA buffers we only add trailers; this function inits the
1809 	 * header structure at the tail.
1810 	 * Prefixing the header into a DMA buffer causes SMMU faults, so
1811 	 * do not prefix headers into DMA buffers.
1812 	 */
1813 	qdf_mem_header_init(header, size, func, line, caller);
1814 
1815 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
1816 	status = qdf_list_insert_front(mem_list, &header->node);
1817 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
1818 	if (QDF_IS_STATUS_ERROR(status))
1819 		qdf_err("Failed to insert memory header; status %d", status);
1820 
1821 	qdf_mem_dma_inc(size);
1822 
1823 	return vaddr;
1824 }
1825 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
1826 
1827 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
1828 				   qdf_size_t size, void *vaddr,
1829 				   qdf_dma_addr_t paddr,
1830 				   qdf_dma_context_t memctx,
1831 				   const char *func, uint32_t line)
1832 {
1833 	enum qdf_debug_domain domain = qdf_debug_domain_get();
1834 	struct qdf_mem_header *header;
1835 	enum qdf_mem_validation_bitmap error_bitmap;
1836 
1837 	/* freeing a null pointer is valid */
1838 	if (qdf_unlikely(!vaddr))
1839 		return;
1840 
1841 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
1842 
1843 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
1844 	/* For DMA buffers we only add trailers; this function retrieves
1845 	 * the header structure from the tail.
1846 	 * Prefixing the header into a DMA buffer causes SMMU faults, so
1847 	 * do not prefix headers into DMA buffers.
1848 	 */
1849 	header = qdf_mem_dma_get_header(vaddr, size);
1850 	error_bitmap = qdf_mem_header_validate(header, domain);
1851 	if (!error_bitmap) {
1852 		header->freed = true;
1853 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
1854 				     &header->node);
1855 	}
1856 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
1857 
1858 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
1859 
1860 	qdf_mem_dma_dec(header->size);
1861 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
1862 }
1863 qdf_export_symbol(qdf_mem_free_consistent_debug);
1864 
1865 #else
1866 
1867 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
1868 			       qdf_size_t size, qdf_dma_addr_t *paddr)
1869 {
1870 	void *vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
1871 
1872 	if (vaddr)
1873 		qdf_mem_dma_inc(size);
1874 
1875 	return vaddr;
1876 }
1877 qdf_export_symbol(qdf_mem_alloc_consistent);
1878 
1879 void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
1880 			     qdf_size_t size, void *vaddr,
1881 			     qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
1882 {
1883 	qdf_mem_dma_dec(size);
1884 	qdf_mem_dma_free(dev, size, vaddr, paddr);
1885 }
1886 qdf_export_symbol(qdf_mem_free_consistent);
1887 
1888 #endif /* MEMORY_DEBUG */
1889 
1890 void *qdf_aligned_mem_alloc_consistent_fl(
1891 	qdf_device_t osdev, uint32_t *size,
1892 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
1893 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
1894 	const char *func, uint32_t line)
1895 {
1896 	void *vaddr_aligned;
1897 	uint32_t align_alloc_size;
1898 
1899 	*vaddr_unaligned = qdf_mem_alloc_consistent(
1900 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
1901 	if (!*vaddr_unaligned) {
1902 		qdf_warn("Failed to alloc %uB @ %s:%d",
1903 			 *size, func, line);
1904 		return NULL;
1905 	}
1906 
1907 	/* Re-allocate with additional bytes to align the base address
1908 	 * only if the allocation above returns an unaligned address.
1909 	 * The reason for trying the exact size first is that the OS
1910 	 * allocates blocks of power-of-2 pages and then frees the extra
1911 	 * pages. E.g., for a ring size of 1MB, the allocation below will
1912 	 * request 1MB plus 7 bytes for alignment, which will cause a 2MB
1913 	 * block allocation, and that sometimes fails due to memory
1914 	 * fragmentation.
1915 	 */
1916 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
1917 		align_alloc_size = *size + align - 1;
1918 
1919 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
1920 					*vaddr_unaligned,
1921 					*paddr_unaligned, 0);
1922 
1923 		*vaddr_unaligned = qdf_mem_alloc_consistent(
1924 				osdev, osdev->dev, align_alloc_size,
1925 				paddr_unaligned);
1926 		if (!*vaddr_unaligned) {
1927 			qdf_warn("Failed to alloc %uB @ %s:%d",
1928 				 align_alloc_size, func, line);
1929 			return NULL;
1930 		}
1931 
1932 		*size = align_alloc_size;
1933 	}
1934 
1935 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
1936 			(unsigned long)(*paddr_unaligned), align);
1937 
1938 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
1939 				 ((unsigned long)(*paddr_aligned) -
1940 				  (unsigned long)(*paddr_unaligned)));
1941 
1942 	return vaddr_aligned;
1943 }
1944 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
1945 
1946 /**
1947  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1948  * @osdev: OS device handle
1949  * @bus_addr: dma address to give to the device
1950  * @size: Size of the memory block
1951  * @direction: the direction in which the data will be DMAed
1952  *
1953  * Assign memory to the remote device.
1954  * The cache lines are flushed to ram or invalidated as needed.
1955  *
1956  * Return: none
1957  */
1958 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1959 					qdf_dma_addr_t bus_addr,
1960 					qdf_size_t size,
1961 					enum dma_data_direction direction)
1962 {
1963 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1964 }
1965 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
1966 
1967 /**
1968  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1969  * @osdev: OS device handle
1970  * @bus_addr: dma address to give to the cpu
1971  * @size: Size of the memory block
1972  * @direction: the direction in which the data will be DMAed
1973  *
1974  * Assign memory to the CPU.
1975  *
1976  * Return: none
1977  */
1978 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1979 				     qdf_dma_addr_t bus_addr,
1980 				     qdf_size_t size,
1981 				     enum dma_data_direction direction)
1982 {
1983 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1984 }
1985 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
1986 
1987 void qdf_mem_init(void)
1988 {
1989 	qdf_mem_debug_init();
1990 	qdf_net_buf_debug_init();
1991 	qdf_mem_debugfs_init();
1992 	qdf_mem_debug_debugfs_init();
1993 }
1994 qdf_export_symbol(qdf_mem_init);
1995 
1996 void qdf_mem_exit(void)
1997 {
1998 	qdf_mem_debug_debugfs_exit();
1999 	qdf_mem_debugfs_exit();
2000 	qdf_net_buf_debug_exit();
2001 	qdf_mem_debug_exit();
2002 }
2003 qdf_export_symbol(qdf_mem_exit);
2004 
2005 /**
2006  * qdf_ether_addr_copy() - copy an Ethernet address
2007  *
2008  * @dst_addr: A six-byte array Ethernet address destination
2009  * @src_addr: A six-byte array Ethernet address source
2010  *
2011  * Please note: dst & src must both be aligned to u16.
2012  *
2013  * Return: none
2014  */
2015 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2016 {
2017 	if ((!dst_addr) || (!src_addr)) {
2018 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2019 			  "%s called with NULL parameter, source:%pK destination:%pK",
2020 			  __func__, src_addr, dst_addr);
2021 		QDF_ASSERT(0);
2022 		return;
2023 	}
2024 	ether_addr_copy(dst_addr, src_addr);
2025 }
2026 qdf_export_symbol(qdf_ether_addr_copy);
2027 
2028