xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision fffcebf2e926a46534518e770b63d1ab6574e139)
1 /*
2  * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 /**
29  * DOC: qdf_mem
30  * This file provides OS dependent memory management APIs
31  */
32 
33 #include "qdf_debugfs.h"
34 #include "qdf_mem.h"
35 #include "qdf_nbuf.h"
36 #include "qdf_lock.h"
37 #include "qdf_mc_timer.h"
38 #include "qdf_module.h"
39 #include <qdf_trace.h>
40 #include "qdf_atomic.h"
41 #include <linux/debugfs.h>
42 #include <linux/seq_file.h>
43 #include <linux/string.h>
44 
45 #ifdef CONFIG_MCL
46 #include <host_diag_core_event.h>
47 #else
48 #define host_log_low_resource_failure(code) do {} while (0)
49 #endif
50 
51 #if defined(CONFIG_CNSS)
52 #include <net/cnss.h>
53 #endif
54 
55 #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
56 #include <net/cnss_prealloc.h>
57 #endif
58 
59 #ifdef MEMORY_DEBUG
60 #include "qdf_debug_domain.h"
61 #include <qdf_list.h>
62 
63 /* Preprocessor Definitions and Constants */
64 #define QDF_MEM_MAX_MALLOC (1024 * 1024) /* 1MiB */
65 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
66 
67 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
68 static qdf_spinlock_t qdf_mem_list_lock;
69 
70 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
71 {
72 	return &qdf_mem_domains[domain];
73 }
74 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @file: name of the file the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @header: a known value, used to detect out-of-bounds access
 *
 * This header is prepended to every allocation made through the debug
 * APIs; a matching trailer guard is written just past the user data.
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	const char *file;
	uint32_t line;
	uint32_t size;
	uint64_t header;
};
95 
/* Guard patterns written before and after each allocation to detect
 * out-of-bounds writes. Declared const: a stray write to these would
 * silently disable all corruption detection.
 */
static const uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static const uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
98 
/* return the debug header hidden immediately before the user pointer */
static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
{
	return (struct qdf_mem_header *)ptr - 1;
}

/* return the trailer guard located just past the user data region;
 * note: arithmetic on void * is a GCC extension, not ISO C
 */
static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
{
	return (uint64_t *)((void *)(header + 1) + header->size);
}

/* return the user pointer for a given debug header */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}

/* number of bytes needed for the qdf memory debug information
 * (leading header struct plus trailing guard word)
 */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
117 
118 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
119 				const char *file, uint32_t line)
120 {
121 	QDF_BUG(header);
122 	if (!header)
123 		return;
124 
125 	header->domain = qdf_debug_domain_get();
126 	header->freed = false;
127 	header->file = file;
128 	header->line = line;
129 	header->size = size;
130 	header->header = WLAN_MEM_HEADER;
131 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
132 }
133 
/* bitmap of possible allocation-metadata corruption findings */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,	/* leading guard pattern damaged */
	QDF_MEM_BAD_TRAILER = 1 << 1,	/* trailing guard pattern damaged */
	QDF_MEM_BAD_SIZE = 1 << 2,	/* recorded size is implausible */
	QDF_MEM_DOUBLE_FREE = 1 << 3,	/* freed flag already set to true */
	QDF_MEM_BAD_FREED = 1 << 4,	/* freed flag is neither 0 nor 1 */
	QDF_MEM_BAD_NODE = 1 << 5,	/* tracking-list node is invalid */
	QDF_MEM_BAD_DOMAIN = 1 << 6,	/* recorded domain is out of range */
	QDF_MEM_WRONG_DOMAIN = 1 << 7,	/* freed from a different domain */
};
144 
/**
 * qdf_mem_validate_list_node() - validate that the node is in a list
 * @qdf_node: node to check for being in a list
 *
 * Return: true if the node validly linked in an anchored doubly linked list
 */
static bool qdf_mem_validate_list_node(qdf_list_node_t *qdf_node)
{
	struct list_head *node = qdf_node;

	/*
	 * if the node is an empty list, it is not tied to an anchor node
	 * and must have been removed with list_del_init
	 */
	if (list_empty(node))
		return false;

	/* a live node always has both link pointers populated */
	if (!node->prev || !node->next)
		return false;

	/* neighbours must point back at this node */
	if (node->prev->next != node || node->next->prev != node)
		return false;

	return true;
}
170 
/**
 * qdf_mem_header_validate() - check an allocation's metadata for corruption
 * @header: the debug header to validate
 * @domain: the debug domain the allocation is expected to belong to
 *
 * Return: bitmap of detected validation errors; 0 if the header is intact
 */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	/* don't dereference the trailer if the size can't be trusted */
	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;
	else if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
		error_bitmap |= QDF_MEM_BAD_TRAILER;

	/* freed must be exactly true or false; any other value means the
	 * flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_mem_validate_list_node(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
201 
/**
 * qdf_mem_header_assert_valid() - log validation errors, then panic
 * @header: the debug header that was validated
 * @current_domain: the currently active debug domain
 * @error_bitmap: errors reported by qdf_mem_header_validate()
 * @file: file name of the call site being verified
 * @line: line number of the call site being verified
 *
 * No-op when @error_bitmap is 0; otherwise logs every detected error and
 * panics unconditionally.
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *file,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; found %s(%d), expected %s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	panic("A fatal memory error was detected @ %s:%d",
	      kbasename(file), line);
}
244 #endif /* MEMORY_DEBUG */
245 
/* when non-zero, qdf mempools fall back to on-demand allocation instead of
 * carving elements from a pre-allocated region (see __qdf_mempool_init);
 * exposed as a module parameter
 */
u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
EXPORT_SYMBOL(prealloc_disabled);
249 
#if defined WLAN_DEBUGFS

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	outstanding kmalloc allocation bytes
 * @dma:	outstanding dma allocation bytes
 * @skb:	outstanding skb allocation bytes
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
} qdf_mem_stat;
266 
/* accounting helpers: add/remove @size bytes from the matching counter in
 * qdf_mem_stat (the counters are exposed read-only through debugfs)
 */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}

void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}
296 
297 #ifdef MEMORY_DEBUG
/* printf-style print adapter routing output to the QDF error trace log */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}

/* printf-style print adapter routing output (plus a trailing newline) to
 * the seq_file passed in @priv
 */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
321 
/**
 * struct __qdf_mem_info - memory statistics
 * @file: the file which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @count: how many allocations of same type
 *
 * One row aggregates all allocations sharing the same (file, line, size).
 */
struct __qdf_mem_info {
	const char *file;
	uint32_t line;
	uint32_t size;
	uint32_t count;
};

/*
 * The table depth defines the de-duplication proximity scope.
 * A deeper table takes more time, so choose any optimum value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 8
342 
343 /**
344  * qdf_mem_domain_print_header() - memory domain header print logic
345  * @print: the print adapter function
346  * @print_priv: the private data to be consumed by @print
347  *
348  * Return: None
349  */
350 static void qdf_mem_domain_print_header(qdf_abstract_print print,
351 					void *print_priv)
352 {
353 	print(print_priv,
354 	      "--------------------------------------------------------------");
355 	print(print_priv, " count    size     total    filename");
356 	print(print_priv,
357 	      "--------------------------------------------------------------");
358 }
359 
360 /**
361  * qdf_mem_meta_table_print() - memory metadata table print logic
362  * @table: the memory metadata table to print
363  * @print: the print adapter function
364  * @print_priv: the private data to be consumed by @print
365  *
366  * Return: None
367  */
368 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
369 				     qdf_abstract_print print,
370 				     void *print_priv)
371 {
372 	int i;
373 
374 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
375 		if (!table[i].count)
376 			break;
377 
378 		print(print_priv,
379 		      "%6u x %5u = %7uB @ %s:%u",
380 		      table[i].count,
381 		      table[i].size,
382 		      table[i].count * table[i].size,
383 		      kbasename(table[i].file),
384 		      table[i].line);
385 	}
386 }
387 
388 /**
389  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
390  * @table: the memory metadata table to insert into
391  * @meta: the memory metadata to insert
392  *
393  * Return: true if the table is full after inserting, false otherwise
394  */
395 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
396 				      struct qdf_mem_header *meta)
397 {
398 	int i;
399 
400 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
401 		if (!table[i].count) {
402 			table[i].file = meta->file;
403 			table[i].line = meta->line;
404 			table[i].size = meta->size;
405 			table[i].count = 1;
406 			break;
407 		}
408 
409 		if (table[i].file == meta->file &&
410 		    table[i].line == meta->line &&
411 		    table[i].size == meta->size) {
412 			table[i].count++;
413 			break;
414 		}
415 	}
416 
417 	/* return true if the table is now full */
418 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
419 }
420 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 *
 * Walks the domain's allocation list, aggregating entries into a small
 * de-duplication table which is flushed through @print whenever it fills.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv)
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_domain_print_header(print, print_priv);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock while the aggregated table is printed */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			qdf_mem_meta_table_print(table, print, print_priv);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever remains in the table */
	qdf_mem_meta_table_print(table, print, print_priv);
}
461 
/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	enum qdf_debug_domain domain = *pos;

	/* stop once the position passes the last valid debug domain */
	if (!qdf_debug_domain_valid(domain))
		return NULL;

	/* just use the current position as our iterator */
	return pos;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq: seq_file handle
 * @v: the current iterator
 * @pos: the current position
 *
 * Advance the position to the next debug domain.
 *
 * Return: iterator pointer, or NULL if iteration is complete
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return qdf_mem_seq_start(seq, pos);
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * Nothing to release; the iterator is just the position pointer.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
505 
/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq: seq_file handle
 * @v: current iterator (points at the current domain id)
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;

	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
		   qdf_debug_domain_name(domain_id), domain_id);
	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
			     seq_printf_printer, seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


/* bind the seq_operations to the debugfs "list" file on open */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
547 
548 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
549 {
550 	if (!qdf_mem_debugfs_root)
551 		return QDF_STATUS_E_FAILURE;
552 
553 	debugfs_create_file("list",
554 			    S_IRUSR,
555 			    qdf_mem_debugfs_root,
556 			    NULL,
557 			    &fops_qdf_mem_debugfs);
558 
559 	return QDF_STATUS_SUCCESS;
560 }
561 
562 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
563 {
564 	return QDF_STATUS_SUCCESS;
565 }
566 
#else /* MEMORY_DEBUG */

/* without MEMORY_DEBUG there is no allocation list to expose in debugfs */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* MEMORY_DEBUG */
580 
581 
/* remove the qdf_mem debugfs tree and forget the root dentry */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

/* create the "mem" directory under the qdf debugfs root, plus read-only
 * files for the kmalloc/dma/skb byte counters
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
618 
#else /* WLAN_DEBUGFS */

/* without WLAN_DEBUGFS, memory statistics are not tracked at all */
static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}


static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

#endif /* WLAN_DEBUGFS */
645 
646 /**
647  * __qdf_mempool_init() - Create and initialize memory pool
648  *
649  * @osdev: platform device object
650  * @pool_addr: address of the pool created
651  * @elem_cnt: no. of elements in pool
652  * @elem_size: size of each pool element in bytes
653  * @flags: flags
654  *
655  * return: Handle to memory pool or NULL if allocation failed
656  */
657 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
658 		       int elem_cnt, size_t elem_size, u_int32_t flags)
659 {
660 	__qdf_mempool_ctxt_t *new_pool = NULL;
661 	u_int32_t align = L1_CACHE_BYTES;
662 	unsigned long aligned_pool_mem;
663 	int pool_id;
664 	int i;
665 
666 	if (prealloc_disabled) {
667 		/* TBD: We can maintain a list of pools in qdf_device_t
668 		 * to help debugging
669 		 * when pre-allocation is not enabled
670 		 */
671 		new_pool = (__qdf_mempool_ctxt_t *)
672 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
673 		if (new_pool == NULL)
674 			return QDF_STATUS_E_NOMEM;
675 
676 		memset(new_pool, 0, sizeof(*new_pool));
677 		/* TBD: define flags for zeroing buffers etc */
678 		new_pool->flags = flags;
679 		new_pool->elem_size = elem_size;
680 		new_pool->max_elem = elem_cnt;
681 		*pool_addr = new_pool;
682 		return 0;
683 	}
684 
685 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
686 		if (osdev->mem_pool[pool_id] == NULL)
687 			break;
688 	}
689 
690 	if (pool_id == MAX_MEM_POOLS)
691 		return -ENOMEM;
692 
693 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
694 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
695 	if (new_pool == NULL)
696 		return -ENOMEM;
697 
698 	memset(new_pool, 0, sizeof(*new_pool));
699 	/* TBD: define flags for zeroing buffers etc */
700 	new_pool->flags = flags;
701 	new_pool->pool_id = pool_id;
702 
703 	/* Round up the element size to cacheline */
704 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
705 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
706 				((align)?(align - 1):0);
707 
708 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
709 	if (new_pool->pool_mem == NULL) {
710 			/* TBD: Check if we need get_free_pages above */
711 		kfree(new_pool);
712 		osdev->mem_pool[pool_id] = NULL;
713 		return -ENOMEM;
714 	}
715 
716 	spin_lock_init(&new_pool->lock);
717 
718 	/* Initialize free list */
719 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
720 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
721 	STAILQ_INIT(&new_pool->free_list);
722 
723 	for (i = 0; i < elem_cnt; i++)
724 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
725 			(mempool_elem_t *)(aligned_pool_mem +
726 			(new_pool->elem_size * i)), mempool_entry);
727 
728 
729 	new_pool->free_cnt = elem_cnt;
730 	*pool_addr = new_pool;
731 	return 0;
732 }
733 EXPORT_SYMBOL(__qdf_mempool_init);
734 
/**
 * __qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: handle to the memory pool
 *
 * Frees the pool's backing memory (when pre-allocated) and its context,
 * and clears the pool's slot in the device's pool table.
 *
 * Returns: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
{
	int pool_id = 0;

	if (!pool)
		return;

	/* with prealloc disabled, only the context itself was allocated */
	if (prealloc_disabled) {
		kfree(pool);
		return;
	}

	pool_id = pool->pool_id;

	/* TBD: Check if free count matches elem_cnt if debug is enabled */
	kfree(pool->pool_mem);
	kfree(pool);
	osdev->mem_pool[pool_id] = NULL;
}
EXPORT_SYMBOL(__qdf_mempool_destroy);
762 
763 /**
764  * __qdf_mempool_alloc() - Allocate an element memory pool
765  *
766  * @osdev: platform device object
767  * @Handle: to memory pool
768  *
769  * Return: Pointer to the allocated element or NULL if the pool is empty
770  */
771 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
772 {
773 	void *buf = NULL;
774 
775 	if (!pool)
776 		return NULL;
777 
778 	if (prealloc_disabled)
779 		return  qdf_mem_malloc(pool->elem_size);
780 
781 	spin_lock_bh(&pool->lock);
782 
783 	buf = STAILQ_FIRST(&pool->free_list);
784 	if (buf != NULL) {
785 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
786 		pool->free_cnt--;
787 	}
788 
789 	/* TBD: Update free count if debug is enabled */
790 	spin_unlock_bh(&pool->lock);
791 
792 	return buf;
793 }
794 EXPORT_SYMBOL(__qdf_mempool_alloc);
795 
796 /**
797  * __qdf_mempool_free() - Free a memory pool element
798  * @osdev: Platform device object
799  * @pool: Handle to memory pool
800  * @buf: Element to be freed
801  *
802  * Returns: none
803  */
804 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
805 {
806 	if (!pool)
807 		return;
808 
809 
810 	if (prealloc_disabled)
811 		return qdf_mem_free(buf);
812 
813 	spin_lock_bh(&pool->lock);
814 	pool->free_cnt++;
815 
816 	STAILQ_INSERT_TAIL
817 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
818 	spin_unlock_bh(&pool->lock);
819 }
820 EXPORT_SYMBOL(__qdf_mempool_free);
821 
/**
 * qdf_mem_alloc_outline() - allocation QDF memory
 * @osdev: platform device object
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
EXPORT_SYMBOL(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * if the memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
EXPORT_SYMBOL(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
EXPORT_SYMBOL(qdf_mem_zero_outline);
874 
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *ptr;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	ptr = wcnss_prealloc_get(size);
	if (!ptr)
		return NULL;

	/* pre-allocated chunks are recycled; zero to match kzalloc behavior */
	memset(ptr, 0, size);

	return ptr;
}

/* return @ptr to the pre-allocated pool; true if it belonged to the pool */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
917 
918 static int qdf_mem_malloc_flags(void)
919 {
920 	if (in_interrupt() || irqs_disabled() || in_atomic())
921 		return GFP_ATOMIC;
922 
923 	return GFP_KERNEL;
924 }
925 
926 /* External Function implementation */
927 #ifdef MEMORY_DEBUG
928 
929 /**
930  * qdf_mem_debug_init() - initialize qdf memory debug functionality
931  *
932  * Return: none
933  */
934 static void qdf_mem_debug_init(void)
935 {
936 	int i;
937 
938 	/* Initalizing the list with maximum size of 60000 */
939 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
940 		qdf_list_create(&qdf_mem_domains[i], 60000);
941 	qdf_spinlock_create(&qdf_mem_list_lock);
942 	qdf_net_buf_debug_init();
943 	return;
944 }
945 
#ifdef CONFIG_HALT_KMEMLEAK
/*
 * There are two scenarios for handling memory leaks. We want to either:
 *	1) Crash and not release memory for offline debugging (internal testing)
 *	2) Clean up any leaks and continue (production devices)
 */

/* halt build: assert so leaked memory stays intact for offline debugging */
static inline void qdf_mem_leak_panic(void)
{
	QDF_BUG(0);
}

static inline void qdf_mem_free_leaked_memory(qdf_list_t *domain) { }
#else
/* production build: do not crash, reclaim leaked memory instead */
static inline void qdf_mem_leak_panic(void) { }

/* pop and kfree every tracked allocation left in @domain; the list node is
 * the first member of struct qdf_mem_header, so freeing the node frees the
 * entire allocation (header plus user data)
 */
static void qdf_mem_free_leaked_memory(qdf_list_t *domain)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_remove_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		kfree(node);
		status = qdf_list_remove_front(domain, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);
}
#endif
976 
/**
 * qdf_mem_debug_clean() - display memory leak debug info and free leaked
 * pointers
 *
 * Scans every debug domain for outstanding allocations; if any are found,
 * prints them, then either panics (CONFIG_HALT_KMEMLEAK) or reclaims them.
 *
 * Return: none
 */
static void qdf_mem_debug_clean(void)
{
	bool leaks_detected = false;
	int i;

	/* detect and print leaks */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i) {
		qdf_list_t *domain = qdf_mem_list_get(i);

		if (qdf_list_empty(domain))
			continue;

		leaks_detected = true;

		qdf_err("\nMemory leaks detected in the %s (Id %d) domain!\n\n",
			qdf_debug_domain_name(i), i);
		qdf_mem_domain_print(domain, qdf_err_printer, NULL);
	}

	if (leaks_detected) {
		/* panic, if enabled */
		qdf_mem_leak_panic();

		/* if we didn't crash, release the leaked memory */
		for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
			qdf_mem_free_leaked_memory(qdf_mem_list_get(i));
	}
}
1011 
/**
 * qdf_mem_debug_exit() - exit qdf memory debug functionality
 *
 * Handles any remaining leaks, then destroys the per-domain tracking lists
 * and the lock protecting them.
 *
 * Return: none
 */
static void qdf_mem_debug_exit(void)
{
	int i;

	qdf_net_buf_debug_exit();
	qdf_mem_debug_clean();

	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);
}
1029 
/**
 * qdf_mem_malloc_debug() - debug implementation of the QDF allocation API
 * @size: number of bytes to allocate for the caller
 * @file: file name of the call site
 * @line: line number of the call site
 *
 * Allocates @size bytes plus hidden header/trailer guard patterns, records
 * the allocation in the active debug domain's tracking list, and returns a
 * pointer to the caller-usable region.
 *
 * Return: pointer to the allocated memory, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *file, uint32_t line)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, file, line);
		return NULL;
	}

	/* try the pre-allocated pool first; those chunks carry no guards */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* time the allocation to flag unexpectedly long sleeps */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, qdf_mem_malloc_flags());
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, file, line);

	if (!header)
		return NULL;

	qdf_mem_header_init(header, size, file, line);
	ptr = qdf_mem_get_ptr(header);

	/* track the allocation so leaks and corruption can be detected */
	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(size);

	return ptr;
}
qdf_export_symbol(qdf_mem_malloc_debug);
1073 
/**
 * qdf_mem_free_debug() - debug implementation of the QDF free API
 * @ptr: pointer previously returned by qdf_mem_malloc_debug(), or NULL
 * @file: file name of the call site
 * @line: line number of the call site
 *
 * Validates the hidden header/trailer guards, unlinks the allocation from
 * its tracking list, and panics if corruption or a double free is found.
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	/* an address this small cannot have a debug header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		panic("Failed to free invalid memory location %pK", ptr);

	/* validate and unlink under the lock to avoid racing other frees */
	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	if (!error_bitmap) {
		header->freed = true;
		list_del_init(&header->node);
		qdf_mem_list_get(header->domain)->count--;
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* panics if any validation error was recorded above */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    file, line);

	qdf_mem_kmalloc_dec(header->size);
	kfree(header);
}
qdf_export_symbol(qdf_mem_free_debug);
1107 
1108 void qdf_mem_check_for_leaks(void)
1109 {
1110 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1111 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1112 
1113 	if (!qdf_list_empty(mem_list)) {
1114 		qdf_err("Memory leaks detected in %s domain!",
1115 			qdf_debug_domain_name(current_domain));
1116 		qdf_mem_domain_print(mem_list, qdf_err_printer, NULL);
1117 		qdf_mem_leak_panic();
1118 	}
1119 }
1120 
1121 #else
/* no-op stub: allocation tracking is only compiled in with MEMORY_DEBUG */
static void qdf_mem_debug_init(void) {}
1123 
/* no-op stub: allocation tracking is only compiled in with MEMORY_DEBUG */
static void qdf_mem_debug_exit(void) {}
1125 
1126 /**
1127  * qdf_mem_malloc() - allocation QDF memory
1128  * @size: Number of bytes of memory to allocate.
1129  *
1130  * This function will dynamicallly allocate the specified number of bytes of
1131  * memory.
1132  *
1133  * Return:
1134  * Upon successful allocate, returns a non-NULL pointer to the allocated
1135  * memory.  If this function is unable to allocate the amount of memory
1136  * specified (for any reason) it returns NULL.
1137  */
1138 void *qdf_mem_malloc(size_t size)
1139 {
1140 	void *ptr;
1141 
1142 	ptr = qdf_mem_prealloc_get(size);
1143 	if (ptr)
1144 		return ptr;
1145 
1146 	ptr = kzalloc(size, qdf_mem_malloc_flags());
1147 	if (!ptr)
1148 		return NULL;
1149 
1150 	qdf_mem_kmalloc_inc(ksize(ptr));
1151 
1152 	return ptr;
1153 }
1154 EXPORT_SYMBOL(qdf_mem_malloc);
1155 
1156 /**
1157  * qdf_mem_free() - free QDF memory
1158  * @ptr: Pointer to the starting address of the memory to be free'd.
1159  *
1160  * This function will free the memory pointed to by 'ptr'.
1161  *
1162  * Return: None
1163  */
1164 void qdf_mem_free(void *ptr)
1165 {
1166 	if (ptr == NULL)
1167 		return;
1168 
1169 	if (qdf_mem_prealloc_put(ptr))
1170 		return;
1171 
1172 	qdf_mem_kmalloc_dec(ksize(ptr));
1173 
1174 	kfree(ptr);
1175 }
1176 EXPORT_SYMBOL(qdf_mem_free);
1177 #endif
1178 
1179 /**
1180  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
1181  * @osdev: OS device handle pointer
1182  * @pages: Multi page information storage
1183  * @element_size: Each element size
1184  * @element_num: Total number of elements should be allocated
1185  * @memctxt: Memory context
1186  * @cacheable: Coherent memory or cacheable memory
1187  *
1188  * This function will allocate large size of memory over multiple pages.
1189  * Large size of contiguous memory allocation will fail frequently, then
1190  * instead of allocate large memory by one shot, allocate through multiple, non
1191  * contiguous memory and combine pages when actual usage
1192  *
1193  * Return: None
1194  */
1195 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1196 			       struct qdf_mem_multi_page_t *pages,
1197 			       size_t element_size, uint16_t element_num,
1198 			       qdf_dma_context_t memctxt, bool cacheable)
1199 {
1200 	uint16_t page_idx;
1201 	struct qdf_mem_dma_page_t *dma_pages;
1202 	void **cacheable_pages = NULL;
1203 	uint16_t i;
1204 
1205 	pages->num_element_per_page = PAGE_SIZE / element_size;
1206 	if (!pages->num_element_per_page) {
1207 		qdf_print("Invalid page %d or element size %d",
1208 			  (int)PAGE_SIZE, (int)element_size);
1209 		goto out_fail;
1210 	}
1211 
1212 	pages->num_pages = element_num / pages->num_element_per_page;
1213 	if (element_num % pages->num_element_per_page)
1214 		pages->num_pages++;
1215 
1216 	if (cacheable) {
1217 		/* Pages information storage */
1218 		pages->cacheable_pages = qdf_mem_malloc(
1219 			pages->num_pages * sizeof(pages->cacheable_pages));
1220 		if (!pages->cacheable_pages) {
1221 			qdf_print("Cacheable page storage alloc fail");
1222 			goto out_fail;
1223 		}
1224 
1225 		cacheable_pages = pages->cacheable_pages;
1226 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1227 			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
1228 			if (!cacheable_pages[page_idx]) {
1229 				qdf_print("cacheable page alloc fail, pi %d",
1230 					  page_idx);
1231 				goto page_alloc_fail;
1232 			}
1233 		}
1234 		pages->dma_pages = NULL;
1235 	} else {
1236 		pages->dma_pages = qdf_mem_malloc(
1237 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1238 		if (!pages->dma_pages) {
1239 			qdf_print("dmaable page storage alloc fail");
1240 			goto out_fail;
1241 		}
1242 
1243 		dma_pages = pages->dma_pages;
1244 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1245 			dma_pages->page_v_addr_start =
1246 				qdf_mem_alloc_consistent(osdev, osdev->dev,
1247 					 PAGE_SIZE,
1248 					&dma_pages->page_p_addr);
1249 			if (!dma_pages->page_v_addr_start) {
1250 				qdf_print("dmaable page alloc fail pi %d",
1251 					page_idx);
1252 				goto page_alloc_fail;
1253 			}
1254 			dma_pages->page_v_addr_end =
1255 				dma_pages->page_v_addr_start + PAGE_SIZE;
1256 			dma_pages++;
1257 		}
1258 		pages->cacheable_pages = NULL;
1259 	}
1260 	return;
1261 
1262 page_alloc_fail:
1263 	if (cacheable) {
1264 		for (i = 0; i < page_idx; i++)
1265 			qdf_mem_free(pages->cacheable_pages[i]);
1266 		qdf_mem_free(pages->cacheable_pages);
1267 	} else {
1268 		dma_pages = pages->dma_pages;
1269 		for (i = 0; i < page_idx; i++) {
1270 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1271 				dma_pages->page_v_addr_start,
1272 				dma_pages->page_p_addr, memctxt);
1273 			dma_pages++;
1274 		}
1275 		qdf_mem_free(pages->dma_pages);
1276 	}
1277 
1278 out_fail:
1279 	pages->cacheable_pages = NULL;
1280 	pages->dma_pages = NULL;
1281 	pages->num_pages = 0;
1282 	return;
1283 }
1284 EXPORT_SYMBOL(qdf_mem_multi_pages_alloc);
1285 
1286 /**
1287  * qdf_mem_multi_pages_free() - free large size of kernel memory
1288  * @osdev: OS device handle pointer
1289  * @pages: Multi page information storage
1290  * @memctxt: Memory context
1291  * @cacheable: Coherent memory or cacheable memory
1292  *
1293  * This function will free large size of memory over multiple pages.
1294  *
1295  * Return: None
1296  */
1297 void qdf_mem_multi_pages_free(qdf_device_t osdev,
1298 			      struct qdf_mem_multi_page_t *pages,
1299 			      qdf_dma_context_t memctxt, bool cacheable)
1300 {
1301 	unsigned int page_idx;
1302 	struct qdf_mem_dma_page_t *dma_pages;
1303 
1304 	if (cacheable) {
1305 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1306 			qdf_mem_free(pages->cacheable_pages[page_idx]);
1307 		qdf_mem_free(pages->cacheable_pages);
1308 	} else {
1309 		dma_pages = pages->dma_pages;
1310 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1311 			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
1312 				dma_pages->page_v_addr_start,
1313 				dma_pages->page_p_addr, memctxt);
1314 			dma_pages++;
1315 		}
1316 		qdf_mem_free(pages->dma_pages);
1317 	}
1318 
1319 	pages->cacheable_pages = NULL;
1320 	pages->dma_pages = NULL;
1321 	pages->num_pages = 0;
1322 	return;
1323 }
1324 EXPORT_SYMBOL(qdf_mem_multi_pages_free);
1325 
1326 /**
1327  * qdf_mem_multi_page_link() - Make links for multi page elements
1328  * @osdev: OS device handle pointer
1329  * @pages: Multi page information storage
1330  * @elem_size: Single element size
1331  * @elem_count: elements count should be linked
1332  * @cacheable: Coherent memory or cacheable memory
1333  *
1334  * This function will make links for multi page allocated structure
1335  *
1336  * Return: 0 success
1337  */
1338 int qdf_mem_multi_page_link(qdf_device_t osdev,
1339 		struct qdf_mem_multi_page_t *pages,
1340 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
1341 {
1342 	uint16_t i, i_int;
1343 	void *page_info;
1344 	void **c_elem = NULL;
1345 	uint32_t num_link = 0;
1346 
1347 	for (i = 0; i < pages->num_pages; i++) {
1348 		if (cacheable)
1349 			page_info = pages->cacheable_pages[i];
1350 		else
1351 			page_info = pages->dma_pages[i].page_v_addr_start;
1352 
1353 		if (!page_info)
1354 			return -ENOMEM;
1355 
1356 		c_elem = (void **)page_info;
1357 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
1358 			if (i_int == (pages->num_element_per_page - 1)) {
1359 				if (cacheable)
1360 					*c_elem = pages->
1361 						cacheable_pages[i + 1];
1362 				else
1363 					*c_elem = pages->
1364 						dma_pages[i + 1].
1365 							page_v_addr_start;
1366 				num_link++;
1367 				break;
1368 			} else {
1369 				*c_elem =
1370 					(void *)(((char *)c_elem) + elem_size);
1371 			}
1372 			num_link++;
1373 			c_elem = (void **)*c_elem;
1374 
1375 			/* Last link established exit */
1376 			if (num_link == (elem_count - 1))
1377 				break;
1378 		}
1379 	}
1380 
1381 	if (c_elem)
1382 		*c_elem = NULL;
1383 
1384 	return 0;
1385 }
1386 EXPORT_SYMBOL(qdf_mem_multi_page_link);
1387 
1388 /**
1389  * qdf_mem_copy() - copy memory
1390  * @dst_addr: Pointer to destination memory location (to copy to)
1391  * @src_addr: Pointer to source memory location (to copy from)
1392  * @num_bytes: Number of bytes to copy.
1393  *
1394  * Copy host memory from one location to another, similar to memcpy in
1395  * standard C.  Note this function does not specifically handle overlapping
1396  * source and destination memory locations.  Calling this function with
1397  * overlapping source and destination memory locations will result in
1398  * unpredictable results.  Use qdf_mem_move() if the memory locations
1399  * for the source and destination are overlapping (or could be overlapping!)
1400  *
1401  * Return: none
1402  */
1403 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1404 {
1405 	if (0 == num_bytes) {
1406 		/* special case where dst_addr or src_addr can be NULL */
1407 		return;
1408 	}
1409 
1410 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1411 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1412 			  "%s called with NULL parameter, source:%pK destination:%pK",
1413 			  __func__, src_addr, dst_addr);
1414 		QDF_ASSERT(0);
1415 		return;
1416 	}
1417 	memcpy(dst_addr, src_addr, num_bytes);
1418 }
1419 EXPORT_SYMBOL(qdf_mem_copy);
1420 
1421 /**
1422  * qdf_mem_zero() - zero out memory
1423  * @ptr: pointer to memory that will be set to zero
1424  * @num_bytes: number of bytes zero
1425  *
1426  * This function sets the memory location to all zeros, essentially clearing
1427  * the memory.
1428  *
1429  * Return: None
1430  */
1431 void qdf_mem_zero(void *ptr, uint32_t num_bytes)
1432 {
1433 	if (0 == num_bytes) {
1434 		/* special case where ptr can be NULL */
1435 		return;
1436 	}
1437 
1438 	if (ptr == NULL) {
1439 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1440 			  "%s called with NULL parameter ptr", __func__);
1441 		return;
1442 	}
1443 	memset(ptr, 0, num_bytes);
1444 }
1445 EXPORT_SYMBOL(qdf_mem_zero);
1446 
1447 /**
1448  * qdf_mem_set() - set (fill) memory with a specified byte value.
1449  * @ptr: Pointer to memory that will be set
1450  * @num_bytes: Number of bytes to be set
1451  * @value: Byte set in memory
1452  *
1453  * Return: None
1454  */
1455 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
1456 {
1457 	if (ptr == NULL) {
1458 		qdf_print("%s called with NULL parameter ptr", __func__);
1459 		return;
1460 	}
1461 	memset(ptr, value, num_bytes);
1462 }
1463 EXPORT_SYMBOL(qdf_mem_set);
1464 
1465 /**
1466  * qdf_mem_move() - move memory
1467  * @dst_addr: pointer to destination memory location (to move to)
1468  * @src_addr: pointer to source memory location (to move from)
1469  * @num_bytes: number of bytes to move.
1470  *
1471  * Move host memory from one location to another, similar to memmove in
1472  * standard C.  Note this function *does* handle overlapping
1473  * source and destination memory locations.
1474 
1475  * Return: None
1476  */
1477 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
1478 {
1479 	if (0 == num_bytes) {
1480 		/* special case where dst_addr or src_addr can be NULL */
1481 		return;
1482 	}
1483 
1484 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1485 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1486 			  "%s called with NULL parameter, source:%pK destination:%pK",
1487 			  __func__, src_addr, dst_addr);
1488 		QDF_ASSERT(0);
1489 		return;
1490 	}
1491 	memmove(dst_addr, src_addr, num_bytes);
1492 }
1493 EXPORT_SYMBOL(qdf_mem_move);
1494 
1495 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
1496 /**
1497  * qdf_mem_alloc_consistent() - allocates consistent qdf memory
1498  * @osdev: OS device handle
1499  * @dev: Pointer to device handle
1500  * @size: Size to be allocated
1501  * @phy_addr: Physical address
1502  *
1503  * Return: pointer of allocated memory or null if memory alloc fails
1504  */
1505 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
1506 			       qdf_dma_addr_t *phy_addr)
1507 {
1508 	void *vaddr;
1509 
1510 	vaddr = qdf_mem_malloc(size);
1511 	*phy_addr = ((uintptr_t) vaddr);
1512 	/* using this type conversion to suppress "cast from pointer to integer
1513 	 * of different size" warning on some platforms
1514 	 */
1515 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
1516 	if (vaddr)
1517 		qdf_mem_dma_inc(ksize(vaddr));
1518 	return vaddr;
1519 }
1520 
1521 #elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
1522 #define QCA8074_RAM_BASE 0x50000000
1523 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
1524 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
1525 			       qdf_dma_addr_t *phy_addr)
1526 {
1527 	void *vaddr = NULL;
1528 	int i;
1529 
1530 	*phy_addr = 0;
1531 
1532 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
1533 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
1534 					   qdf_mem_malloc_flags());
1535 
1536 		if (!vaddr) {
1537 			qdf_print("%s failed , size: %zu!\n", __func__, size);
1538 			return NULL;
1539 		}
1540 
1541 		if (*phy_addr >= QCA8074_RAM_BASE)
1542 			return vaddr;
1543 
1544 		dma_free_coherent(dev, size, vaddr, *phy_addr);
1545 	}
1546 
1547 	return NULL;
1548 }
1549 #else
1550 void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
1551 			       qdf_dma_addr_t *phy_addr)
1552 {
1553 	void *ptr;
1554 
1555 	ptr = dma_alloc_coherent(dev, size, phy_addr, qdf_mem_malloc_flags());
1556 	if (!ptr) {
1557 		qdf_warn("Warning: unable to alloc consistent memory of size %zu!\n",
1558 			 size);
1559 		return NULL;
1560 	}
1561 
1562 	qdf_mem_dma_inc(size);
1563 
1564 	return ptr;
1565 }
1566 
1567 #endif
1568 EXPORT_SYMBOL(qdf_mem_alloc_consistent);
1569 
1570 #if defined(A_SIMOS_DEVHOST) ||  defined(HIF_SDIO) || defined(HIF_USB)
1571 /**
1572  * qdf_mem_free_consistent() - free consistent qdf memory
1573  * @osdev: OS device handle
1574  * @size: Size to be allocated
1575  * @vaddr: virtual address
1576  * @phy_addr: Physical address
1577  * @mctx: Pointer to DMA context
1578  *
1579  * Return: none
1580  */
1581 inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
1582 				    qdf_size_t size, void *vaddr,
1583 				    qdf_dma_addr_t phy_addr,
1584 				    qdf_dma_context_t memctx)
1585 {
1586 	qdf_mem_dma_dec(ksize(vaddr));
1587 	qdf_mem_free(vaddr);
1588 	return;
1589 }
1590 
1591 #else
/* release DMA-coherent memory and decrement the DMA allocation counter */
inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
				    qdf_size_t size, void *vaddr,
				    qdf_dma_addr_t phy_addr,
				    qdf_dma_context_t memctx)
{
	dma_free_coherent(dev, size, vaddr, phy_addr);
	qdf_mem_dma_dec(size);
}
1600 
1601 #endif
1602 EXPORT_SYMBOL(qdf_mem_free_consistent);
1603 
1604 /**
1605  * qdf_mem_dma_sync_single_for_device() - assign memory to device
1606  * @osdev: OS device handle
1607  * @bus_addr: dma address to give to the device
1608  * @size: Size of the memory block
1609  * @direction: direction data will be DMAed
1610  *
1611  * Assign memory to the remote device.
1612  * The cache lines are flushed to ram or invalidated as needed.
1613  *
1614  * Return: none
1615  */
1616 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
1617 					qdf_dma_addr_t bus_addr,
1618 					qdf_size_t size,
1619 					enum dma_data_direction direction)
1620 {
1621 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
1622 }
1623 EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);
1624 
1625 /**
1626  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
1627  * @osdev: OS device handle
1628  * @bus_addr: dma address to give to the cpu
1629  * @size: Size of the memory block
1630  * @direction: direction data will be DMAed
1631  *
1632  * Assign memory to the CPU.
1633  *
1634  * Return: none
1635  */
1636 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
1637 				     qdf_dma_addr_t bus_addr,
1638 				     qdf_size_t size,
1639 				     enum dma_data_direction direction)
1640 {
1641 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
1642 }
1643 EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_cpu);
1644 
/**
 * qdf_mem_init() - initialize the qdf memory module
 *
 * Brings up allocation tracking (a no-op unless MEMORY_DEBUG is set) and
 * then the debugfs entries that expose the memory counters.
 *
 * Return: none
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
1651 EXPORT_SYMBOL(qdf_mem_init);
1652 
/**
 * qdf_mem_exit() - tear down the qdf memory module
 *
 * Reverses qdf_mem_init(), removing the debugfs entries and allocation
 * tracking in the opposite order of initialization.
 *
 * Return: none
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_mem_debug_exit();
}
1659 EXPORT_SYMBOL(qdf_mem_exit);
1660 
1661 /**
1662  * qdf_ether_addr_copy() - copy an Ethernet address
1663  *
1664  * @dst_addr: A six-byte array Ethernet address destination
1665  * @src_addr: A six-byte array Ethernet address source
1666  *
1667  * Please note: dst & src must both be aligned to u16.
1668  *
1669  * Return: none
1670  */
1671 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
1672 {
1673 	if ((dst_addr == NULL) || (src_addr == NULL)) {
1674 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
1675 			  "%s called with NULL parameter, source:%pK destination:%pK",
1676 			  __func__, src_addr, dst_addr);
1677 		QDF_ASSERT(0);
1678 		return;
1679 	}
1680 	ether_addr_copy(dst_addr, src_addr);
1681 }
1682 EXPORT_SYMBOL(qdf_ether_addr_copy);
1683 
1684