/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: qdf_mem
 * This file provides OS dependent memory management APIs
 */

#include "qdf_debugfs.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "qdf_module.h"
#include <qdf_trace.h>
#include "qdf_atomic.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#ifdef CONFIG_MCL
#include <host_diag_core_event.h>
#else
#define host_log_low_resource_failure(code) do {} while (0)
#endif

#if defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include <net/cnss_prealloc.h>
#endif

#ifdef MEMORY_DEBUG
#include <qdf_list.h>
qdf_list_t qdf_mem_list;
qdf_spinlock_t qdf_mem_list_lock;

static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
					0x67, 0x68 };
static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
					0x86, 0x87 };

/**
 * struct s_qdf_mem_struct - memory object to debug
 * @node:	node in the allocation tracking list
 * @file_name:	name of the file from which the allocation was made
 * @line_num:	line number of the allocation call site
 * @size:	size of the allocation, in bytes
 * @header:	guard bytes placed just before the caller's memory
 * @in_use:	reference count on this allocation
 */
struct s_qdf_mem_struct {
	qdf_list_node_t node;
	char *file_name;
	unsigned int line_num;
	unsigned int size;
	uint8_t header[8];
	qdf_atomic_t in_use;
};
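
/*
 * For reference, a debug allocation of 'size' bytes is laid out as a single
 * kzalloc'd block (see qdf_mem_malloc_debug() below):
 *
 *	[ struct s_qdf_mem_struct       ]  bookkeeping; ends in header[8]
 *	[ caller's memory, 'size' bytes ]  address returned to the caller
 *	[ WLAN_MEM_TAIL, 8 guard bytes  ]
 *
 * qdf_mem_free() recovers the bookkeeping struct by subtracting one struct
 * width from the caller's pointer and verifies both guard patterns.
 */
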
#endif /* MEMORY_DEBUG */

/* Preprocessor Definitions and Constants */
#define QDF_GET_MEMORY_TIME_THRESHOLD 300


u_int8_t prealloc_disabled = 1;
qdf_declare_param(prealloc_disabled, byte);
EXPORT_SYMBOL(prealloc_disabled);

#if defined WLAN_DEBUGFS && defined MEMORY_DEBUG

/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc:	total outstanding kmalloc allocations, in bytes
 * @dma:	total outstanding dma allocations, in bytes
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
} qdf_mem_stat;


/**
 * struct __qdf_mem_info - memory statistics
 * @file_name:	the file which allocated memory
 * @line_num:	the line at which allocation happened
 * @size:	the size of allocation
 * @count:	how many allocations of the same type
 *
 */
struct __qdf_mem_info {
	char *file_name;
	unsigned int line_num;
	unsigned int size;
	unsigned int count;
};

/* Debugfs root directory for qdf_mem */
static struct dentry *qdf_mem_debugfs_root;

/*
 * A table to identify duplicates in close proximity. The table depth defines
 * the proximity scope. A deeper table takes more time. Choose an optimal
 * value.
 */
#define QDF_MEM_STAT_TABLE_SIZE 4
static struct __qdf_mem_info qdf_mem_info_table[QDF_MEM_STAT_TABLE_SIZE];

static inline void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}

static inline void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}


/**
 * qdf_mem_info_table_init() - initialize the stat table
 *
 * Return: None
 */
static void qdf_mem_info_table_init(void)
{
	memset(&qdf_mem_info_table, 0, sizeof(qdf_mem_info_table));
}

/**
 * qdf_mem_get_node() - increase the node usage count
 * @n:	node
 *
 * An increased usage count blocks the memory from getting released.
 * Initially the usage count is incremented from qdf_mem_malloc_debug().
 * The corresponding qdf_mem_free() decrements the reference count and frees
 * the memory when the usage count reaches zero. The decrement-and-test in
 * qdf_mem_free() is an atomic operation, which avoids any race condition.
 *
 * If a caller wants to take ownership of an allocated memory block, it can
 * call this function with the associated node.
 *
 * Return: None
 *
 */
static void qdf_mem_get_node(qdf_list_node_t *n)
{
	struct s_qdf_mem_struct *m = container_of(n, typeof(*m), node);

	qdf_atomic_inc(&m->in_use);
}

/**
 * qdf_mem_put_node_free() - decrease the node usage count and free memory
 * @n:	node
 *
 * Decrements the usage count and releases the memory when the count reaches
 * zero. The usage count is decremented and tested against zero in
 * qdf_mem_free(). If the count is 0, the node and the associated memory are
 * freed.
 *
 * Return: None
 *
 */
static void qdf_mem_put_node_free(qdf_list_node_t *n)
{
	struct s_qdf_mem_struct *m = container_of(n, typeof(*m), node);

	/* qdf_mem_free() expects the same address returned by
	 * qdf_mem_malloc_debug(), which is 'm + 1', i.e. the first byte
	 * after the bookkeeping struct.
	 */
	qdf_mem_free(m + 1);
}

/**
 * qdf_mem_get_first() - get the first node.
 *
 * Return: node
 */
static qdf_list_node_t *qdf_mem_get_first(void)
{
	QDF_STATUS status;
	qdf_list_node_t *node = NULL;

	qdf_spin_lock_bh(&qdf_mem_list_lock);
	status = qdf_list_peek_front(&qdf_mem_list, &node);
	if (QDF_STATUS_SUCCESS == status)
		qdf_mem_get_node(node);
	qdf_spin_unlock_bh(&qdf_mem_list_lock);

	return node;
}

/**
 * qdf_mem_get_next() - get the next node
 * @n: node
 *
 * Return: next node
 */
static qdf_list_node_t *qdf_mem_get_next(qdf_list_node_t *n)
{
	QDF_STATUS status;
	qdf_list_node_t *node = NULL;

	qdf_spin_lock_bh(&qdf_mem_list_lock);
	status = qdf_list_peek_next(&qdf_mem_list, n, &node);
	if (QDF_STATUS_SUCCESS == status)
		qdf_mem_get_node(node);

	qdf_spin_unlock_bh(&qdf_mem_list_lock);

	qdf_mem_put_node_free(n);

	return node;
}
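
/*
 * Together, qdf_mem_get_first()/qdf_mem_get_next() implement a ref-counted
 * cursor over the allocation list: each step takes a reference on the next
 * node before dropping (and possibly freeing) the previous one, so a node
 * cannot disappear while the seq_file iterator is parked on it. A minimal
 * sketch of the resulting walk (illustrative only, not part of the driver):
 *
 *	qdf_list_node_t *node = qdf_mem_get_first();
 *
 *	while (node) {
 *		// inspect container_of(node, struct s_qdf_mem_struct, node)
 *		node = qdf_mem_get_next(node); // releases the previous node
 *	}
 */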

static void qdf_mem_seq_print_header(struct seq_file *seq)
{
	seq_puts(seq, "\n");
	seq_puts(seq, "filename                             line         size x    no  [ total ]\n");
	seq_puts(seq, "\n");
}

/**
 * qdf_mem_info_table_insert() - insert node into an array
 * @n:	node
 *
 * Return:
 *	true  - success
 *	false - failure
 */
static bool qdf_mem_info_table_insert(qdf_list_node_t *n)
{
	int i;
	struct __qdf_mem_info *t = qdf_mem_info_table;
	bool dup;
	bool consumed;
	struct s_qdf_mem_struct *m = (struct s_qdf_mem_struct *)n;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		if (!t[i].count) {
			t[i].file_name = m->file_name;
			t[i].line_num = m->line_num;
			t[i].size = m->size;
			t[i].count++;
			break;
		}
		dup = !strcmp(t[i].file_name, m->file_name) &&
		      (t[i].line_num == m->line_num) &&
		      (t[i].size == m->size);
		if (dup) {
			t[i].count++;
			break;
		}
	}

	consumed = (i < QDF_MEM_STAT_TABLE_SIZE);

	return consumed;
}

/**
 * qdf_mem_seq_print() - print the table using seq_printf
 * @seq:	seq_file handle
 *
 * The node table is cleared once printed.
 *
 * Return: None
 */
static void qdf_mem_seq_print(struct seq_file *seq)
{
	int i;
	struct __qdf_mem_info *t = qdf_mem_info_table;

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE && t[i].count; i++) {
		seq_printf(seq,
			   "%-35s%6d\t%6d x %4d\t[%7d]\n",
			   kbasename(t[i].file_name),
			   t[i].line_num, t[i].size,
			   t[i].count,
			   t[i].size * t[i].count);
	}

	qdf_mem_info_table_init();
}

/**
 * qdf_mem_seq_start() - sequential callback to start
 * @seq: seq_file handle
 * @pos: The start position of the sequence
 *
 * Return:
 *	SEQ_START_TOKEN - prints the header
 *	Non-zero value	- node
 *	NULL		- end of the sequence
 */
static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		qdf_mem_info_table_init();
		return SEQ_START_TOKEN;
	} else if (seq->private) {
		return qdf_mem_get_next(seq->private);
	}

	return NULL;
}

/**
 * qdf_mem_seq_next() - next sequential callback
 * @seq:	seq_file handle
 * @v:		the current iterator
 * @pos:	the current position [not used]
 *
 * Get the next node and release the previous node.
 *
 * Return:
 *	Non-zero value	- next node
 *	NULL		- no more nodes to process in the list
 */
static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	qdf_list_node_t *node;

	++*pos;

	if (v == SEQ_START_TOKEN)
		node = qdf_mem_get_first();
	else
		node = qdf_mem_get_next(v);

	return node;
}

/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq:	seq_file handle
 * @v:		current iterator
 *
 * Return:	None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v)
{
	seq->private = v;
}

/**
 * qdf_mem_seq_show() - print sequential callback
 * @seq:	seq_file handle
 * @v:		current iterator
 *
 * Return: 0 - success
 */
static int qdf_mem_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		qdf_mem_seq_print_header(seq);
		return 0;
	}

	while (!qdf_mem_info_table_insert(v))
		qdf_mem_seq_print(seq);

	return 0;
}

/* sequential file operation table */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};


static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}

/* debugfs file operation table */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
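
/*
 * With MEMORY_DEBUG and WLAN_DEBUGFS enabled, the seq_file plumbing above
 * exposes the allocation table through debugfs. Assuming debugfs is mounted
 * at /sys/kernel/debug and the qdf root directory is named "qdf" (the real
 * location comes from qdf_debugfs_get_root()), a session might look like:
 *
 *	# cat /sys/kernel/debug/qdf/mem/list
 *
 * which prints one row per unique (filename, line, size) triple: the file
 * and line of the call site, the allocation size, the number of identical
 * allocations, and their combined footprint. The neighboring "kmalloc" and
 * "dma" files report the outstanding byte counts kept by the inc/dec
 * helpers.
 */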

/**
 * qdf_mem_debugfs_init() - initialize routine
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	debugfs_create_file("list",
			    S_IRUSR | S_IWUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	debugfs_create_atomic_t("kmalloc",
				S_IRUSR | S_IWUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR | S_IWUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	return QDF_STATUS_SUCCESS;
}


/**
 * qdf_mem_debugfs_exit() - cleanup routine
 *
 * Return: None
 */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}

#else /* WLAN_DEBUGFS && MEMORY_DEBUG */

static inline void qdf_mem_kmalloc_inc(qdf_size_t size) {}
static inline void qdf_mem_dma_inc(qdf_size_t size) {}
static inline void qdf_mem_kmalloc_dec(qdf_size_t size) {}
static inline void qdf_mem_dma_dec(qdf_size_t size) {}

#ifdef MEMORY_DEBUG

static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}

#endif

#endif /* WLAN_DEBUGFS && MEMORY_DEBUG */

/**
 * __qdf_mempool_init() - Create and initialize a memory pool
 *
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, negative error code on failure
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags)
{
	__qdf_mempool_ctxt_t *new_pool = NULL;
	u_int32_t align = L1_CACHE_BYTES;
	unsigned long aligned_pool_mem;
	int pool_id;
	int i;

	if (prealloc_disabled) {
		/* TBD: We can maintain a list of pools in qdf_device_t
		 * to help debugging
		 * when pre-allocation is not enabled
		 */
		new_pool = (__qdf_mempool_ctxt_t *)
			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
		if (new_pool == NULL)
			return -ENOMEM;

		memset(new_pool, 0, sizeof(*new_pool));
		/* TBD: define flags for zeroing buffers etc */
		new_pool->flags = flags;
		new_pool->elem_size = elem_size;
		new_pool->max_elem = elem_cnt;
		*pool_addr = new_pool;
		return 0;
	}

	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
		if (osdev->mem_pool[pool_id] == NULL)
			break;
	}

	if (pool_id == MAX_MEM_POOLS)
		return -ENOMEM;

	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
	if (new_pool == NULL)
		return -ENOMEM;

	memset(new_pool, 0, sizeof(*new_pool));
	/* TBD: define flags for zeroing buffers etc */
	new_pool->flags = flags;
	new_pool->pool_id = pool_id;

	/* Round up the element size to cacheline */
	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
	new_pool->mem_size = elem_cnt * new_pool->elem_size +
				((align) ? (align - 1) : 0);

	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
	if (new_pool->pool_mem == NULL) {
		/* TBD: Check if we need get_free_pages above */
		kfree(new_pool);
		osdev->mem_pool[pool_id] = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&new_pool->lock);

	/* Initialize the free list: round the pool base address up to the
	 * requested alignment (the extra align - 1 bytes were allocated
	 * above to make room for this).
	 */
	aligned_pool_mem = (align) ?
		roundup((unsigned long)new_pool->pool_mem, align) :
		(unsigned long)new_pool->pool_mem;
	STAILQ_INIT(&new_pool->free_list);

	for (i = 0; i < elem_cnt; i++)
		STAILQ_INSERT_TAIL(&(new_pool->free_list),
			(mempool_elem_t *)(aligned_pool_mem +
			(new_pool->elem_size * i)), mempool_entry);

	new_pool->free_cnt = elem_cnt;
	*pool_addr = new_pool;
	return 0;
}
EXPORT_SYMBOL(__qdf_mempool_init);
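
/*
 * A minimal sketch of the pool lifecycle as seen by a caller (illustrative
 * only; 'osdev' and the element type are assumed to exist in the caller):
 *
 *	__qdf_mempool_t pool;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct my_elem), 0))
 *		return -ENOMEM;        // pool creation failed
 *
 *	struct my_elem *e = __qdf_mempool_alloc(osdev, pool);
 *	...
 *	__qdf_mempool_free(osdev, pool, e);
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * Note that when prealloc_disabled is set, alloc/free degenerate to plain
 * qdf_mem_malloc()/qdf_mem_free() calls and no free list is maintained.
 */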

/**
 * __qdf_mempool_destroy() - Destroy the memory pool
 * @osdev: platform device object
 * @pool: handle to the memory pool
 *
 * Return: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
{
	int pool_id = 0;

	if (!pool)
		return;

	if (prealloc_disabled) {
		kfree(pool);
		return;
	}

	pool_id = pool->pool_id;

	/* TBD: Check if free count matches elem_cnt if debug is enabled */
	kfree(pool->pool_mem);
	kfree(pool);
	osdev->mem_pool[pool_id] = NULL;
}
EXPORT_SYMBOL(__qdf_mempool_destroy);

/**
 * __qdf_mempool_alloc() - Allocate an element from the memory pool
 *
 * @osdev: platform device object
 * @pool: handle to the memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
{
	void *buf = NULL;

	if (!pool)
		return NULL;

	if (prealloc_disabled)
		return qdf_mem_malloc(pool->elem_size);

	spin_lock_bh(&pool->lock);

	buf = STAILQ_FIRST(&pool->free_list);
	if (buf != NULL) {
		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
		pool->free_cnt--;
	}

	/* TBD: Update free count if debug is enabled */
	spin_unlock_bh(&pool->lock);

	return buf;
}
EXPORT_SYMBOL(__qdf_mempool_alloc);

/**
 * __qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
{
	if (!pool)
		return;

	if (prealloc_disabled)
		return qdf_mem_free(buf);

	spin_lock_bh(&pool->lock);
	pool->free_cnt++;

	STAILQ_INSERT_TAIL
		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
	spin_unlock_bh(&pool->lock);
}
EXPORT_SYMBOL(__qdf_mempool_free);

/**
 * qdf_mem_alloc_outline() - allocate QDF memory
 * @osdev: platform device object
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *
qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
{
	return qdf_mem_malloc(size);
}
EXPORT_SYMBOL(qdf_mem_alloc_outline);

/**
 * qdf_mem_free_outline() - QDF memory free API
 * @buf: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'buf'. It also checks
 * whether the memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void
qdf_mem_free_outline(void *buf)
{
	qdf_mem_free(buf);
}
EXPORT_SYMBOL(qdf_mem_free_outline);

/**
 * qdf_mem_zero_outline() - zero out memory
 * @buf: pointer to memory that will be set to zero
 * @size: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: none
 */
void
qdf_mem_zero_outline(void *buf, qdf_size_t size)
{
	qdf_mem_zero(buf, size);
}
EXPORT_SYMBOL(qdf_mem_zero_outline);

#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
/**
 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
 * @size: the number of bytes to allocate
 *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function
 * returns a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned
 * instead.
 *
 * Return: NULL on failure, non-NULL on success
 */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *mem;

	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	mem = wcnss_prealloc_get(size);
	if (mem)
		memset(mem, 0, size);

	return mem;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
#else
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}

static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
#endif /* CONFIG_WCNSS_MEM_PRE_ALLOC */
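
/*
 * The pre-allocation pair above means every allocation follows the same
 * pattern: large requests are first tried against the wcnss pre-allocated
 * slabs, and every free first offers the pointer back to that pool. For
 * example, assuming WCNSS_PRE_ALLOC_GET_THRESHOLD were 8 KB (the actual
 * value comes from the cnss headers), an 8192-byte request would fall
 * through to kzalloc() while a 16384-byte request would be served from the
 * pre-allocated pool when one is available, avoiding a large-order
 * allocation that is likely to fail on a fragmented system.
 */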

/* External Function implementation */
#ifdef MEMORY_DEBUG

/**
 * qdf_mem_init() - initialize qdf memory debug functionality
 *
 * Return: none
 */
void qdf_mem_init(void)
{
	/* Initializing the list with a maximum size of 60000 */
	qdf_list_create(&qdf_mem_list, 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);
	qdf_net_buf_debug_init();
	qdf_mem_debugfs_init();
	return;
}
EXPORT_SYMBOL(qdf_mem_init);

/**
 * qdf_mem_clean() - display memory leak debug info and free leaked pointers
 *
 * Return: none
 */
void qdf_mem_clean(void)
{
	uint32_t list_size;

	list_size = qdf_list_size(&qdf_mem_list);
	if (list_size) {
		qdf_list_node_t *node;
		QDF_STATUS qdf_status;

		struct s_qdf_mem_struct *mem_struct;
		char *prev_mleak_file = "";
		unsigned int prev_mleak_line_num = 0;
		unsigned int prev_mleak_sz = 0;
		unsigned int mleak_cnt = 0;

		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
			  "%s: List is not Empty. list_size %d ",
			  __func__, (int)list_size);

		do {
			qdf_spin_lock(&qdf_mem_list_lock);
			qdf_status =
				qdf_list_remove_front(&qdf_mem_list, &node);
			qdf_spin_unlock(&qdf_mem_list_lock);
			if (QDF_STATUS_SUCCESS == qdf_status) {
				mem_struct = (struct s_qdf_mem_struct *)node;
				/* Take care to log multiple memory leaks from
				 * the same place only once
				 */
				if (strcmp(prev_mleak_file,
					mem_struct->file_name)
				    || (prev_mleak_line_num !=
					mem_struct->line_num)
				    || (prev_mleak_sz != mem_struct->size)) {
					if (mleak_cnt != 0) {
						QDF_TRACE(QDF_MODULE_ID_QDF,
							  QDF_TRACE_LEVEL_FATAL,
							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
							  mleak_cnt,
							  prev_mleak_file,
							  prev_mleak_line_num,
							  prev_mleak_sz);
					}
					prev_mleak_file = mem_struct->file_name;
					prev_mleak_line_num =
						 mem_struct->line_num;
					prev_mleak_sz = mem_struct->size;
					mleak_cnt = 0;
				}
				mleak_cnt++;
				kfree((void *)mem_struct);
			}
		} while (qdf_status == QDF_STATUS_SUCCESS);

		/* Print the last memory leak from the module */
		if (mleak_cnt) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
				  "%d Time memory Leak@ File %s, @Line %d, size %d",
				  mleak_cnt, prev_mleak_file,
				  prev_mleak_line_num, prev_mleak_sz);
		}
#ifdef CONFIG_HALT_KMEMLEAK
		QDF_BUG(0);
#endif
	}
}
EXPORT_SYMBOL(qdf_mem_clean);

/**
 * qdf_mem_exit() - exit qdf memory debug functionality
 *
 * Return: none
 */
void qdf_mem_exit(void)
{
	qdf_mem_debugfs_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_clean();
	qdf_list_destroy(&qdf_mem_list);
}
EXPORT_SYMBOL(qdf_mem_exit);

/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @file_name: File name from which memory allocation is called
 * @line_num: Line number from which memory allocation is called
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check against memory leaks
 * and corruptions
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns %NULL.
 */
void *qdf_mem_malloc_debug(size_t size,
			char *file_name, uint32_t line_num)
{
	struct s_qdf_mem_struct *mem_struct;
	void *mem_ptr = NULL;
	uint32_t new_size;
	int flags = GFP_KERNEL;
	unsigned long time_before_kzalloc;

	if (size > (1024 * 1024) || size == 0) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: called with invalid arg; passed in %zu !!!",
			  __func__, size);
		host_log_low_resource_failure(WIFI_EVENT_MEMORY_FAILURE);
		return NULL;
	}

	mem_ptr = qdf_mem_prealloc_get(size);
	if (mem_ptr)
		return mem_ptr;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	/* room for the bookkeeping header plus the 8-byte WLAN_MEM_TAIL
	 * guard appended after the caller's memory
	 */
	new_size = size + sizeof(struct s_qdf_mem_struct) + 8;
	time_before_kzalloc = qdf_mc_timer_get_system_time();
	mem_struct = (struct s_qdf_mem_struct *)kzalloc(new_size, flags);
	/* log an error if kzalloc took longer than
	 * QDF_GET_MEMORY_TIME_THRESHOLD msec
	 */
	if (qdf_mc_timer_get_system_time() - time_before_kzalloc >=
					  QDF_GET_MEMORY_TIME_THRESHOLD)
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: kzalloc took %lu msec for size %zu called from %ps at line %d",
			  __func__,
			  qdf_mc_timer_get_system_time() - time_before_kzalloc,
			  size, (void *)_RET_IP_, line_num);

	if (mem_struct != NULL) {
		QDF_STATUS qdf_status;

		mem_struct->file_name = file_name;
		mem_struct->line_num = line_num;
		mem_struct->size = size;
		qdf_atomic_inc(&mem_struct->in_use);
		qdf_mem_kmalloc_inc(size);

		qdf_mem_copy(&mem_struct->header[0],
			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));

		qdf_mem_copy((uint8_t *) (mem_struct + 1) + size,
			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));

		qdf_spin_lock_irqsave(&qdf_mem_list_lock);
		qdf_status = qdf_list_insert_front(&qdf_mem_list,
						   &mem_struct->node);
		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
		if (QDF_STATUS_SUCCESS != qdf_status) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  "%s: Unable to insert node into List qdf_status %d",
				  __func__, qdf_status);
		}

		mem_ptr = (void *)(mem_struct + 1);
	}

	return mem_ptr;
}
EXPORT_SYMBOL(qdf_mem_malloc_debug);
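
/*
 * Layout arithmetic for the debug allocator, by way of example: for a
 * 100-byte request on a build where sizeof(struct s_qdf_mem_struct) is,
 * say, 40 bytes (the real value depends on the target ABI), a single
 * kzalloc of 40 + 100 + 8 = 148 bytes is made:
 *
 *	mem_struct          -> bytes   0..39   bookkeeping + header guard
 *	mem_struct + 1      -> bytes  40..139  returned to the caller
 *	(caller ptr) + size -> bytes 140..147  WLAN_MEM_TAIL guard
 *
 * Any write past the 100 caller-visible bytes lands in the tail guard and
 * is detected on free.
 */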

/**
 * qdf_mem_validate_node_for_free() - validate that the node is in a list
 * @qdf_node: node to check for being in a list
 *
 * qdf_node must not be NULL.
 *
 * Return: true if the node is validly linked in an anchored doubly linked
 * list
 */
static bool qdf_mem_validate_node_for_free(qdf_list_node_t *qdf_node)
{
	struct list_head *node = qdf_node;

	/*
	 * if the node is an empty list, it is not tied to an anchor node
	 * and must have been removed with list_del_init
	 */
	if (list_empty(node))
		return false;

	if (node->prev == NULL)
		return false;

	if (node->next == NULL)
		return false;

	if (node->prev->next != node)
		return false;

	if (node->next->prev != node)
		return false;

	return true;
}

/**
 * qdf_mem_free() - QDF memory free API
 * @ptr: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'ptr'. It also checks
 * whether the memory is corrupted or getting double freed and panics.
 *
 * Return: none
 */
void qdf_mem_free(void *ptr)
{
	struct s_qdf_mem_struct *mem_struct;

	/* freeing a null pointer is valid */
	if (qdf_unlikely(ptr == NULL))
		return;

	mem_struct = ((struct s_qdf_mem_struct *)ptr) - 1;

	if (qdf_unlikely(mem_struct == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
			  "%s: null mem_struct", __func__);
		QDF_BUG(0);
	}

	if (qdf_mem_prealloc_put(ptr))
		return;

	if (!qdf_atomic_dec_and_test(&mem_struct->in_use))
		return;

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);

	/*
	 * invalid memory access when checking the header/trailer
	 * would be a use after free and would indicate a double free
	 * or an invalid pointer passed.
	 */
	if (qdf_mem_cmp(mem_struct->header, &WLAN_MEM_HEADER[0],
			sizeof(WLAN_MEM_HEADER)))
		goto error;

	/*
	 * invalid memory access while checking the validity of the node
	 * would indicate corruption in the nodes pointed to
	 */
	if (!qdf_mem_validate_node_for_free(&mem_struct->node))
		goto error;

	/*
	 * invalid memory access here is unlikely and would imply
	 * that the size value was corrupted/incorrect.
	 * It is unlikely that the above checks would pass in a
	 * double free case.
	 */
	if (qdf_mem_cmp((uint8_t *) ptr + mem_struct->size,
			&WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL)))
		goto error;

	/*
	 * make the node an empty list before doing the spin unlock
	 * The empty list check will guarantee that we avoid a race condition.
	 */
	list_del_init(&mem_struct->node);
	qdf_mem_list.count--;
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	qdf_mem_kmalloc_dec(mem_struct->size);
	kfree(mem_struct);
	return;

error:
	if (!qdf_list_has_node(&qdf_mem_list, &mem_struct->node)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
			  "%s: Unallocated memory (double free?)",
			  __func__);
		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
		QDF_BUG(0);
	}

	if (qdf_mem_cmp(mem_struct->header, &WLAN_MEM_HEADER[0],
				sizeof(WLAN_MEM_HEADER))) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
			  "Memory Header is corrupted.");
		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
		QDF_BUG(0);
	}

	if (!qdf_mem_validate_node_for_free(&mem_struct->node)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
			  "Memory_struct is corrupted.");
		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
		QDF_BUG(0);
	}

	if (qdf_mem_cmp((uint8_t *) ptr + mem_struct->size,
			&WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL))) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
			  "Memory Trailer is corrupted. mem_info: Filename %s, line_num %d",
			  mem_struct->file_name, (int)mem_struct->line_num);
		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
		QDF_BUG(0);
	}

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
		  "%s unexpected error", __func__);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	QDF_BUG(0);
}
EXPORT_SYMBOL(qdf_mem_free);
#else

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc(size_t size)
{
	int flags = GFP_KERNEL;
	void *mem;

	mem = qdf_mem_prealloc_get(size);
	if (mem)
		return mem;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	return kzalloc(size, flags);
}
EXPORT_SYMBOL(qdf_mem_malloc);

/**
 * qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be free'd.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void qdf_mem_free(void *ptr)
{
	if (ptr == NULL)
		return;

	if (qdf_mem_prealloc_put(ptr))
		return;

	kfree(ptr);
}
EXPORT_SYMBOL(qdf_mem_free);
#endif

/**
 * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Each element size
 * @element_num: Total number of elements that should be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function allocates a large amount of memory over multiple pages.
 * Since large contiguous allocations fail frequently, instead of allocating
 * the whole block in one shot, the memory is allocated as multiple
 * non-contiguous pages, which are combined at the point of actual usage.
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint16_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(*pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			qdf_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				qdf_print("cacheable page alloc fail, pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages) {
			qdf_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
					 PAGE_SIZE,
					&dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
EXPORT_SYMBOL(qdf_mem_multi_pages_alloc);
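
/*
 * Sizing example for the multi-page allocator: with PAGE_SIZE = 4096 and
 * element_size = 300, num_element_per_page = 4096 / 300 = 13 elements per
 * page, so 100 elements need 100 / 13 = 7 full pages plus one partial page,
 * i.e. num_pages = 8. A hypothetical caller might do:
 *
 *	struct qdf_mem_multi_page_t pages;
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, 300, 100, memctxt, true);
 *	if (!pages.num_pages)
 *		return -ENOMEM;     // alloc failed; the fields were zeroed
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */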

/**
 * qdf_mem_multi_pages_free() - free large size of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function frees the memory that was allocated over multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int page_idx;
	struct qdf_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			qdf_mem_free(pages->cacheable_pages[page_idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
EXPORT_SYMBOL(qdf_mem_multi_pages_free);

/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: Number of elements to be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function makes links for a multi page allocated structure
 *
 * Return: 0 on success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
		struct qdf_mem_multi_page_t *pages,
		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				/* Last element of the page links to the
				 * first element of the next page
				 */
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	if (c_elem)
		*c_elem = NULL;

	return 0;
}
EXPORT_SYMBOL(qdf_mem_multi_page_link);
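
/*
 * The linking pass above turns the pages into one singly linked free list:
 * the first pointer-sized word of each element stores the address of the
 * next element, and the last element of a page points at the first element
 * of the following page. A consumer can then pop elements with plain
 * pointer chasing, sketched here under the assumption that the elements
 * were linked with qdf_mem_multi_page_link() over cacheable pages:
 *
 *	void **free_list = (void **)pages->cacheable_pages[0];
 *	void *elem = (void *)free_list;     // take the head element
 *	free_list = (void **)*free_list;    // advance to the next element
 *
 * The final element's embedded next pointer is set to NULL to terminate
 * the list.
 */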

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C.  Note this function does not specifically handle overlapping
 * source and destination memory locations.  Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results.  Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%p destination:%p",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memcpy(dst_addr, src_addr, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_copy);

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where ptr can be NULL */
		return;
	}

	if (ptr == NULL) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, 0, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_zero);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte to set in memory
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (ptr == NULL) {
		qdf_print("%s called with NULL parameter ptr", __func__);
		return;
	}
	memset(ptr, value, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_set);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C.  Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	if (0 == num_bytes) {
		/* special case where dst_addr or src_addr can be NULL */
		return;
	}

	if ((dst_addr == NULL) || (src_addr == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s called with NULL parameter, source:%p destination:%p",
			  __func__, src_addr, dst_addr);
		QDF_ASSERT(0);
		return;
	}
	memmove(dst_addr, src_addr, num_bytes);
}
EXPORT_SYMBOL(qdf_mem_move);
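
/*
 * To illustrate why qdf_mem_move() rather than qdf_mem_copy() must be used
 * for overlapping ranges: shifting a buffer left by one byte overlaps the
 * source and destination, which memmove handles and memcpy does not
 * guarantee. A small hypothetical example:
 *
 *	uint8_t buf[] = { 'a', 'b', 'c', 'd' };
 *
 *	qdf_mem_move(&buf[0], &buf[1], 3);  // buf becomes "bcdd" - defined
 *	qdf_mem_copy(&buf[0], &buf[1], 3);  // overlapping: unpredictable
 */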

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Return: pointer to allocated memory or NULL if the memory alloc fails
 */
void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
			       qdf_dma_addr_t *phy_addr)
{
	void *vaddr;

	vaddr = qdf_mem_malloc(size);
	*phy_addr = ((uintptr_t) vaddr);
	/* using this type conversion to suppress "cast from pointer to integer
	 * of different size" warning on some platforms
	 */
	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
	return vaddr;
}

#elif defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
#define QCA8074_RAM_BASE 0x50000000
#define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
			       qdf_dma_addr_t *phy_addr)
{
	int flags = GFP_KERNEL;
	void *alloc_mem = NULL;
	int alloc_try_high_mem = 0;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	*phy_addr = 0;

	/* Discard buffers whose physical address falls below
	 * QCA8074_RAM_BASE and retry until an acceptable one is found.
	 */
	while (alloc_try_high_mem++ < QDF_MEM_ALLOC_X86_MAX_RETRIES) {
		alloc_mem = dma_alloc_coherent(dev, size, phy_addr, flags);

		if (alloc_mem == NULL) {
			qdf_print("%s failed, size: %zu!\n", __func__, size);
			return NULL;
		}

		if (*phy_addr < QCA8074_RAM_BASE) {
			dma_free_coherent(dev, size, alloc_mem, *phy_addr);
			alloc_mem = NULL;
		} else
			break;
	}

	return alloc_mem;
}
#else
void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
			       qdf_dma_addr_t *phy_addr)
{
	int flags = GFP_KERNEL;
	void *alloc_mem = NULL;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	alloc_mem = dma_alloc_coherent(dev, size, phy_addr, flags);
	if (alloc_mem == NULL)
		qdf_print("%s Warning: unable to alloc consistent memory of size %zu!\n",
			__func__, size);
	qdf_mem_dma_inc(size);
	return alloc_mem;
}

#endif
EXPORT_SYMBOL(qdf_mem_alloc_consistent);
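
/*
 * Typical consistent-memory usage pattern (illustrative; 'ring_size' and
 * 'memctxt' are placeholders, and error handling is trimmed), pairing each
 * qdf_mem_alloc_consistent() with a qdf_mem_free_consistent() of the same
 * size and addresses:
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev, ring_size,
 *					 &paddr);
 *	if (!vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	// program 'paddr' into the device, use 'vaddr' from the CPU
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring_size, vaddr,
 *				paddr, memctxt);
 */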

#if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/**
 * qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory to be freed
 * @vaddr: virtual address
 * @phy_addr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
				    qdf_size_t size, void *vaddr,
				    qdf_dma_addr_t phy_addr,
				    qdf_dma_context_t memctx)
{
	qdf_mem_free(vaddr);
	return;
}

#else
inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
				    qdf_size_t size, void *vaddr,
				    qdf_dma_addr_t phy_addr,
				    qdf_dma_context_t memctx)
{
	dma_free_coherent(dev, size, vaddr, phy_addr);
	qdf_mem_dma_dec(size);
}

#endif
EXPORT_SYMBOL(qdf_mem_free_consistent);

/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}
EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);

/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_cpu);
1615