xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * This file provides OS dependent memory management APIs
23  */
24 
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46 
47 /* cnss prealloc maintains prealloc pools of 8 KB, 16 KB, 32 KB and so on,
48  * and allocates buffers from these pools for the wlan driver. When the wlan
49  * driver requests to free a buffer, cnss prealloc derives the slab_cache
50  * from the virtual address via the page struct to identify the prealloc
51  * pool id and put the buffer back into the pool. Kernel 5.17 removed
52  * slab_cache from the page struct, so add headroom to store the cache
53  * pointer at the beginning of the allocated buffer for later pool lookup.
54  */
55 #if defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE)
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
57 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
58 {
59 	return true;
60 }
61 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
62 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
63 {
64 	return false;
65 }
66 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
67 #else /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
68 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
69 {
70 	return false;
71 }
72 #endif /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
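
/*
 * Illustrative sketch (editorial addition, derived from the code in this
 * file): when add_headroom_for_cnss_prealloc_cache_ptr() returns true, a
 * prealloc buffer handed out by qdf_mem_prealloc_get() looks roughly like:
 *
 *   start of prealloc buffer              pointer returned to caller
 *        |                                      |
 *        v                                      v
 *        +--------------------------+------------------------------+
 *        | headroom: sizeof(void *) |     caller-visible memory    |
 *        | for the cache pointer    |                              |
 *        +--------------------------+------------------------------+
 *
 * qdf_mem_prealloc_get() advances the returned pointer past the headroom,
 * and the free paths step back by sizeof(void *) before handing the buffer
 * to wcnss_prealloc_put().
 */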
73 
74 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
75 static bool mem_debug_disabled;
76 qdf_declare_param(mem_debug_disabled, bool);
77 qdf_export_symbol(mem_debug_disabled);
78 #endif
79 
80 #ifdef MEMORY_DEBUG
81 static bool is_initial_mem_debug_disabled;
82 #endif
83 
84 /* Preprocessor Definitions and Constants */
85 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
86 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
87 #define QDF_DEBUG_STRING_SIZE 512
88 
89 /**
90  * struct __qdf_mem_stat - qdf memory statistics
91  * @kmalloc: total kmalloc allocations
92  * @dma: total dma allocations
93  * @skb: total skb allocations
94  * @skb_total: total skb allocations in host driver
95  * @dp_tx_skb: total Tx skb allocations in datapath
96  * @dp_rx_skb: total Rx skb allocations in datapath
97  * @skb_mem_max: high watermark for skb allocations
98  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
99  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
100  * @dp_tx_skb_count: DP Tx buffer count
101  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
102  * @dp_rx_skb_count: DP Rx buffer count
103  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
104  * @tx_descs_outstanding: Current pending Tx descs count
105  * @tx_descs_max: High watermark for pending Tx descs count
106  */
107 static struct __qdf_mem_stat {
108 	qdf_atomic_t kmalloc;
109 	qdf_atomic_t dma;
110 	qdf_atomic_t skb;
111 	qdf_atomic_t skb_total;
112 	qdf_atomic_t dp_tx_skb;
113 	qdf_atomic_t dp_rx_skb;
114 	int32_t skb_mem_max;
115 	int32_t dp_tx_skb_mem_max;
116 	int32_t dp_rx_skb_mem_max;
117 	qdf_atomic_t dp_tx_skb_count;
118 	int32_t dp_tx_skb_count_max;
119 	qdf_atomic_t dp_rx_skb_count;
120 	int32_t dp_rx_skb_count_max;
121 	qdf_atomic_t tx_descs_outstanding;
122 	int32_t tx_descs_max;
123 } qdf_mem_stat;
124 
125 #ifdef MEMORY_DEBUG
126 #include "qdf_debug_domain.h"
127 
128 enum list_type {
129 	LIST_TYPE_MEM = 0,
130 	LIST_TYPE_DMA = 1,
131 	LIST_TYPE_NBUF = 2,
132 	LIST_TYPE_MAX,
133 };
134 
135 /**
136  * struct major_alloc_priv - private data registered with the debugfs entry
137  *                   created to list the major allocations
138  * @type:            type of the list to be parsed
139  * @threshold:       configured by the user by writing to the respective
140  *                   debugfs entry. Used to list the functions which requested
141  *                   memory/dma allocations more than threshold number of times.
142  */
143 struct major_alloc_priv {
144 	enum list_type type;
145 	uint32_t threshold;
146 };
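
/*
 * Usage sketch for the major-allocation debugfs entries that consume this
 * structure (editorial addition; the exact debugfs mount point and qdf root
 * directory name depend on the platform configuration):
 *
 *   echo 100 > /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *   cat /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *
 * The write updates @threshold, so the subsequent read lists only call
 * sites with at least 100 allocations.
 */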
147 
148 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
149 static qdf_spinlock_t qdf_mem_list_lock;
150 
151 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
152 static qdf_spinlock_t qdf_mem_dma_list_lock;
153 
154 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
155 {
156 	return &qdf_mem_domains[domain];
157 }
158 
159 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
160 {
161 	return &qdf_mem_dma_domains[domain];
162 }
163 
164 /**
165  * struct qdf_mem_header - memory object to debug
166  * @node: list node used to track the allocation
167  * @domain: the active memory domain at time of allocation
168  * @freed: flag set during free, used to detect double frees
169  *	Use uint8_t so we can detect corruption
170  * @func: name of the function the allocation was made from
171  * @line: line number of the file the allocation was made from
172  * @size: size of the allocation in bytes
173  * @caller: Caller of the function for which memory is allocated
174  * @header: a known value, used to detect out-of-bounds access
175  * @time: timestamp at which allocation was made
176  */
177 struct qdf_mem_header {
178 	qdf_list_node_t node;
179 	enum qdf_debug_domain domain;
180 	uint8_t freed;
181 	char func[QDF_MEM_FUNC_NAME_SIZE];
182 	uint32_t line;
183 	uint32_t size;
184 	void *caller;
185 	uint64_t header;
186 	uint64_t time;
187 };
188 
189 /* align the qdf_mem_header to 8 bytes */
190 #define QDF_DMA_MEM_HEADER_ALIGN 8
191 
192 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
193 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
194 
195 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
196 {
197 	return (struct qdf_mem_header *)ptr - 1;
198 }
199 
200 /* make sure the header pointer is 8 bytes aligned */
201 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
202 							    qdf_size_t size)
203 {
204 	return (struct qdf_mem_header *)
205 				qdf_roundup((size_t)((uint8_t *)ptr + size),
206 					    QDF_DMA_MEM_HEADER_ALIGN);
207 }
208 
209 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
210 {
211 	return (uint64_t *)((void *)(header + 1) + header->size);
212 }
213 
214 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
215 {
216 	return (void *)(header + 1);
217 }
218 
219 /* number of bytes needed for the qdf memory debug information */
220 #define QDF_MEM_DEBUG_SIZE \
221 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
222 
223 /* number of bytes needed for the qdf dma memory debug information */
224 #define QDF_DMA_MEM_DEBUG_SIZE \
225 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
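
/*
 * Layout sketch (editorial addition, derived from the accessors above):
 *
 * regular debug allocation, kzalloc(size + QDF_MEM_DEBUG_SIZE):
 *
 *   +-----------------------+------------------+------------------+
 *   | struct qdf_mem_header | user data (size) | WLAN_MEM_TRAILER |
 *   +-----------------------+------------------+------------------+
 *                           ^
 *                           caller pointer = qdf_mem_get_ptr(header)
 *
 * DMA debug allocation, size + QDF_DMA_MEM_DEBUG_SIZE:
 *
 *   +------------------+---------------------+-----------------------+
 *   | user data (size) | pad to 8-byte bound | struct qdf_mem_header |
 *   +------------------+---------------------+-----------------------+
 *                                             ^
 *                                             qdf_mem_dma_get_header()
 */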
226 
227 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
228 {
229 	QDF_BUG(header);
230 	if (!header)
231 		return;
232 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
233 }
234 
235 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
236 				const char *func, uint32_t line, void *caller)
237 {
238 	QDF_BUG(header);
239 	if (!header)
240 		return;
241 
242 	header->domain = qdf_debug_domain_get();
243 	header->freed = false;
244 
245 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
246 
247 	header->line = line;
248 	header->size = size;
249 	header->caller = caller;
250 	header->header = WLAN_MEM_HEADER;
251 	header->time = qdf_get_log_timestamp();
252 }
253 
254 enum qdf_mem_validation_bitmap {
255 	QDF_MEM_BAD_HEADER = 1 << 0,
256 	QDF_MEM_BAD_TRAILER = 1 << 1,
257 	QDF_MEM_BAD_SIZE = 1 << 2,
258 	QDF_MEM_DOUBLE_FREE = 1 << 3,
259 	QDF_MEM_BAD_FREED = 1 << 4,
260 	QDF_MEM_BAD_NODE = 1 << 5,
261 	QDF_MEM_BAD_DOMAIN = 1 << 6,
262 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
263 };
264 
265 static enum qdf_mem_validation_bitmap
266 qdf_mem_trailer_validate(struct qdf_mem_header *header)
267 {
268 	enum qdf_mem_validation_bitmap error_bitmap = 0;
269 
270 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
271 		error_bitmap |= QDF_MEM_BAD_TRAILER;
272 	return error_bitmap;
273 }
274 
275 static enum qdf_mem_validation_bitmap
276 qdf_mem_header_validate(struct qdf_mem_header *header,
277 			enum qdf_debug_domain domain)
278 {
279 	enum qdf_mem_validation_bitmap error_bitmap = 0;
280 
281 	if (header->header != WLAN_MEM_HEADER)
282 		error_bitmap |= QDF_MEM_BAD_HEADER;
283 
284 	if (header->size > QDF_MEM_MAX_MALLOC)
285 		error_bitmap |= QDF_MEM_BAD_SIZE;
286 
287 	if (header->freed == true)
288 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
289 	else if (header->freed)
290 		error_bitmap |= QDF_MEM_BAD_FREED;
291 
292 	if (!qdf_list_node_in_any_list(&header->node))
293 		error_bitmap |= QDF_MEM_BAD_NODE;
294 
295 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
296 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
297 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
298 	else if (header->domain != domain)
299 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
300 
301 	return error_bitmap;
302 }
303 
304 static void
305 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
306 			    enum qdf_debug_domain current_domain,
307 			    enum qdf_mem_validation_bitmap error_bitmap,
308 			    const char *func,
309 			    uint32_t line)
310 {
311 	if (!error_bitmap)
312 		return;
313 
314 	if (error_bitmap & QDF_MEM_BAD_HEADER)
315 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
316 			header->header, WLAN_MEM_HEADER);
317 
318 	if (error_bitmap & QDF_MEM_BAD_SIZE)
319 		qdf_err("Corrupted memory size %u (expected < %d)",
320 			header->size, QDF_MEM_MAX_MALLOC);
321 
322 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
323 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
324 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
325 
326 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
327 		qdf_err("Memory has previously been freed");
328 
329 	if (error_bitmap & QDF_MEM_BAD_FREED)
330 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
331 
332 	if (error_bitmap & QDF_MEM_BAD_NODE)
333 		qdf_err("Corrupted memory header node or double free");
334 
335 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
336 		qdf_err("Corrupted memory domain 0x%x", header->domain);
337 
338 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
339 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
340 			qdf_debug_domain_name(header->domain), header->domain,
341 			qdf_debug_domain_name(current_domain), current_domain);
342 
343 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
344 }
345 
346 /**
347  * struct __qdf_mem_info - memory statistics
348  * @func: the function which allocated memory
349  * @line: the line at which allocation happened
350  * @size: the size of allocation
351  * @caller: Address of the caller function
352  * @count: how many allocations of same type
353  * @time: timestamp at which allocation happened
354  */
355 struct __qdf_mem_info {
356 	char func[QDF_MEM_FUNC_NAME_SIZE];
357 	uint32_t line;
358 	uint32_t size;
359 	void *caller;
360 	uint32_t count;
361 	uint64_t time;
362 };
363 
364 /*
365  * The table depth defines the de-duplication proximity scope; a deeper
366  * table takes more time to process, so choose an optimum value.
367  */
368 #define QDF_MEM_STAT_TABLE_SIZE 8
369 
370 /**
371  * qdf_mem_debug_print_header() - memory debug header print logic
372  * @print: the print adapter function
373  * @print_priv: the private data to be consumed by @print
374  * @threshold: the threshold value set by user to list top allocations
375  *
376  * Return: None
377  */
378 static void qdf_mem_debug_print_header(qdf_abstract_print print,
379 				       void *print_priv,
380 				       uint32_t threshold)
381 {
382 	if (threshold)
383 		print(print_priv, "APIs requested allocations >= %u times",
384 		      threshold);
385 	print(print_priv,
386 	      "--------------------------------------------------------------");
387 	print(print_priv,
388 	      " count    size     total    filename     caller    timestamp");
389 	print(print_priv,
390 	      "--------------------------------------------------------------");
391 }
392 
393 /**
394  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
395  * @table: the memory metadata table to insert into
396  * @meta: the memory metadata to insert
397  *
398  * Return: true if the table is full after inserting, false otherwise
399  */
400 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
401 				      struct qdf_mem_header *meta)
402 {
403 	int i;
404 
405 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
406 		if (!table[i].count) {
407 			qdf_str_lcopy(table[i].func, meta->func,
408 				      QDF_MEM_FUNC_NAME_SIZE);
409 			table[i].line = meta->line;
410 			table[i].size = meta->size;
411 			table[i].count = 1;
412 			table[i].caller = meta->caller;
413 			table[i].time = meta->time;
414 			break;
415 		}
416 
417 		if (qdf_str_eq(table[i].func, meta->func) &&
418 		    table[i].line == meta->line &&
419 		    table[i].size == meta->size &&
420 		    table[i].caller == meta->caller) {
421 			table[i].count++;
422 			break;
423 		}
424 	}
425 
426 	/* return true if the table is now full */
427 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
428 }
429 
430 /**
431  * qdf_mem_domain_print() - output agnostic memory domain print logic
432  * @domain: the memory domain to print
433  * @print: the print adapter function
434  * @print_priv: the private data to be consumed by @print
435  * @threshold: the threshold value set by user to list top allocations
436  * @mem_print: pointer to function which prints the memory allocation data
437  *
438  * Return: None
439  */
440 static void qdf_mem_domain_print(qdf_list_t *domain,
441 				 qdf_abstract_print print,
442 				 void *print_priv,
443 				 uint32_t threshold,
444 				 void (*mem_print)(struct __qdf_mem_info *,
445 						   qdf_abstract_print,
446 						   void *, uint32_t))
447 {
448 	QDF_STATUS status;
449 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
450 	qdf_list_node_t *node;
451 
452 	qdf_mem_zero(table, sizeof(table));
453 	qdf_mem_debug_print_header(print, print_priv, threshold);
454 
455 	/* hold lock while inserting to avoid use-after-free of the metadata */
456 	qdf_spin_lock(&qdf_mem_list_lock);
457 	status = qdf_list_peek_front(domain, &node);
458 	while (QDF_IS_STATUS_SUCCESS(status)) {
459 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
460 		bool is_full = qdf_mem_meta_table_insert(table, meta);
461 
462 		qdf_spin_unlock(&qdf_mem_list_lock);
463 
464 		if (is_full) {
465 			(*mem_print)(table, print, print_priv, threshold);
466 			qdf_mem_zero(table, sizeof(table));
467 		}
468 
469 		qdf_spin_lock(&qdf_mem_list_lock);
470 		status = qdf_list_peek_next(domain, node, &node);
471 	}
472 	qdf_spin_unlock(&qdf_mem_list_lock);
473 
474 	(*mem_print)(table, print, print_priv, threshold);
475 }
476 
477 /**
478  * qdf_mem_meta_table_print() - memory metadata table print logic
479  * @table: the memory metadata table to print
480  * @print: the print adapter function
481  * @print_priv: the private data to be consumed by @print
482  * @threshold: the threshold value set by user to list top allocations
483  *
484  * Return: None
485  */
486 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
487 				     qdf_abstract_print print,
488 				     void *print_priv,
489 				     uint32_t threshold)
490 {
491 	int i;
492 	char debug_str[QDF_DEBUG_STRING_SIZE];
493 	size_t len = 0;
494 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
495 
496 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
497 			     "%s", debug_prefix);
498 
499 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
500 		if (!table[i].count)
501 			break;
502 
503 		print(print_priv,
504 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
505 		      table[i].count,
506 		      table[i].size,
507 		      table[i].count * table[i].size,
508 		      table[i].func,
509 		      table[i].line, table[i].caller,
510 		      table[i].time);
511 		len += qdf_scnprintf(debug_str + len,
512 				     sizeof(debug_str) - len,
513 				     " @ %s:%u %pS",
514 				     table[i].func,
515 				     table[i].line,
516 				     table[i].caller);
517 	}
518 	print(print_priv, "%s", debug_str);
519 }
520 
521 static int qdf_err_printer(void *priv, const char *fmt, ...)
522 {
523 	va_list args;
524 
525 	va_start(args, fmt);
526 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
527 	va_end(args);
528 
529 	return 0;
530 }
531 
532 #endif /* MEMORY_DEBUG */
533 
534 bool prealloc_disabled = 1;
535 qdf_declare_param(prealloc_disabled, bool);
536 qdf_export_symbol(prealloc_disabled);
537 
538 int qdf_mem_malloc_flags(void)
539 {
540 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
541 		return GFP_ATOMIC;
542 
543 	return GFP_KERNEL;
544 }
545 
546 qdf_export_symbol(qdf_mem_malloc_flags);
547 
548 /**
549  * qdf_prealloc_disabled_config_get() - Get the user configuration of
550  *                                       prealloc_disabled
551  *
552  * Return: value of prealloc_disabled qdf module argument
553  */
554 bool qdf_prealloc_disabled_config_get(void)
555 {
556 	return prealloc_disabled;
557 }
558 
559 qdf_export_symbol(qdf_prealloc_disabled_config_get);
560 
561 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
562 /**
563  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
564  * @str_value: value of the module param
565  *
566  * This function will set qdf module param prealloc_disabled
567  *
568  * Return: QDF_STATUS_SUCCESS on Success
569  */
570 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
571 {
572 	QDF_STATUS status;
573 
574 	status = qdf_bool_parse(str_value, &prealloc_disabled);
575 	return status;
576 }
577 #endif
578 
579 #if defined WLAN_DEBUGFS
580 
581 /* Debugfs root directory for qdf_mem */
582 static struct dentry *qdf_mem_debugfs_root;
583 
584 #ifdef MEMORY_DEBUG
585 static int seq_printf_printer(void *priv, const char *fmt, ...)
586 {
587 	struct seq_file *file = priv;
588 	va_list args;
589 
590 	va_start(args, fmt);
591 	seq_vprintf(file, fmt, args);
592 	seq_puts(file, "\n");
593 	va_end(args);
594 
595 	return 0;
596 }
597 
598 /**
599  * qdf_print_major_alloc() - memory metadata table print logic
600  * @table: the memory metadata table to print
601  * @print: the print adapter function
602  * @print_priv: the private data to be consumed by @print
603  * @threshold: the threshold value set by user to list top allocations
604  *
605  * Return: None
606  */
607 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
608 				  qdf_abstract_print print,
609 				  void *print_priv,
610 				  uint32_t threshold)
611 {
612 	int i;
613 
614 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
615 		if (!table[i].count)
616 			break;
617 		if (table[i].count >= threshold)
618 			print(print_priv,
619 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
620 			      table[i].count,
621 			      table[i].size,
622 			      table[i].count * table[i].size,
623 			      table[i].func,
624 			      table[i].line, table[i].caller,
625 			      table[i].time);
626 	}
627 }
628 
629 /**
630  * qdf_mem_seq_start() - sequential callback to start
631  * @seq: seq_file handle
632  * @pos: The start position of the sequence
633  *
634  * Return: iterator pointer, or NULL if iteration is complete
635  */
636 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
637 {
638 	enum qdf_debug_domain domain = *pos;
639 
640 	if (!qdf_debug_domain_valid(domain))
641 		return NULL;
642 
643 	/* just use the current position as our iterator */
644 	return pos;
645 }
646 
647 /**
648  * qdf_mem_seq_next() - next sequential callback
649  * @seq: seq_file handle
650  * @v: the current iterator
651  * @pos: the current position
652  *
653  * Get the next node and release previous node.
654  *
655  * Return: iterator pointer, or NULL if iteration is complete
656  */
657 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
658 {
659 	++*pos;
660 
661 	return qdf_mem_seq_start(seq, pos);
662 }
663 
664 /**
665  * qdf_mem_seq_stop() - stop sequential callback
666  * @seq: seq_file handle
667  * @v: current iterator
668  *
669  * Return: None
670  */
671 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
672 
673 /**
674  * qdf_mem_seq_show() - print sequential callback
675  * @seq: seq_file handle
676  * @v: current iterator
677  *
678  * Return: 0 - success
679  */
680 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
681 {
682 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
683 
684 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
685 		   qdf_debug_domain_name(domain_id), domain_id);
686 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
687 			     seq_printf_printer,
688 			     seq,
689 			     0,
690 			     qdf_mem_meta_table_print);
691 
692 	return 0;
693 }
694 
695 /* sequential file operation table */
696 static const struct seq_operations qdf_mem_seq_ops = {
697 	.start = qdf_mem_seq_start,
698 	.next  = qdf_mem_seq_next,
699 	.stop  = qdf_mem_seq_stop,
700 	.show  = qdf_mem_seq_show,
701 };
702 
703 
704 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
705 {
706 	return seq_open(file, &qdf_mem_seq_ops);
707 }
708 
709 /**
710  * qdf_major_alloc_show() - print sequential callback
711  * @seq: seq_file handle
712  * @v: current iterator
713  *
714  * Return: 0 - success
715  */
716 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
717 {
718 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
719 	struct major_alloc_priv *priv;
720 	qdf_list_t *list;
721 
722 	priv = (struct major_alloc_priv *)seq->private;
723 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
724 		   qdf_debug_domain_name(domain_id), domain_id);
725 
726 	switch (priv->type) {
727 	case LIST_TYPE_MEM:
728 		list = qdf_mem_list_get(domain_id);
729 		break;
730 	case LIST_TYPE_DMA:
731 		list = qdf_mem_dma_list(domain_id);
732 		break;
733 	default:
734 		list = NULL;
735 		break;
736 	}
737 
738 	if (list)
739 		qdf_mem_domain_print(list,
740 				     seq_printf_printer,
741 				     seq,
742 				     priv->threshold,
743 				     qdf_print_major_alloc);
744 
745 	return 0;
746 }
747 
748 /* sequential file operation table created to track major allocs */
749 static const struct seq_operations qdf_major_allocs_seq_ops = {
750 	.start = qdf_mem_seq_start,
751 	.next = qdf_mem_seq_next,
752 	.stop = qdf_mem_seq_stop,
753 	.show = qdf_major_alloc_show,
754 };
755 
756 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
757 {
758 	void *private = inode->i_private;
759 	struct seq_file *seq;
760 	int rc;
761 
762 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
763 	if (rc == 0) {
764 		seq = file->private_data;
765 		seq->private = private;
766 	}
767 	return rc;
768 }
769 
770 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
771 					     const char __user *user_buf,
772 					     size_t count,
773 					     loff_t *pos)
774 {
775 	char buf[32];
776 	ssize_t buf_size;
777 	uint32_t threshold;
778 	struct seq_file *seq = file->private_data;
779 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
780 
781 	buf_size = min(count, (sizeof(buf) - 1));
782 	if (buf_size <= 0)
783 		return 0;
784 	if (copy_from_user(buf, user_buf, buf_size))
785 		return -EFAULT;
786 	buf[buf_size] = '\0';
787 	if (!kstrtou32(buf, 10, &threshold))
788 		priv->threshold = threshold;
789 	return buf_size;
790 }
791 
792 /**
793  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
794  * @threshold: the threshold value set by user to list top allocations
795  * @print: the print adapter function
796  * @print_priv: the private data to be consumed by @print
797  * @mem_print: pointer to function which prints the memory allocation data
798  *
799  * Return: None
800  */
801 static void
802 qdf_print_major_nbuf_allocs(uint32_t threshold,
803 			    qdf_abstract_print print,
804 			    void *print_priv,
805 			    void (*mem_print)(struct __qdf_mem_info *,
806 					      qdf_abstract_print,
807 					      void *, uint32_t))
808 {
809 	uint32_t nbuf_iter;
810 	unsigned long irq_flag = 0;
811 	QDF_NBUF_TRACK *p_node;
812 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
813 	struct qdf_mem_header meta;
814 	bool is_full;
815 
816 	qdf_mem_zero(table, sizeof(table));
817 	qdf_mem_debug_print_header(print, print_priv, threshold);
818 
819 	if (is_initial_mem_debug_disabled)
820 		return;
821 
822 	qdf_rl_info("major nbuf print with threshold %u", threshold);
823 
824 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
825 	     nbuf_iter++) {
826 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
827 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
828 		while (p_node) {
829 			meta.line = p_node->line_num;
830 			meta.size = p_node->size;
831 			meta.caller = NULL;
832 			meta.time = p_node->time;
833 			qdf_str_lcopy(meta.func, p_node->func_name,
834 				      QDF_MEM_FUNC_NAME_SIZE);
835 
836 			is_full = qdf_mem_meta_table_insert(table, &meta);
837 
838 			if (is_full) {
839 				(*mem_print)(table, print,
840 					     print_priv, threshold);
841 				qdf_mem_zero(table, sizeof(table));
842 			}
843 
844 			p_node = p_node->p_next;
845 		}
846 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
847 	}
848 
849 	(*mem_print)(table, print, print_priv, threshold);
850 
851 	qdf_rl_info("major nbuf print end");
852 }
853 
854 /**
855  * qdf_major_nbuf_alloc_show() - print sequential callback
856  * @seq: seq_file handle
857  * @v: current iterator
858  *
859  * Return: 0 - success
860  */
861 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
862 {
863 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
864 
865 	if (!priv) {
866 		qdf_err("priv is null");
867 		return -EINVAL;
868 	}
869 
870 	qdf_print_major_nbuf_allocs(priv->threshold,
871 				    seq_printf_printer,
872 				    seq,
873 				    qdf_print_major_alloc);
874 
875 	return 0;
876 }
877 
878 /**
879  * qdf_nbuf_seq_start() - sequential callback to start
880  * @seq: seq_file handle
881  * @pos: The start position of the sequence
882  *
883  * Return: iterator pointer, or NULL if iteration is complete
884  */
885 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
886 {
887 	enum qdf_debug_domain domain = *pos;
888 
889 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
890 		return NULL;
891 
892 	return pos;
893 }
894 
895 /**
896  * qdf_nbuf_seq_next() - next sequential callback
897  * @seq: seq_file handle
898  * @v: the current iterator
899  * @pos: the current position
900  *
901  * Get the next node and release previous node.
902  *
903  * Return: iterator pointer, or NULL if iteration is complete
904  */
905 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
906 {
907 	++*pos;
908 
909 	return qdf_nbuf_seq_start(seq, pos);
910 }
911 
912 /**
913  * qdf_nbuf_seq_stop() - stop sequential callback
914  * @seq: seq_file handle
915  * @v: current iterator
916  *
917  * Return: None
918  */
919 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
920 
921 /* sequential file operation table created to track major skb allocs */
922 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
923 	.start = qdf_nbuf_seq_start,
924 	.next = qdf_nbuf_seq_next,
925 	.stop = qdf_nbuf_seq_stop,
926 	.show = qdf_major_nbuf_alloc_show,
927 };
928 
929 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
930 {
931 	void *private = inode->i_private;
932 	struct seq_file *seq;
933 	int rc;
934 
935 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
936 	if (rc == 0) {
937 		seq = file->private_data;
938 		seq->private = private;
939 	}
940 	return rc;
941 }
942 
943 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
944 						  const char __user *user_buf,
945 						  size_t count,
946 						  loff_t *pos)
947 {
948 	char buf[32];
949 	ssize_t buf_size;
950 	uint32_t threshold;
951 	struct seq_file *seq = file->private_data;
952 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
953 
954 	buf_size = min(count, (sizeof(buf) - 1));
955 	if (buf_size <= 0)
956 		return 0;
957 	if (copy_from_user(buf, user_buf, buf_size))
958 		return -EFAULT;
959 	buf[buf_size] = '\0';
960 	if (!kstrtou32(buf, 10, &threshold))
961 		priv->threshold = threshold;
962 	return buf_size;
963 }
964 
965 /* file operation table for listing major allocs */
966 static const struct file_operations fops_qdf_major_allocs = {
967 	.owner = THIS_MODULE,
968 	.open = qdf_major_allocs_open,
969 	.read = seq_read,
970 	.llseek = seq_lseek,
971 	.release = seq_release,
972 	.write = qdf_major_alloc_set_threshold,
973 };
974 
975 /* debugfs file operation table */
976 static const struct file_operations fops_qdf_mem_debugfs = {
977 	.owner = THIS_MODULE,
978 	.open = qdf_mem_debugfs_open,
979 	.read = seq_read,
980 	.llseek = seq_lseek,
981 	.release = seq_release,
982 };
983 
984 /* file operation table for listing major allocs */
985 static const struct file_operations fops_qdf_nbuf_major_allocs = {
986 	.owner = THIS_MODULE,
987 	.open = qdf_major_nbuf_allocs_open,
988 	.read = seq_read,
989 	.llseek = seq_lseek,
990 	.release = seq_release,
991 	.write = qdf_major_nbuf_alloc_set_threshold,
992 };
993 
994 static struct major_alloc_priv mem_priv = {
995 	/* List type set to mem */
996 	LIST_TYPE_MEM,
997 	/* initial threshold to list APIs which allocates mem >= 50 times */
998 	50
999 };
1000 
1001 static struct major_alloc_priv dma_priv = {
1002 	/* List type set to DMA */
1003 	LIST_TYPE_DMA,
1004 	/* initial threshold to list APIs which allocates dma >= 50 times */
1005 	50
1006 };
1007 
1008 static struct major_alloc_priv nbuf_priv = {
1009 	/* List type set to NBUF */
1010 	LIST_TYPE_NBUF,
1011 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
1012 	50
1013 };
1014 
1015 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1016 {
1017 	if (is_initial_mem_debug_disabled)
1018 		return QDF_STATUS_SUCCESS;
1019 
1020 	if (!qdf_mem_debugfs_root)
1021 		return QDF_STATUS_E_FAILURE;
1022 
1023 	debugfs_create_file("list",
1024 			    S_IRUSR,
1025 			    qdf_mem_debugfs_root,
1026 			    NULL,
1027 			    &fops_qdf_mem_debugfs);
1028 
1029 	debugfs_create_file("major_mem_allocs",
1030 			    0600,
1031 			    qdf_mem_debugfs_root,
1032 			    &mem_priv,
1033 			    &fops_qdf_major_allocs);
1034 
1035 	debugfs_create_file("major_dma_allocs",
1036 			    0600,
1037 			    qdf_mem_debugfs_root,
1038 			    &dma_priv,
1039 			    &fops_qdf_major_allocs);
1040 
1041 	debugfs_create_file("major_nbuf_allocs",
1042 			    0600,
1043 			    qdf_mem_debugfs_root,
1044 			    &nbuf_priv,
1045 			    &fops_qdf_nbuf_major_allocs);
1046 
1047 	return QDF_STATUS_SUCCESS;
1048 }
1049 
1050 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1051 {
1052 	return QDF_STATUS_SUCCESS;
1053 }
1054 
1055 #else /* MEMORY_DEBUG */
1056 
1057 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1058 {
1059 	return QDF_STATUS_E_NOSUPPORT;
1060 }
1061 
1062 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1063 {
1064 	return QDF_STATUS_E_NOSUPPORT;
1065 }
1066 
1067 #endif /* MEMORY_DEBUG */
1068 
1069 
1070 static void qdf_mem_debugfs_exit(void)
1071 {
1072 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1073 	qdf_mem_debugfs_root = NULL;
1074 }
1075 
1076 static QDF_STATUS qdf_mem_debugfs_init(void)
1077 {
1078 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1079 
1080 	if (!qdf_debugfs_root)
1081 		return QDF_STATUS_E_FAILURE;
1082 
1083 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1084 
1085 	if (!qdf_mem_debugfs_root)
1086 		return QDF_STATUS_E_FAILURE;
1087 
1088 
1089 	debugfs_create_atomic_t("kmalloc",
1090 				S_IRUSR,
1091 				qdf_mem_debugfs_root,
1092 				&qdf_mem_stat.kmalloc);
1093 
1094 	debugfs_create_atomic_t("dma",
1095 				S_IRUSR,
1096 				qdf_mem_debugfs_root,
1097 				&qdf_mem_stat.dma);
1098 
1099 	debugfs_create_atomic_t("skb",
1100 				S_IRUSR,
1101 				qdf_mem_debugfs_root,
1102 				&qdf_mem_stat.skb);
1103 
1104 	return QDF_STATUS_SUCCESS;
1105 }
1106 
1107 #else /* WLAN_DEBUGFS */
1108 
1109 static QDF_STATUS qdf_mem_debugfs_init(void)
1110 {
1111 	return QDF_STATUS_E_NOSUPPORT;
1112 }
1113 static void qdf_mem_debugfs_exit(void) {}
1114 
1115 
1116 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1117 {
1118 	return QDF_STATUS_E_NOSUPPORT;
1119 }
1120 
1121 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1122 {
1123 	return QDF_STATUS_E_NOSUPPORT;
1124 }
1125 
1126 #endif /* WLAN_DEBUGFS */
1127 
1128 void qdf_mem_kmalloc_inc(qdf_size_t size)
1129 {
1130 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1131 }
1132 
1133 static void qdf_mem_dma_inc(qdf_size_t size)
1134 {
1135 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1136 }
1137 
1138 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1139 void qdf_mem_skb_inc(qdf_size_t size)
1140 {
1141 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1142 }
1143 
1144 void qdf_mem_skb_dec(qdf_size_t size)
1145 {
1146 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1147 }
1148 
1149 void qdf_mem_skb_total_inc(qdf_size_t size)
1150 {
1151 	int32_t skb_mem_max = 0;
1152 
1153 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1154 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1155 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1156 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1157 }
1158 
1159 void qdf_mem_skb_total_dec(qdf_size_t size)
1160 {
1161 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1162 }
1163 
1164 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1165 {
1166 	int32_t curr_dp_tx_skb_mem_max = 0;
1167 
1168 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1169 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1170 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1171 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1172 }
1173 
1174 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1175 {
1176 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1177 }
1178 
1179 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1180 {
1181 	int32_t curr_dp_rx_skb_mem_max = 0;
1182 
1183 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1184 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1185 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1186 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1187 }
1188 
1189 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1190 {
1191 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1192 }
1193 
1194 void qdf_mem_dp_tx_skb_cnt_inc(void)
1195 {
1196 	int32_t curr_dp_tx_skb_count_max = 0;
1197 
1198 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1199 	curr_dp_tx_skb_count_max =
1200 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1201 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1202 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1203 }
1204 
1205 void qdf_mem_dp_tx_skb_cnt_dec(void)
1206 {
1207 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1208 }
1209 
1210 void qdf_mem_dp_rx_skb_cnt_inc(void)
1211 {
1212 	int32_t curr_dp_rx_skb_count_max = 0;
1213 
1214 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1215 	curr_dp_rx_skb_count_max =
1216 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1217 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1218 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1219 }
1220 
1221 void qdf_mem_dp_rx_skb_cnt_dec(void)
1222 {
1223 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1224 }
1225 #endif
1226 
1227 void qdf_mem_kmalloc_dec(qdf_size_t size)
1228 {
1229 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1230 }
1231 
1232 static inline void qdf_mem_dma_dec(qdf_size_t size)
1233 {
1234 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1235 }
1236 
1237 /**
1238  * __qdf_mempool_init() - Create and initialize memory pool
1239  *
1240  * @osdev: platform device object
1241  * @pool_addr: address of the pool created
1242  * @elem_cnt: no. of elements in pool
1243  * @elem_size: size of each pool element in bytes
1244  * @flags: flags
1245  *
1246  * return: Handle to memory pool or NULL if allocation failed
1247  */
1248 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1249 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1250 {
1251 	__qdf_mempool_ctxt_t *new_pool = NULL;
1252 	u_int32_t align = L1_CACHE_BYTES;
1253 	unsigned long aligned_pool_mem;
1254 	int pool_id;
1255 	int i;
1256 
1257 	if (prealloc_disabled) {
1258 		/* TBD: We can maintain a list of pools in qdf_device_t
1259 		 * to help debugging
1260 		 * when pre-allocation is not enabled
1261 		 */
1262 		new_pool = (__qdf_mempool_ctxt_t *)
1263 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1264 		if (!new_pool)
1265 			return -ENOMEM;
1266 
1267 		memset(new_pool, 0, sizeof(*new_pool));
1268 		/* TBD: define flags for zeroing buffers etc */
1269 		new_pool->flags = flags;
1270 		new_pool->elem_size = elem_size;
1271 		new_pool->max_elem = elem_cnt;
1272 		*pool_addr = new_pool;
1273 		return 0;
1274 	}
1275 
1276 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1277 		if (!osdev->mem_pool[pool_id])
1278 			break;
1279 	}
1280 
1281 	if (pool_id == MAX_MEM_POOLS)
1282 		return -ENOMEM;
1283 
1284 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1285 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1286 	if (!new_pool)
1287 		return -ENOMEM;
1288 
1289 	memset(new_pool, 0, sizeof(*new_pool));
1290 	/* TBD: define flags for zeroing buffers etc */
1291 	new_pool->flags = flags;
1292 	new_pool->pool_id = pool_id;
1293 
1294 	/* Round up the element size to cacheline */
1295 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1296 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1297 				((align)?(align - 1):0);
1298 
1299 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1300 	if (!new_pool->pool_mem) {
1301 		/* TBD: Check if we need get_free_pages above */
1302 		kfree(new_pool);
1303 		osdev->mem_pool[pool_id] = NULL;
1304 		return -ENOMEM;
1305 	}
1306 
1307 	spin_lock_init(&new_pool->lock);
1308 
1309 	/* Initialize free list */
1310 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1311 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1312 	STAILQ_INIT(&new_pool->free_list);
1313 
1314 	for (i = 0; i < elem_cnt; i++)
1315 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1316 			(mempool_elem_t *)(aligned_pool_mem +
1317 			(new_pool->elem_size * i)), mempool_entry);
1318 
1319 
1320 	new_pool->free_cnt = elem_cnt;
1321 	*pool_addr = new_pool;
1322 	return 0;
1323 }
1324 qdf_export_symbol(__qdf_mempool_init);
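
/*
 * Usage sketch for the qdf mempool API (editorial addition; "struct my_elem"
 * and the element count are hypothetical):
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct my_elem), 0))
 *		return -ENOMEM;
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	...
 *	__qdf_mempool_free(osdev, pool, elem);
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * When prealloc_disabled is set, alloc/free fall back to qdf_mem_malloc()
 * and qdf_mem_free() instead of the pre-allocated pool.
 */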
1325 
1326 /**
1327  * __qdf_mempool_destroy() - Destroy memory pool
1328  * @osdev: platform device object
1329  * @pool: Handle to the memory pool
1330  *
1331  * Returns: none
1332  */
1333 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1334 {
1335 	int pool_id = 0;
1336 
1337 	if (!pool)
1338 		return;
1339 
1340 	if (prealloc_disabled) {
1341 		kfree(pool);
1342 		return;
1343 	}
1344 
1345 	pool_id = pool->pool_id;
1346 
1347 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1348 	kfree(pool->pool_mem);
1349 	kfree(pool);
1350 	osdev->mem_pool[pool_id] = NULL;
1351 }
1352 qdf_export_symbol(__qdf_mempool_destroy);
1353 
1354 /**
1355  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1356  *
1357  * @osdev: platform device object
1358  * @pool: Handle to the memory pool
1359  *
1360  * Return: Pointer to the allocated element or NULL if the pool is empty
1361  */
1362 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1363 {
1364 	void *buf = NULL;
1365 
1366 	if (!pool)
1367 		return NULL;
1368 
1369 	if (prealloc_disabled)
1370 		return  qdf_mem_malloc(pool->elem_size);
1371 
1372 	spin_lock_bh(&pool->lock);
1373 
1374 	buf = STAILQ_FIRST(&pool->free_list);
1375 	if (buf) {
1376 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1377 		pool->free_cnt--;
1378 	}
1379 
1380 	/* TBD: Update free count if debug is enabled */
1381 	spin_unlock_bh(&pool->lock);
1382 
1383 	return buf;
1384 }
1385 qdf_export_symbol(__qdf_mempool_alloc);
1386 
1387 /**
1388  * __qdf_mempool_free() - Free a memory pool element
1389  * @osdev: Platform device object
1390  * @pool: Handle to memory pool
1391  * @buf: Element to be freed
1392  *
1393  * Returns: none
1394  */
1395 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1396 {
1397 	if (!pool)
1398 		return;
1399 
1400 
1401 	if (prealloc_disabled)
1402 		return qdf_mem_free(buf);
1403 
1404 	spin_lock_bh(&pool->lock);
1405 	pool->free_cnt++;
1406 
1407 	STAILQ_INSERT_TAIL
1408 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1409 	spin_unlock_bh(&pool->lock);
1410 }
1411 qdf_export_symbol(__qdf_mempool_free);
1412 
1413 #ifdef CNSS_MEM_PRE_ALLOC
1414 static bool qdf_might_be_prealloc(void *ptr)
1415 {
1416 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1417 		return true;
1418 	else
1419 		return false;
1420 }
1421 
1422 /**
1423  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1424  * @size: the number of bytes to allocate
1425  *
1426  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1427  * a chunk of pre-allocated memory. If size is less than or equal to
1428  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1429  *
1430  * Return: NULL on failure, non-NULL on success
1431  */
1432 static void *qdf_mem_prealloc_get(size_t size)
1433 {
1434 	void *ptr;
1435 
1436 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1437 		return NULL;
1438 
1439 	ptr = wcnss_prealloc_get(size);
1440 	if (!ptr)
1441 		return NULL;
1442 
1443 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1444 		ptr += sizeof(void *);
1445 
1446 	memset(ptr, 0, size);
1447 
1448 	return ptr;
1449 }
1450 
1451 static inline bool qdf_mem_prealloc_put(void *ptr)
1452 {
1453 	return wcnss_prealloc_put(ptr);
1454 }
1455 #else
1456 static bool qdf_might_be_prealloc(void *ptr)
1457 {
1458 	return false;
1459 }
1460 
1461 static inline void *qdf_mem_prealloc_get(size_t size)
1462 {
1463 	return NULL;
1464 }
1465 
1466 static inline bool qdf_mem_prealloc_put(void *ptr)
1467 {
1468 	return false;
1469 }
1470 #endif /* CNSS_MEM_PRE_ALLOC */
1471 
1472 /* External Function implementation */
1473 #ifdef MEMORY_DEBUG
1474 /**
1475  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1476  *
1477  * Return: value of mem_debug_disabled qdf module argument
1478  */
1479 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1480 bool qdf_mem_debug_config_get(void)
1481 {
1482 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1483 	return false;
1484 }
1485 #else
1486 bool qdf_mem_debug_config_get(void)
1487 {
1488 	return mem_debug_disabled;
1489 }
1490 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1491 
1492 /**
1493  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
1494  * @str_value: value of the module param
1495  *
1496  * This function will set the qdf module param mem_debug_disabled
1497  *
1498  * Return: QDF_STATUS_SUCCESS on Success
1499  */
1500 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1501 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1502 {
1503 	QDF_STATUS status;
1504 
1505 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1506 	return status;
1507 }
1508 #endif
1509 
1510 /**
1511  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1512  *
1513  * Return: none
1514  */
1515 static void qdf_mem_debug_init(void)
1516 {
1517 	int i;
1518 
1519 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1520 
1521 	if (is_initial_mem_debug_disabled)
1522 		return;
1523 
1524 	/* Initializing the list with maximum size of 60000 */
1525 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1526 		qdf_list_create(&qdf_mem_domains[i], 60000);
1527 	qdf_spinlock_create(&qdf_mem_list_lock);
1528 
1529 	/* dma */
1530 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1531 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1532 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1533 }
1534 
1535 static uint32_t
1536 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1537 			       qdf_list_t *mem_list)
1538 {
1539 	if (is_initial_mem_debug_disabled)
1540 		return 0;
1541 
1542 	if (qdf_list_empty(mem_list))
1543 		return 0;
1544 
1545 	qdf_err("Memory leaks detected in %s domain!",
1546 		qdf_debug_domain_name(domain));
1547 	qdf_mem_domain_print(mem_list,
1548 			     qdf_err_printer,
1549 			     NULL,
1550 			     0,
1551 			     qdf_mem_meta_table_print);
1552 
1553 	return mem_list->count;
1554 }
1555 
1556 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1557 {
1558 	uint32_t leak_count = 0;
1559 	int i;
1560 
1561 	if (is_initial_mem_debug_disabled)
1562 		return;
1563 
1564 	/* detect and print leaks */
1565 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1566 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1567 
1568 	if (leak_count)
1569 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1570 				   leak_count);
1571 }
1572 
1573 /**
1574  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1575  *
1576  * Return: none
1577  */
1578 static void qdf_mem_debug_exit(void)
1579 {
1580 	int i;
1581 
1582 	if (is_initial_mem_debug_disabled)
1583 		return;
1584 
1585 	/* mem */
1586 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1587 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1588 		qdf_list_destroy(qdf_mem_list_get(i));
1589 
1590 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1591 
1592 	/* dma */
1593 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1594 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1595 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1596 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1597 }
1598 
1599 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1600 			   void *caller, uint32_t flag)
1601 {
1602 	QDF_STATUS status;
1603 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1604 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1605 	struct qdf_mem_header *header;
1606 	void *ptr;
1607 	unsigned long start, duration;
1608 
1609 	if (is_initial_mem_debug_disabled)
1610 		return __qdf_mem_malloc(size, func, line);
1611 
1612 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1613 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1614 		return NULL;
1615 	}
1616 
1617 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1618 		size += sizeof(void *);
1619 
1620 	ptr = qdf_mem_prealloc_get(size);
1621 	if (ptr)
1622 		return ptr;
1623 
1624 	if (!flag)
1625 		flag = qdf_mem_malloc_flags();
1626 
1627 	start = qdf_mc_timer_get_system_time();
1628 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1629 	duration = qdf_mc_timer_get_system_time() - start;
1630 
1631 	if (duration > QDF_MEM_WARN_THRESHOLD)
1632 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1633 			 duration, size, func, line);
1634 
1635 	if (!header) {
1636 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1637 		return NULL;
1638 	}
1639 
1640 	qdf_mem_header_init(header, size, func, line, caller);
1641 	qdf_mem_trailer_init(header);
1642 	ptr = qdf_mem_get_ptr(header);
1643 
1644 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1645 	status = qdf_list_insert_front(mem_list, &header->node);
1646 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1647 	if (QDF_IS_STATUS_ERROR(status))
1648 		qdf_err("Failed to insert memory header; status %d", status);
1649 
1650 	qdf_mem_kmalloc_inc(ksize(header));
1651 
1652 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1653 		ptr += sizeof(void *);
1654 
1655 	return ptr;
1656 }
1657 qdf_export_symbol(qdf_mem_malloc_debug);
1658 
1659 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1660 				  uint32_t line, void *caller)
1661 {
1662 	QDF_STATUS status;
1663 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1664 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1665 	struct qdf_mem_header *header;
1666 	void *ptr;
1667 	unsigned long start, duration;
1668 
1669 	if (is_initial_mem_debug_disabled)
1670 		return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1671 
1672 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1673 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1674 		return NULL;
1675 	}
1676 
1677 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1678 		size += sizeof(void *);
1679 
1680 	ptr = qdf_mem_prealloc_get(size);
1681 	if (ptr)
1682 		return ptr;
1683 
1684 	start = qdf_mc_timer_get_system_time();
1685 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1686 	duration = qdf_mc_timer_get_system_time() - start;
1687 
1688 	if (duration > QDF_MEM_WARN_THRESHOLD)
1689 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1690 			 duration, size, func, line);
1691 
1692 	if (!header) {
1693 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1694 		return NULL;
1695 	}
1696 
1697 	qdf_mem_header_init(header, size, func, line, caller);
1698 	qdf_mem_trailer_init(header);
1699 	ptr = qdf_mem_get_ptr(header);
1700 
1701 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1702 	status = qdf_list_insert_front(mem_list, &header->node);
1703 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1704 	if (QDF_IS_STATUS_ERROR(status))
1705 		qdf_err("Failed to insert memory header; status %d", status);
1706 
1707 	qdf_mem_kmalloc_inc(ksize(header));
1708 
1709 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1710 		ptr += sizeof(void *);
1711 
1712 	return ptr;
1713 }
1714 
1715 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1716 
1717 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1718 				     uint32_t line)
1719 {
1720 	void *ptr;
1721 
1722 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1723 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1724 			     line);
1725 		return NULL;
1726 	}
1727 
1728 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1729 		size += sizeof(void *);
1730 
1731 	ptr = qdf_mem_prealloc_get(size);
1732 	if (ptr)
1733 		return ptr;
1734 
1735 	ptr = kzalloc(size, GFP_ATOMIC);
1736 	if (!ptr) {
1737 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1738 			      size, func, line);
1739 		return NULL;
1740 	}
1741 
1742 	qdf_mem_kmalloc_inc(ksize(ptr));
1743 
1744 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1745 		ptr += sizeof(void *);
1746 
1747 	return ptr;
1748 }
1749 
1750 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1751 
1752 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1753 {
1754 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1755 	struct qdf_mem_header *header;
1756 	enum qdf_mem_validation_bitmap error_bitmap;
1757 
1758 	if (is_initial_mem_debug_disabled) {
1759 		__qdf_mem_free(ptr);
1760 		return;
1761 	}
1762 
1763 	/* freeing a null pointer is valid */
1764 	if (qdf_unlikely(!ptr))
1765 		return;
1766 
1767 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1768 		ptr = ptr - sizeof(void *);
1769 
1770 	if (qdf_mem_prealloc_put(ptr))
1771 		return;
1772 
1773 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1774 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1775 				   ptr);
1776 
1777 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1778 
1779 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1780 	header = qdf_mem_get_header(ptr);
1781 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1782 	error_bitmap |= qdf_mem_trailer_validate(header);
1783 
1784 	if (!error_bitmap) {
1785 		header->freed = true;
1786 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1787 				     &header->node);
1788 	}
1789 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1790 
1791 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1792 				    func, line);
1793 
1794 	qdf_mem_kmalloc_dec(ksize(header));
1795 	kfree(header);
1796 }
1797 qdf_export_symbol(qdf_mem_free_debug);
1798 
1799 void qdf_mem_check_for_leaks(void)
1800 {
1801 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1802 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1803 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1804 	uint32_t leaks_count = 0;
1805 
1806 	if (is_initial_mem_debug_disabled)
1807 		return;
1808 
1809 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1810 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1811 
1812 	if (leaks_count)
1813 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1814 				   leaks_count);
1815 }
1816 
1817 /**
1818  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1819  * qdf_mem_multi_pages_alloc
1820  * @osdev: OS device handle pointer
1821  * @pages: Multi page information storage
1822  * @element_size: Each element size
1823  * @element_num: Total number of elements should be allocated
1824  * @memctxt: Memory context
1825  * @cacheable: Coherent memory or cacheable memory
1826  * @func: Caller of this allocator
1827  * @line: Line number of the caller
1828  * @caller: Return address of the caller
1829  *
1830  * This function allocates a large amount of memory across multiple pages.
1831  * Large contiguous allocations fail frequently, so instead of allocating the
1832  * memory in one shot, it is allocated as multiple non-contiguous pages which
1833  * are combined at the point of actual use.
1834  *
1835  * Return: None
1836  */
1837 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1838 				     struct qdf_mem_multi_page_t *pages,
1839 				     size_t element_size, uint32_t element_num,
1840 				     qdf_dma_context_t memctxt, bool cacheable,
1841 				     const char *func, uint32_t line,
1842 				     void *caller)
1843 {
1844 	uint16_t page_idx;
1845 	struct qdf_mem_dma_page_t *dma_pages;
1846 	void **cacheable_pages = NULL;
1847 	uint16_t i;
1848 
1849 	if (!pages->page_size)
1850 		pages->page_size = qdf_page_size;
1851 
1852 	pages->num_element_per_page = pages->page_size / element_size;
1853 	if (!pages->num_element_per_page) {
1854 		qdf_print("Invalid page %d or element size %d",
1855 			  (int)pages->page_size, (int)element_size);
1856 		goto out_fail;
1857 	}
1858 
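	/*
	 * Worked example (illustrative numbers): with element_size = 2048,
	 * element_num = 100 and page_size = 4096, num_element_per_page is 2,
	 * so 50 pages are allocated below.
	 */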
1859 	pages->num_pages = element_num / pages->num_element_per_page;
1860 	if (element_num % pages->num_element_per_page)
1861 		pages->num_pages++;
1862 
1863 	if (cacheable) {
1864 		/* Pages information storage */
1865 		pages->cacheable_pages = qdf_mem_malloc_debug(
1866 			pages->num_pages * sizeof(pages->cacheable_pages),
1867 			func, line, caller, 0);
1868 		if (!pages->cacheable_pages)
1869 			goto out_fail;
1870 
1871 		cacheable_pages = pages->cacheable_pages;
1872 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1873 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1874 				pages->page_size, func, line, caller, 0);
1875 			if (!cacheable_pages[page_idx])
1876 				goto page_alloc_fail;
1877 		}
1878 		pages->dma_pages = NULL;
1879 	} else {
1880 		pages->dma_pages = qdf_mem_malloc_debug(
1881 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1882 			func, line, caller, 0);
1883 		if (!pages->dma_pages)
1884 			goto out_fail;
1885 
1886 		dma_pages = pages->dma_pages;
1887 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1888 			dma_pages->page_v_addr_start =
1889 				qdf_mem_alloc_consistent_debug(
1890 					osdev, osdev->dev, pages->page_size,
1891 					&dma_pages->page_p_addr,
1892 					func, line, caller);
1893 			if (!dma_pages->page_v_addr_start) {
1894 				qdf_print("dmaable page alloc fail pi %d",
1895 					  page_idx);
1896 				goto page_alloc_fail;
1897 			}
1898 			dma_pages->page_v_addr_end =
1899 				dma_pages->page_v_addr_start + pages->page_size;
1900 			dma_pages++;
1901 		}
1902 		pages->cacheable_pages = NULL;
1903 	}
1904 	return;
1905 
1906 page_alloc_fail:
1907 	if (cacheable) {
1908 		for (i = 0; i < page_idx; i++)
1909 			qdf_mem_free_debug(pages->cacheable_pages[i],
1910 					   func, line);
1911 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1912 	} else {
1913 		dma_pages = pages->dma_pages;
1914 		for (i = 0; i < page_idx; i++) {
1915 			qdf_mem_free_consistent_debug(
1916 				osdev, osdev->dev,
1917 				pages->page_size, dma_pages->page_v_addr_start,
1918 				dma_pages->page_p_addr, memctxt, func, line);
1919 			dma_pages++;
1920 		}
1921 		qdf_mem_free_debug(pages->dma_pages, func, line);
1922 	}
1923 
1924 out_fail:
1925 	pages->cacheable_pages = NULL;
1926 	pages->dma_pages = NULL;
1927 	pages->num_pages = 0;
1928 }
1929 
1930 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1931 
1932 /**
1933  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1934  * @osdev: OS device handle pointer
1935  * @pages: Multi page information storage
1936  * @memctxt: Memory context
1937  * @cacheable: Coherent memory or cacheable memory
1938  * @func: Caller of this allocator
1939  * @line: Line number of the caller
1940  *
 * This function frees memory that was allocated over multiple pages.
1942  *
1943  * Return: None
1944  */
1945 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1946 				    struct qdf_mem_multi_page_t *pages,
1947 				    qdf_dma_context_t memctxt, bool cacheable,
1948 				    const char *func, uint32_t line)
1949 {
1950 	unsigned int page_idx;
1951 	struct qdf_mem_dma_page_t *dma_pages;
1952 
1953 	if (!pages->page_size)
1954 		pages->page_size = qdf_page_size;
1955 
1956 	if (cacheable) {
1957 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1958 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1959 					   func, line);
1960 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1961 	} else {
1962 		dma_pages = pages->dma_pages;
1963 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1964 			qdf_mem_free_consistent_debug(
1965 				osdev, osdev->dev, pages->page_size,
1966 				dma_pages->page_v_addr_start,
1967 				dma_pages->page_p_addr, memctxt, func, line);
1968 			dma_pages++;
1969 		}
1970 		qdf_mem_free_debug(pages->dma_pages, func, line);
1971 	}
1972 
1973 	pages->cacheable_pages = NULL;
1974 	pages->dma_pages = NULL;
1975 	pages->num_pages = 0;
1976 }
1977 
1978 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1979 
1980 #else
1981 static void qdf_mem_debug_init(void) {}
1982 
1983 static void qdf_mem_debug_exit(void) {}
1984 
1985 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1986 {
1987 	void *ptr;
1988 
1989 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1990 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1991 			     line);
1992 		return NULL;
1993 	}
1994 
1995 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1996 		size += sizeof(void *);
1997 
1998 	ptr = qdf_mem_prealloc_get(size);
1999 	if (ptr)
2000 		return ptr;
2001 
2002 	ptr = kzalloc(size, GFP_ATOMIC);
2003 	if (!ptr) {
2004 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
2005 			      size, func, line);
2006 		return NULL;
2007 	}
2008 
2009 	qdf_mem_kmalloc_inc(ksize(ptr));
2010 
2011 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2012 		ptr += sizeof(void *);
2013 
2014 	return ptr;
2015 }
2016 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
2017 
2018 /**
 * qdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
2020  * @osdev: OS device handle pointer
2021  * @pages: Multi page information storage
2022  * @element_size: Each element size
2023  * @element_num: Total number of elements should be allocated
2024  * @memctxt: Memory context
2025  * @cacheable: Coherent memory or cacheable memory
2026  *
 * This function allocates a large amount of memory over multiple pages.
 * Since large contiguous allocations fail frequently, the memory is
 * allocated as multiple non-contiguous pages instead of a single large
 * block, and the pages are combined at the point of actual use.
2031  *
2032  * Return: None
2033  */
2034 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
2035 			       struct qdf_mem_multi_page_t *pages,
2036 			       size_t element_size, uint32_t element_num,
2037 			       qdf_dma_context_t memctxt, bool cacheable)
2038 {
2039 	uint16_t page_idx;
2040 	struct qdf_mem_dma_page_t *dma_pages;
2041 	void **cacheable_pages = NULL;
2042 	uint16_t i;
2043 
2044 	if (!pages->page_size)
2045 		pages->page_size = qdf_page_size;
2046 
2047 	pages->num_element_per_page = pages->page_size / element_size;
2048 	if (!pages->num_element_per_page) {
2049 		qdf_print("Invalid page %d or element size %d",
2050 			  (int)pages->page_size, (int)element_size);
2051 		goto out_fail;
2052 	}
2053 
2054 	pages->num_pages = element_num / pages->num_element_per_page;
2055 	if (element_num % pages->num_element_per_page)
2056 		pages->num_pages++;
2057 
2058 	if (cacheable) {
2059 		/* Pages information storage */
2060 		pages->cacheable_pages = qdf_mem_malloc(
2061 			pages->num_pages * sizeof(pages->cacheable_pages));
2062 		if (!pages->cacheable_pages)
2063 			goto out_fail;
2064 
2065 		cacheable_pages = pages->cacheable_pages;
2066 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2067 			cacheable_pages[page_idx] =
2068 				qdf_mem_malloc(pages->page_size);
2069 			if (!cacheable_pages[page_idx])
2070 				goto page_alloc_fail;
2071 		}
2072 		pages->dma_pages = NULL;
2073 	} else {
2074 		pages->dma_pages = qdf_mem_malloc(
2075 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
2076 		if (!pages->dma_pages)
2077 			goto out_fail;
2078 
2079 		dma_pages = pages->dma_pages;
2080 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2081 			dma_pages->page_v_addr_start =
2082 				qdf_mem_alloc_consistent(osdev, osdev->dev,
2083 					 pages->page_size,
2084 					&dma_pages->page_p_addr);
2085 			if (!dma_pages->page_v_addr_start) {
2086 				qdf_print("dmaable page alloc fail pi %d",
2087 					page_idx);
2088 				goto page_alloc_fail;
2089 			}
2090 			dma_pages->page_v_addr_end =
2091 				dma_pages->page_v_addr_start + pages->page_size;
2092 			dma_pages++;
2093 		}
2094 		pages->cacheable_pages = NULL;
2095 	}
2096 	return;
2097 
2098 page_alloc_fail:
2099 	if (cacheable) {
2100 		for (i = 0; i < page_idx; i++)
2101 			qdf_mem_free(pages->cacheable_pages[i]);
2102 		qdf_mem_free(pages->cacheable_pages);
2103 	} else {
2104 		dma_pages = pages->dma_pages;
2105 		for (i = 0; i < page_idx; i++) {
2106 			qdf_mem_free_consistent(
2107 				osdev, osdev->dev, pages->page_size,
2108 				dma_pages->page_v_addr_start,
2109 				dma_pages->page_p_addr, memctxt);
2110 			dma_pages++;
2111 		}
2112 		qdf_mem_free(pages->dma_pages);
2113 	}
2114 
2115 out_fail:
2116 	pages->cacheable_pages = NULL;
2117 	pages->dma_pages = NULL;
2118 	pages->num_pages = 0;
2119 	return;
2120 }
2121 qdf_export_symbol(qdf_mem_multi_pages_alloc);
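
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical caller
 * building a pool of 1024 cacheable elements of 64 bytes each. Names such as
 * my_osdev, my_memctx and my_pages are assumptions made for this example.
 *
 *	struct qdf_mem_multi_page_t my_pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(my_osdev, &my_pages, 64, 1024,
 *				  my_memctx, true);
 *	if (!my_pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&my_pages, true);
 *	...
 *	qdf_mem_multi_pages_free(my_osdev, &my_pages, my_memctx, true);
 */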
2122 
2123 /**
 * qdf_mem_multi_pages_free() - free a large amount of kernel memory
2125  * @osdev: OS device handle pointer
2126  * @pages: Multi page information storage
2127  * @memctxt: Memory context
2128  * @cacheable: Coherent memory or cacheable memory
2129  *
 * This function frees memory that was allocated over multiple pages.
2131  *
2132  * Return: None
2133  */
2134 void qdf_mem_multi_pages_free(qdf_device_t osdev,
2135 			      struct qdf_mem_multi_page_t *pages,
2136 			      qdf_dma_context_t memctxt, bool cacheable)
2137 {
2138 	unsigned int page_idx;
2139 	struct qdf_mem_dma_page_t *dma_pages;
2140 
2141 	if (!pages->page_size)
2142 		pages->page_size = qdf_page_size;
2143 
2144 	if (cacheable) {
2145 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2146 			qdf_mem_free(pages->cacheable_pages[page_idx]);
2147 		qdf_mem_free(pages->cacheable_pages);
2148 	} else {
2149 		dma_pages = pages->dma_pages;
2150 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2151 			qdf_mem_free_consistent(
2152 				osdev, osdev->dev, pages->page_size,
2153 				dma_pages->page_v_addr_start,
2154 				dma_pages->page_p_addr, memctxt);
2155 			dma_pages++;
2156 		}
2157 		qdf_mem_free(pages->dma_pages);
2158 	}
2159 
2160 	pages->cacheable_pages = NULL;
2161 	pages->dma_pages = NULL;
2162 	pages->num_pages = 0;
2163 	return;
2164 }
2165 qdf_export_symbol(qdf_mem_multi_pages_free);
2166 #endif
2167 
2168 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2169 			      bool cacheable)
2170 {
2171 	unsigned int page_idx;
2172 	struct qdf_mem_dma_page_t *dma_pages;
2173 
2174 	if (!pages->page_size)
2175 		pages->page_size = qdf_page_size;
2176 
2177 	if (cacheable) {
2178 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2179 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2180 				     pages->page_size);
2181 	} else {
2182 		dma_pages = pages->dma_pages;
2183 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2184 			qdf_mem_zero(dma_pages->page_v_addr_start,
2185 				     pages->page_size);
2186 			dma_pages++;
2187 		}
2188 	}
2189 }
2190 
2191 qdf_export_symbol(qdf_mem_multi_pages_zero);
2192 
2193 void __qdf_mem_free(void *ptr)
2194 {
2195 	if (!ptr)
2196 		return;
2197 
2198 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2199 		ptr = ptr - sizeof(void *);
2200 
2201 	if (qdf_might_be_prealloc(ptr)) {
2202 		if (qdf_mem_prealloc_put(ptr))
2203 			return;
2204 	}
2205 
2206 	qdf_mem_kmalloc_dec(ksize(ptr));
2207 
2208 	kfree(ptr);
2209 }
2210 
2211 qdf_export_symbol(__qdf_mem_free);
2212 
2213 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2214 {
2215 	void *ptr;
2216 
2217 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2218 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2219 			     line);
2220 		return NULL;
2221 	}
2222 
2223 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2224 		size += sizeof(void *);
2225 
2226 	ptr = qdf_mem_prealloc_get(size);
2227 	if (ptr)
2228 		return ptr;
2229 
2230 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2231 	if (!ptr)
2232 		return NULL;
2233 
2234 	qdf_mem_kmalloc_inc(ksize(ptr));
2235 
2236 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2237 		ptr += sizeof(void *);
2238 
2239 	return ptr;
2240 }
2241 
2242 qdf_export_symbol(__qdf_mem_malloc);
2243 
2244 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2245 void __qdf_untracked_mem_free(void *ptr)
2246 {
2247 	if (!ptr)
2248 		return;
2249 
2250 	kfree(ptr);
2251 }
2252 
2253 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2254 {
2255 	void *ptr;
2256 
2257 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2258 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2259 			     line);
2260 		return NULL;
2261 	}
2262 
2263 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2264 	if (!ptr)
2265 		return NULL;
2266 
2267 	return ptr;
2268 }
2269 #endif
2270 
void *qdf_aligned_malloc_fl(uint32_t *size,
			    void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line)
2277 {
2278 	void *vaddr_aligned;
2279 	uint32_t align_alloc_size;
2280 
2281 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2282 			line);
2283 	if (!*vaddr_unaligned) {
2284 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2285 		return NULL;
2286 	}
2287 
2288 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2289 
	/* Re-allocate with additional bytes to align the base address only
	 * if the allocation above returned an unaligned address. The reason
	 * for trying an exact-size allocation first is that the OS allocates
	 * blocks of power-of-2 pages and then frees the extra pages.
	 * E.g., for a ring size of 1 MB, an allocation of 1 MB plus 7 bytes
	 * for alignment causes a 2 MB block allocation, which sometimes
	 * fails due to memory fragmentation.
	 */
2299 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2300 		align_alloc_size = *size + align - 1;
2301 
2302 		qdf_mem_free(*vaddr_unaligned);
2303 		*vaddr_unaligned = qdf_mem_malloc_fl(
2304 				(qdf_size_t)align_alloc_size, func, line);
2305 		if (!*vaddr_unaligned) {
2306 			qdf_warn("Failed to alloc %uB @ %s:%d",
2307 				 align_alloc_size, func, line);
2308 			return NULL;
2309 		}
2310 
2311 		*paddr_unaligned = qdf_mem_virt_to_phys(
2312 				*vaddr_unaligned);
2313 		*size = align_alloc_size;
2314 	}
2315 
2316 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2317 		((unsigned long)(*paddr_unaligned), align);
2318 
2319 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2320 			((unsigned long)(*paddr_aligned) -
2321 			 (unsigned long)(*paddr_unaligned)));
2322 
2323 	return vaddr_aligned;
2324 }
2325 
2326 qdf_export_symbol(qdf_aligned_malloc_fl);
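
/*
 * Illustrative usage sketch (not part of the driver): allocating a ring whose
 * base physical address must be 8-byte aligned. The ring_size value and the
 * local variables shown here are assumptions made for this example; the
 * unaligned virtual address is what must eventually be freed.
 *
 *	uint32_t ring_size = 4096;
 *	void *vaddr_unaligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *	void *ring;
 *
 *	ring = qdf_aligned_malloc_fl(&ring_size, &vaddr_unaligned,
 *				     &paddr_unaligned, &paddr_aligned,
 *				     8, __func__, __LINE__);
 *	if (!ring)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(vaddr_unaligned);
 */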
2327 
2328 #ifdef DP_UMAC_HW_RESET_SUPPORT
2329 /**
 * qdf_tx_desc_pool_free_bufs() - Go through elements and call the registered cb
2331  * @ctxt: Context to be passed to the cb
2332  * @pages: Multi page information storage
2333  * @elem_size: Each element size
 * @elem_count: Total number of elements in the pool
2335  * @cacheable: Coherent memory or cacheable memory
2336  * @cb: Callback to free the elements
2337  * @elem_list: elem list for delayed free
2338  *
 * Return: 0 on success, or error code
2340  */
2341 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2342 			       uint32_t elem_size, uint32_t elem_count,
2343 			       uint8_t cacheable, qdf_mem_release_cb cb,
2344 			       void *elem_list)
2345 {
2346 	uint16_t i, i_int;
2347 	void *page_info;
2348 	void *elem;
2349 	uint32_t num_link = 0;
2350 
2351 	for (i = 0; i < pages->num_pages; i++) {
2352 		if (cacheable)
2353 			page_info = pages->cacheable_pages[i];
2354 		else
2355 			page_info = pages->dma_pages[i].page_v_addr_start;
2356 
2357 		if (!page_info)
2358 			return -ENOMEM;
2359 
2360 		elem = page_info;
2361 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2362 			if (i_int == (pages->num_element_per_page - 1)) {
2363 				cb(ctxt, elem, elem_list);
2364 
2365 				if ((i + 1) == pages->num_pages)
2366 					break;
2367 				if (cacheable)
2368 					elem =
2369 					(void *)(pages->cacheable_pages[i + 1]);
2370 				else
2371 					elem = (void *)(pages->
2372 					dma_pages[i + 1].page_v_addr_start);
2373 
2374 				num_link++;
2375 
2376 				break;
2377 			}
2378 
2379 			cb(ctxt, elem, elem_list);
2380 			elem = ((char *)elem + elem_size);
2381 			num_link++;
2382 
2383 			/* Last link established exit */
2384 			if (num_link == (elem_count - 1))
2385 				break;
2386 		}
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
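
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical release
 * callback matching the cb(ctxt, elem, elem_list) shape used above, queueing
 * each element onto a caller-owned delayed-free list. my_ctxt, my_pages and
 * my_elem_list are assumptions made for this example.
 *
 *	static void my_release_cb(void *ctxt, void *elem, void *elem_list)
 *	{
 *		... queue elem onto elem_list for delayed freeing ...
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(my_ctxt, &my_pages, elem_size, elem_count,
 *				   true, my_release_cb, my_elem_list);
 */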
2393 #endif
2394 
2395 /**
2396  * qdf_mem_multi_page_link() - Make links for multi page elements
2397  * @osdev: OS device handle pointer
2398  * @pages: Multi page information storage
2399  * @elem_size: Single element size
 * @elem_count: Total number of elements to be linked
2401  * @cacheable: Coherent memory or cacheable memory
2402  *
 * This function links the elements of a multi-page allocation into a chain.
 *
 * Return: 0 on success, or error code
2406  */
2407 int qdf_mem_multi_page_link(qdf_device_t osdev,
2408 		struct qdf_mem_multi_page_t *pages,
2409 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2410 {
2411 	uint16_t i, i_int;
2412 	void *page_info;
2413 	void **c_elem = NULL;
2414 	uint32_t num_link = 0;
2415 
2416 	for (i = 0; i < pages->num_pages; i++) {
2417 		if (cacheable)
2418 			page_info = pages->cacheable_pages[i];
2419 		else
2420 			page_info = pages->dma_pages[i].page_v_addr_start;
2421 
2422 		if (!page_info)
2423 			return -ENOMEM;
2424 
2425 		c_elem = (void **)page_info;
2426 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2427 			if (i_int == (pages->num_element_per_page - 1)) {
2428 				if ((i + 1) == pages->num_pages)
2429 					break;
2430 				if (cacheable)
2431 					*c_elem = pages->
2432 						cacheable_pages[i + 1];
2433 				else
2434 					*c_elem = pages->
2435 						dma_pages[i + 1].
2436 							page_v_addr_start;
2437 				num_link++;
2438 				break;
2439 			} else {
2440 				*c_elem =
2441 					(void *)(((char *)c_elem) + elem_size);
2442 			}
2443 			num_link++;
2444 			c_elem = (void **)*c_elem;
2445 
2446 			/* Last link established exit */
2447 			if (num_link == (elem_count - 1))
2448 				break;
2449 		}
2450 	}
2451 
2452 	if (c_elem)
2453 		*c_elem = NULL;
2454 
2455 	return 0;
2456 }
2457 qdf_export_symbol(qdf_mem_multi_page_link);
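
/*
 * Illustrative usage sketch (not part of the driver): after a multi-page
 * allocation succeeds, chain its elements so that the first word of each
 * element points to the next one. Variable names are assumptions made for
 * this example.
 *
 *	if (qdf_mem_multi_page_link(my_osdev, &my_pages, elem_size,
 *				    elem_count, true)) {
 *		qdf_mem_multi_pages_free(my_osdev, &my_pages, my_memctx, true);
 *		return QDF_STATUS_E_FAULT;
 *	}
 */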
2458 
2459 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2460 {
2461 	/* special case where dst_addr or src_addr can be NULL */
2462 	if (!num_bytes)
2463 		return;
2464 
2465 	QDF_BUG(dst_addr);
2466 	QDF_BUG(src_addr);
2467 	if (!dst_addr || !src_addr)
2468 		return;
2469 
2470 	memcpy(dst_addr, src_addr, num_bytes);
2471 }
2472 qdf_export_symbol(qdf_mem_copy);
2473 
2474 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2475 {
2476 	qdf_shared_mem_t *shared_mem;
2477 	qdf_dma_addr_t dma_addr, paddr;
2478 	int ret;
2479 
2480 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2481 	if (!shared_mem)
2482 		return NULL;
2483 
2484 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2485 				size, qdf_mem_get_dma_addr_ptr(osdev,
2486 						&shared_mem->mem_info));
2487 	if (!shared_mem->vaddr) {
2488 		qdf_err("Unable to allocate DMA memory for shared resource");
2489 		qdf_mem_free(shared_mem);
2490 		return NULL;
2491 	}
2492 
2493 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2494 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2495 
2496 	qdf_mem_zero(shared_mem->vaddr, size);
2497 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2498 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2499 
2500 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2501 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2502 				      shared_mem->vaddr, dma_addr, size);
2503 	if (ret) {
2504 		qdf_err("Unable to get DMA sgtable");
2505 		qdf_mem_free_consistent(osdev, osdev->dev,
2506 					shared_mem->mem_info.size,
2507 					shared_mem->vaddr,
2508 					dma_addr,
2509 					qdf_get_dma_mem_context(shared_mem,
2510 								memctx));
2511 		qdf_mem_free(shared_mem);
2512 		return NULL;
2513 	}
2514 
2515 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2516 
2517 	return shared_mem;
2518 }
2519 
2520 qdf_export_symbol(qdf_mem_shared_mem_alloc);
2521 
2522 /**
 * qdf_mem_copy_toio() - copy memory to I/O memory
2524  * @dst_addr: Pointer to destination memory location (to copy to)
2525  * @src_addr: Pointer to source memory location (to copy from)
2526  * @num_bytes: Number of bytes to copy.
2527  *
2528  * Return: none
2529  */
2530 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2531 {
2532 	if (0 == num_bytes) {
2533 		/* special case where dst_addr or src_addr can be NULL */
2534 		return;
2535 	}
2536 
2537 	if ((!dst_addr) || (!src_addr)) {
2538 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2539 			  "%s called with NULL parameter, source:%pK destination:%pK",
2540 			  __func__, src_addr, dst_addr);
2541 		QDF_ASSERT(0);
2542 		return;
2543 	}
2544 	memcpy_toio(dst_addr, src_addr, num_bytes);
2545 }
2546 
2547 qdf_export_symbol(qdf_mem_copy_toio);
2548 
2549 /**
 * qdf_mem_set_io() - set (fill) I/O memory with a specified byte value
 * @ptr: Pointer to I/O memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte value to set in memory
2554  *
2555  * Return: None
2556  */
2557 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2558 {
2559 	if (!ptr) {
2560 		qdf_print("%s called with NULL parameter ptr", __func__);
2561 		return;
2562 	}
2563 	memset_io(ptr, value, num_bytes);
2564 }
2565 
2566 qdf_export_symbol(qdf_mem_set_io);
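
/*
 * Illustrative usage sketch (not part of the driver): clearing and then
 * filling a device-mapped region. dst_io_addr, host_buf and the length
 * variables are assumptions made for this example; dst_io_addr is expected
 * to map I/O memory (e.g. obtained via ioremap()).
 *
 *	qdf_mem_set_io(dst_io_addr, region_len, 0);
 *	qdf_mem_copy_toio(dst_io_addr, host_buf, buf_len);
 */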
2567 
2568 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2569 {
2570 	QDF_BUG(ptr);
2571 	if (!ptr)
2572 		return;
2573 
2574 	memset(ptr, value, num_bytes);
2575 }
2576 qdf_export_symbol(qdf_mem_set);
2577 
2578 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2579 {
2580 	/* special case where dst_addr or src_addr can be NULL */
2581 	if (!num_bytes)
2582 		return;
2583 
2584 	QDF_BUG(dst_addr);
2585 	QDF_BUG(src_addr);
2586 	if (!dst_addr || !src_addr)
2587 		return;
2588 
2589 	memmove(dst_addr, src_addr, num_bytes);
2590 }
2591 qdf_export_symbol(qdf_mem_move);
2592 
2593 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2594 {
2595 	QDF_BUG(left);
2596 	QDF_BUG(right);
2597 
2598 	return memcmp(left, right, size);
2599 }
2600 qdf_export_symbol(qdf_mem_cmp);
2601 
2602 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2603 /**
 * qdf_mem_dma_alloc() - allocate memory for DMA
2605  * @osdev: OS device handle
2606  * @dev: Pointer to device handle
2607  * @size: Size to be allocated
2608  * @phy_addr: Physical address
2609  *
 * Return: pointer to allocated memory, or NULL if the allocation fails
2611  */
2612 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2613 				      qdf_size_t size,
2614 				      qdf_dma_addr_t *phy_addr)
2615 {
2616 	void *vaddr;
2617 
2618 	vaddr = qdf_mem_malloc(size);
2619 	*phy_addr = ((uintptr_t) vaddr);
2620 	/* using this type conversion to suppress "cast from pointer to integer
2621 	 * of different size" warning on some platforms
2622 	 */
2623 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2624 	return vaddr;
2625 }
2626 
2627 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2628 	!defined(QCA_WIFI_QCN9000)
2629 
2630 #define QCA8074_RAM_BASE 0x50000000
2631 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2632 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2633 			qdf_dma_addr_t *phy_addr)
2634 {
2635 	void *vaddr = NULL;
2636 	int i;
2637 
2638 	*phy_addr = 0;
2639 
2640 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2641 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2642 					   qdf_mem_malloc_flags());
2643 
2644 		if (!vaddr) {
2645 			qdf_err("%s failed , size: %zu!", __func__, size);
2646 			return NULL;
2647 		}
2648 
2649 		if (*phy_addr >= QCA8074_RAM_BASE)
2650 			return vaddr;
2651 
2652 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2653 	}
2654 
2655 	return NULL;
2656 }
2657 
2658 #else
2659 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2660 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2661 {
2662 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2663 }
2664 #endif
2665 
2666 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2667 static inline void
2668 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2669 {
2670 	qdf_mem_free(vaddr);
2671 }
2672 #else
2673 
2674 static inline void
2675 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2676 {
2677 	dma_free_coherent(dev, size, vaddr, paddr);
2678 }
2679 #endif
2680 
2681 #ifdef MEMORY_DEBUG
2682 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2683 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2684 				     const char *func, uint32_t line,
2685 				     void *caller)
2686 {
2687 	QDF_STATUS status;
2688 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2689 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2690 	struct qdf_mem_header *header;
2691 	void *vaddr;
2692 
2693 	if (is_initial_mem_debug_disabled)
2694 		return __qdf_mem_alloc_consistent(osdev, dev,
2695 						  size, paddr,
2696 						  func, line);
2697 
2698 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2699 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2700 		return NULL;
2701 	}
2702 
2703 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2704 				   paddr);
2705 
2706 	if (!vaddr) {
2707 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2708 		return NULL;
2709 	}
2710 
2711 	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers; this call initializes
	 * the header structure at the tail.
	 * Prefixing the header to the DMA buffer causes SMMU faults, so
	 * do not prefix the header to DMA buffers.
	 */
2717 	qdf_mem_header_init(header, size, func, line, caller);
2718 
2719 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2720 	status = qdf_list_insert_front(mem_list, &header->node);
2721 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2722 	if (QDF_IS_STATUS_ERROR(status))
2723 		qdf_err("Failed to insert memory header; status %d", status);
2724 
2725 	qdf_mem_dma_inc(size);
2726 
2727 	return vaddr;
2728 }
2729 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2730 
2731 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2732 				   qdf_size_t size, void *vaddr,
2733 				   qdf_dma_addr_t paddr,
2734 				   qdf_dma_context_t memctx,
2735 				   const char *func, uint32_t line)
2736 {
2737 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2738 	struct qdf_mem_header *header;
2739 	enum qdf_mem_validation_bitmap error_bitmap;
2740 
2741 	if (is_initial_mem_debug_disabled) {
2742 		__qdf_mem_free_consistent(
2743 					  osdev, dev,
2744 					  size, vaddr,
2745 					  paddr, memctx);
2746 		return;
2747 	}
2748 
2749 	/* freeing a null pointer is valid */
2750 	if (qdf_unlikely(!vaddr))
2751 		return;
2752 
2753 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2754 
2755 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers; this call retrieves
	 * the header structure from the tail.
	 * Prefixing the header to the DMA buffer causes SMMU faults, so
	 * do not prefix the header to DMA buffers.
	 */
2761 	header = qdf_mem_dma_get_header(vaddr, size);
2762 	error_bitmap = qdf_mem_header_validate(header, domain);
2763 	if (!error_bitmap) {
2764 		header->freed = true;
2765 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2766 				     &header->node);
2767 	}
2768 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2769 
2770 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2771 
2772 	qdf_mem_dma_dec(header->size);
2773 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2774 }
2775 qdf_export_symbol(qdf_mem_free_consistent_debug);
2776 #endif /* MEMORY_DEBUG */
2777 
2778 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2779 			       qdf_size_t size, void *vaddr,
2780 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2781 {
2782 	qdf_mem_dma_dec(size);
2783 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2784 }
2785 
2786 qdf_export_symbol(__qdf_mem_free_consistent);
2787 
2788 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2789 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2790 				 const char *func, uint32_t line)
2791 {
2792 	void *vaddr;
2793 
2794 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2795 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2796 			     size, func, line);
2797 		return NULL;
2798 	}
2799 
2800 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2801 
2802 	if (vaddr)
2803 		qdf_mem_dma_inc(size);
2804 
2805 	return vaddr;
2806 }
2807 
2808 qdf_export_symbol(__qdf_mem_alloc_consistent);
2809 
2810 void *qdf_aligned_mem_alloc_consistent_fl(
2811 	qdf_device_t osdev, uint32_t *size,
2812 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2813 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2814 	const char *func, uint32_t line)
2815 {
2816 	void *vaddr_aligned;
2817 	uint32_t align_alloc_size;
2818 
2819 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2820 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2821 	if (!*vaddr_unaligned) {
2822 		qdf_warn("Failed to alloc %uB @ %s:%d",
2823 			 *size, func, line);
2824 		return NULL;
2825 	}
2826 
	/* Re-allocate with additional bytes to align the base address only
	 * if the allocation above returned an unaligned address. The reason
	 * for trying an exact-size allocation first is that the OS allocates
	 * blocks of power-of-2 pages and then frees the extra pages.
	 * E.g., for a ring size of 1 MB, an allocation of 1 MB plus 7 bytes
	 * for alignment causes a 2 MB block allocation, which sometimes
	 * fails due to memory fragmentation.
	 */
2835 	 */
2836 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2837 		align_alloc_size = *size + align - 1;
2838 
2839 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2840 					*vaddr_unaligned,
2841 					*paddr_unaligned, 0);
2842 
2843 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2844 				osdev, osdev->dev, align_alloc_size,
2845 				paddr_unaligned);
2846 		if (!*vaddr_unaligned) {
2847 			qdf_warn("Failed to alloc %uB @ %s:%d",
2848 				 align_alloc_size, func, line);
2849 			return NULL;
2850 		}
2851 
2852 		*size = align_alloc_size;
2853 	}
2854 
2855 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2856 			(unsigned long)(*paddr_unaligned), align);
2857 
2858 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2859 				 ((unsigned long)(*paddr_aligned) -
2860 				  (unsigned long)(*paddr_unaligned)));
2861 
2862 	return vaddr_aligned;
2863 }
2864 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2865 
2866 /**
2867  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2868  * @osdev: OS device handle
 * @bus_addr: DMA address to give to the device
2870  * @size: Size of the memory block
2871  * @direction: direction data will be DMAed
2872  *
2873  * Assign memory to the remote device.
 * The cache lines are flushed to RAM or invalidated as needed.
2875  *
2876  * Return: none
2877  */
2878 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2879 					qdf_dma_addr_t bus_addr,
2880 					qdf_size_t size,
2881 					enum dma_data_direction direction)
2882 {
2883 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2884 }
2885 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2886 
2887 /**
2888  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2889  * @osdev: OS device handle
 * @bus_addr: DMA address to give to the CPU
2891  * @size: Size of the memory block
2892  * @direction: direction data will be DMAed
2893  *
2894  * Assign memory to the CPU.
2895  *
2896  * Return: none
2897  */
2898 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2899 				     qdf_dma_addr_t bus_addr,
2900 				     qdf_size_t size,
2901 				     enum dma_data_direction direction)
2902 {
2903 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2904 }
2905 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
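
/*
 * Illustrative usage sketch (not part of the driver): a typical streaming-DMA
 * ownership handoff around a receive buffer. bus_addr and buf_len are
 * assumptions made for this example.
 *
 *	qdf_mem_dma_sync_single_for_device(osdev, bus_addr, buf_len,
 *					   DMA_FROM_DEVICE);
 *	... device DMAs data into the buffer ...
 *	qdf_mem_dma_sync_single_for_cpu(osdev, bus_addr, buf_len,
 *					DMA_FROM_DEVICE);
 *	... CPU may now safely read the buffer ...
 */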
2906 
2907 void qdf_mem_init(void)
2908 {
2909 	qdf_mem_debug_init();
2910 	qdf_net_buf_debug_init();
2911 	qdf_frag_debug_init();
2912 	qdf_mem_debugfs_init();
2913 	qdf_mem_debug_debugfs_init();
2914 }
2915 qdf_export_symbol(qdf_mem_init);
2916 
2917 void qdf_mem_exit(void)
2918 {
2919 	qdf_mem_debug_debugfs_exit();
2920 	qdf_mem_debugfs_exit();
2921 	qdf_frag_debug_exit();
2922 	qdf_net_buf_debug_exit();
2923 	qdf_mem_debug_exit();
2924 }
2925 qdf_export_symbol(qdf_mem_exit);
2926 
2927 /**
2928  * qdf_ether_addr_copy() - copy an Ethernet address
2929  *
2930  * @dst_addr: A six-byte array Ethernet address destination
2931  * @src_addr: A six-byte array Ethernet address source
2932  *
2933  * Please note: dst & src must both be aligned to u16.
2934  *
2935  * Return: none
2936  */
2937 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2938 {
2939 	if ((!dst_addr) || (!src_addr)) {
2940 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2941 			  "%s called with NULL parameter, source:%pK destination:%pK",
2942 			  __func__, src_addr, dst_addr);
2943 		QDF_ASSERT(0);
2944 		return;
2945 	}
2946 	ether_addr_copy(dst_addr, src_addr);
2947 }
2948 qdf_export_symbol(qdf_ether_addr_copy);
2949 
2950 int32_t qdf_dma_mem_stats_read(void)
2951 {
2952 	return qdf_atomic_read(&qdf_mem_stat.dma);
2953 }
2954 
2955 qdf_export_symbol(qdf_dma_mem_stats_read);
2956 
2957 int32_t qdf_heap_mem_stats_read(void)
2958 {
2959 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2960 }
2961 
2962 qdf_export_symbol(qdf_heap_mem_stats_read);
2963 
2964 int32_t qdf_skb_mem_stats_read(void)
2965 {
2966 	return qdf_atomic_read(&qdf_mem_stat.skb);
2967 }
2968 
2969 qdf_export_symbol(qdf_skb_mem_stats_read);
2970 
2971 int32_t qdf_skb_total_mem_stats_read(void)
2972 {
2973 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2974 }
2975 
2976 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2977 
2978 int32_t qdf_skb_max_mem_stats_read(void)
2979 {
2980 	return qdf_mem_stat.skb_mem_max;
2981 }
2982 
2983 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2984 
2985 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2986 {
2987 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2988 }
2989 
2990 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2991 
2992 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2993 {
2994 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2995 }
2996 
2997 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2998 
2999 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
3000 {
3001 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
3002 }
3003 
3004 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
3005 
3006 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
3007 {
3008 	return qdf_mem_stat.dp_tx_skb_count_max;
3009 }
3010 
3011 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
3012 
3013 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
3014 {
3015 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
3016 }
3017 
3018 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
3019 
3020 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
3021 {
3022 	return qdf_mem_stat.dp_rx_skb_count_max;
3023 }
3024 
3025 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
3026 
3027 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
3028 {
3029 	return qdf_mem_stat.dp_tx_skb_mem_max;
3030 }
3031 
3032 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
3033 
3034 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
3035 {
3036 	return qdf_mem_stat.dp_rx_skb_mem_max;
3037 }
3038 
3039 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
3040 
3041 int32_t qdf_mem_tx_desc_cnt_read(void)
3042 {
3043 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
3044 }
3045 
3046 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
3047 
3048 int32_t qdf_mem_tx_desc_max_read(void)
3049 {
3050 	return qdf_mem_stat.tx_descs_max;
3051 }
3052 
3053 qdf_export_symbol(qdf_mem_tx_desc_max_read);
3054 
3055 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
3056 				int32_t tx_descs_max)
3057 {
3058 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
3059 	qdf_mem_stat.tx_descs_max = tx_descs_max;
3060 }
3061 
3062 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
3063 
3064 void qdf_mem_stats_init(void)
3065 {
3066 	qdf_mem_stat.skb_mem_max = 0;
3067 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
3068 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
3069 	qdf_mem_stat.dp_tx_skb_count_max = 0;
3070 	qdf_mem_stat.dp_rx_skb_count_max = 0;
3071 	qdf_mem_stat.tx_descs_max = 0;
3072 }
3073 
3074 qdf_export_symbol(qdf_mem_stats_init);
3075 
3076 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
3077 {
3078 	void *ptr;
3079 
3080 	if (!size) {
3081 		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
3082 		return NULL;
3083 	}
3084 
3085 	ptr = vzalloc(size);
3086 
3087 	return ptr;
3088 }
3089 
3090 qdf_export_symbol(__qdf_mem_valloc);
3091 
3092 void __qdf_mem_vfree(void *ptr)
3093 {
3094 	if (qdf_unlikely(!ptr))
3095 		return;
3096 
3097 	vfree(ptr);
3098 }
3099 
3100 qdf_export_symbol(__qdf_mem_vfree);
3101 
3102 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
3103 int
3104 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
3105 			  enum qdf_iommu_attr attr, void *data)
3106 {
3107 	return __qdf_iommu_domain_get_attr(domain, attr, data);
3108 }
3109 
3110 qdf_export_symbol(qdf_iommu_domain_get_attr);
3111 #endif
3112 
3113 #ifdef ENHANCED_OS_ABSTRACTION
3114 void qdf_update_mem_map_table(qdf_device_t osdev,
3115 			      qdf_mem_info_t *mem_info,
3116 			      qdf_dma_addr_t dma_addr,
3117 			      uint32_t mem_size)
3118 {
3119 	if (!mem_info) {
3120 		qdf_nofl_err("%s: NULL mem_info", __func__);
3121 		return;
3122 	}
3123 
3124 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
3125 }
3126 
3127 qdf_export_symbol(qdf_update_mem_map_table);
3128 
3129 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
3130 					  qdf_dma_addr_t dma_addr)
3131 {
3132 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
3133 }
3134 
3135 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
3136 #endif
3137 
3138 #ifdef QCA_KMEM_CACHE_SUPPORT
3139 qdf_kmem_cache_t
3140 __qdf_kmem_cache_create(const char *cache_name,
3141 			qdf_size_t size)
3142 {
3143 	struct kmem_cache *cache;
3144 
3145 	cache = kmem_cache_create(cache_name, size,
3146 				  0, 0, NULL);
3147 
3148 	if (!cache)
3149 		return NULL;
3150 
3151 	return cache;
3152 }
3153 qdf_export_symbol(__qdf_kmem_cache_create);
3154 
3155 void
3156 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3157 {
3158 	kmem_cache_destroy(cache);
3159 }
3160 
3161 qdf_export_symbol(__qdf_kmem_cache_destroy);
3162 
void *
3164 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3165 {
3166 	int flags = GFP_KERNEL;
3167 
3168 	if (in_interrupt() || irqs_disabled() || in_atomic())
3169 		flags = GFP_ATOMIC;
3170 
3171 	return kmem_cache_alloc(cache, flags);
3172 }
3173 
3174 qdf_export_symbol(__qdf_kmem_cache_alloc);
3175 
3176 void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
3180 	kmem_cache_free(cache, node);
3181 }
3182 
3183 qdf_export_symbol(__qdf_kmem_cache_free);
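
/*
 * Illustrative usage sketch (not part of the driver): a fixed-size object
 * cache built on the helpers above. my_cache, my_obj and the 128-byte object
 * size are assumptions made for this example.
 *
 *	qdf_kmem_cache_t my_cache;
 *	void *my_obj;
 *
 *	my_cache = __qdf_kmem_cache_create("my_obj_cache", 128);
 *	if (!my_cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	my_obj = __qdf_kmem_cache_alloc(my_cache);
 *	...
 *	__qdf_kmem_cache_free(my_cache, my_obj);
 *	__qdf_kmem_cache_destroy(my_cache);
 */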
3184 #else
3185 qdf_kmem_cache_t
3186 __qdf_kmem_cache_create(const char *cache_name,
3187 			qdf_size_t size)
3188 {
3189 	return NULL;
3190 }
3191 
3192 void
3193 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3194 {
3195 }
3196 
3197 void *
3198 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3199 {
3200 	return NULL;
3201 }
3202 
3203 void
3204 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3205 {
3206 }
3207 #endif
3208