xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * This file provides OS dependent memory management APIs
23  */
24 
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38 
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46 
47 /* cnss prealloc maintains various prealloc pools of 8 KB, 16 KB, 32 KB and so
48  * on, and allocates buffers from these pools for the wlan driver. When the wlan
49  * driver requests to free a memory buffer, cnss prealloc derives the slab_cache
50  * from the virtual address via the page struct to identify the prealloc pool id
51  * and put the buffer back into the pool. Kernel 5.17 removed slab_cache from the
52  * page struct, so add headroom to store the cache pointer at the beginning of the
53  * allocated memory buffer and use it later to identify the prealloc pool id.
54  */
55 #if defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE)
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
57 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
58 {
59 	return true;
60 }
61 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
62 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
63 {
64 	return false;
65 }
66 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
67 #else /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
68 static inline bool add_headroom_for_cnss_prealloc_cache_ptr(void)
69 {
70 	return false;
71 }
72 #endif /* defined(CNSS_MEM_PRE_ALLOC) && defined(CONFIG_CNSS_OUT_OF_TREE) */
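
/*
 * Illustrative sketch only (not part of the driver): when the headroom above
 * is enabled, a prealloc-aware allocator could stash the kmem_cache pointer
 * in the extra sizeof(void *) bytes and read it back at free time:
 *
 *	buf = kmalloc(size + sizeof(void *), GFP_KERNEL);
 *	*(struct kmem_cache **)buf = cache;
 *	return buf + sizeof(void *);
 *
 * and on free, step back sizeof(void *) bytes to recover "cache" and hence
 * the prealloc pool id. The real bookkeeping lives in the cnss prealloc
 * module; this only illustrates the layout implied by the headroom.
 */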
73 
74 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
75 static bool mem_debug_disabled;
76 qdf_declare_param(mem_debug_disabled, bool);
77 #endif
78 
79 #ifdef MEMORY_DEBUG
80 static bool is_initial_mem_debug_disabled;
81 #endif
82 
83 /* Preprocessor Definitions and Constants */
84 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
85 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
86 #define QDF_DEBUG_STRING_SIZE 512
87 
88 /**
89  * struct __qdf_mem_stat - qdf memory statistics
90  * @kmalloc: total kmalloc allocations in bytes
91  * @dma: total dma allocations in bytes
92  * @skb: total skb allocations in bytes
93  * @skb_total: total skb allocations in the host driver, in bytes
94  * @dp_tx_skb: total Tx skb allocations in the datapath, in bytes
95  * @dp_rx_skb: total Rx skb allocations in the datapath, in bytes
96  * @skb_mem_max: high watermark for skb allocations, in bytes
97  * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations, in bytes
98  * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations, in bytes
99  * @dp_tx_skb_count: DP Tx buffer count
100  * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
101  * @dp_rx_skb_count: DP Rx buffer count
102  * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
103  * @tx_descs_outstanding: Current pending Tx descs count
104  * @tx_descs_max: High watermark for pending Tx descs count
105  */
106 static struct __qdf_mem_stat {
107 	qdf_atomic_t kmalloc;
108 	qdf_atomic_t dma;
109 	qdf_atomic_t skb;
110 	qdf_atomic_t skb_total;
111 	qdf_atomic_t dp_tx_skb;
112 	qdf_atomic_t dp_rx_skb;
113 	int32_t skb_mem_max;
114 	int32_t dp_tx_skb_mem_max;
115 	int32_t dp_rx_skb_mem_max;
116 	qdf_atomic_t dp_tx_skb_count;
117 	int32_t dp_tx_skb_count_max;
118 	qdf_atomic_t dp_rx_skb_count;
119 	int32_t dp_rx_skb_count_max;
120 	qdf_atomic_t tx_descs_outstanding;
121 	int32_t tx_descs_max;
122 } qdf_mem_stat;
123 
124 #ifdef MEMORY_DEBUG
125 #include "qdf_debug_domain.h"
126 
127 enum list_type {
128 	LIST_TYPE_MEM = 0,
129 	LIST_TYPE_DMA = 1,
130 	LIST_TYPE_NBUF = 2,
131 	LIST_TYPE_MAX,
132 };
133 
134 /**
135  * struct major_alloc_priv - private data registered to the debugfs entry
136  *                   created to list the major allocations
137  * @type:            type of the list to be parsed
138  * @threshold:       configured by the user by overwriting the respective
139  *                   debugfs entry. Used to list the functions which requested
140  *                   memory/dma allocations more than threshold number of times.
141  */
142 struct major_alloc_priv {
143 	enum list_type type;
144 	uint32_t threshold;
145 };
146 
147 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
148 static qdf_spinlock_t qdf_mem_list_lock;
149 
150 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
151 static qdf_spinlock_t qdf_mem_dma_list_lock;
152 
153 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
154 {
155 	return &qdf_mem_domains[domain];
156 }
157 
158 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
159 {
160 	return &qdf_mem_dma_domains[domain];
161 }
162 
163 /**
164  * struct qdf_mem_header - memory object to debug
165  * @node: node to the list
166  * @domain: the active memory domain at time of allocation
167  * @freed: flag set during free, used to detect double frees
168  *	Use uint8_t so we can detect corruption
169  * @func: name of the function the allocation was made from
170  * @line: line number of the file the allocation was made from
171  * @size: size of the allocation in bytes
172  * @caller: Caller of the function for which memory is allocated
173  * @header: a known value, used to detect out-of-bounds access
174  * @time: timestamp at which allocation was made
175  */
176 struct qdf_mem_header {
177 	qdf_list_node_t node;
178 	enum qdf_debug_domain domain;
179 	uint8_t freed;
180 	char func[QDF_MEM_FUNC_NAME_SIZE];
181 	uint32_t line;
182 	uint32_t size;
183 	void *caller;
184 	uint64_t header;
185 	uint64_t time;
186 };
187 
188 /* align the qdf_mem_header to 8 bytes */
189 #define QDF_DMA_MEM_HEADER_ALIGN 8
190 
191 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
192 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
193 
194 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
195 {
196 	return (struct qdf_mem_header *)ptr - 1;
197 }
198 
199 /* make sure the header pointer is 8-byte aligned */
200 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
201 							    qdf_size_t size)
202 {
203 	return (struct qdf_mem_header *)
204 				qdf_roundup((size_t)((uint8_t *)ptr + size),
205 					    QDF_DMA_MEM_HEADER_ALIGN);
206 }
207 
208 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
209 {
210 	return (uint64_t *)((void *)(header + 1) + header->size);
211 }
212 
213 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
214 {
215 	return (void *)(header + 1);
216 }
217 
218 /* number of bytes needed for the qdf memory debug information */
219 #define QDF_MEM_DEBUG_SIZE \
220 	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
221 
222 /* number of bytes needed for the qdf dma memory debug information */
223 #define QDF_DMA_MEM_DEBUG_SIZE \
224 	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
225 
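/*
 * Layout sketch (illustrative): for a normal debug allocation of "size"
 * bytes, the underlying kzalloc covers
 *
 *	| struct qdf_mem_header | size bytes of user data | WLAN_MEM_TRAILER |
 *
 * qdf_mem_get_ptr() returns the address just past the header, and
 * qdf_mem_get_trailer() the address just past the user data. For DMA
 * allocations the header is instead placed after the buffer, rounded up to
 * QDF_DMA_MEM_HEADER_ALIGN, which is why QDF_DMA_MEM_DEBUG_SIZE reserves the
 * header size plus alignment rather than header plus trailer.
 */
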
226 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
227 {
228 	QDF_BUG(header);
229 	if (!header)
230 		return;
231 	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
232 }
233 
234 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
235 				const char *func, uint32_t line, void *caller)
236 {
237 	QDF_BUG(header);
238 	if (!header)
239 		return;
240 
241 	header->domain = qdf_debug_domain_get();
242 	header->freed = false;
243 
244 	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
245 
246 	header->line = line;
247 	header->size = size;
248 	header->caller = caller;
249 	header->header = WLAN_MEM_HEADER;
250 	header->time = qdf_get_log_timestamp();
251 }
252 
253 enum qdf_mem_validation_bitmap {
254 	QDF_MEM_BAD_HEADER = 1 << 0,
255 	QDF_MEM_BAD_TRAILER = 1 << 1,
256 	QDF_MEM_BAD_SIZE = 1 << 2,
257 	QDF_MEM_DOUBLE_FREE = 1 << 3,
258 	QDF_MEM_BAD_FREED = 1 << 4,
259 	QDF_MEM_BAD_NODE = 1 << 5,
260 	QDF_MEM_BAD_DOMAIN = 1 << 6,
261 	QDF_MEM_WRONG_DOMAIN = 1 << 7,
262 };
263 
264 static enum qdf_mem_validation_bitmap
265 qdf_mem_trailer_validate(struct qdf_mem_header *header)
266 {
267 	enum qdf_mem_validation_bitmap error_bitmap = 0;
268 
269 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
270 		error_bitmap |= QDF_MEM_BAD_TRAILER;
271 	return error_bitmap;
272 }
273 
274 static enum qdf_mem_validation_bitmap
275 qdf_mem_header_validate(struct qdf_mem_header *header,
276 			enum qdf_debug_domain domain)
277 {
278 	enum qdf_mem_validation_bitmap error_bitmap = 0;
279 
280 	if (header->header != WLAN_MEM_HEADER)
281 		error_bitmap |= QDF_MEM_BAD_HEADER;
282 
283 	if (header->size > QDF_MEM_MAX_MALLOC)
284 		error_bitmap |= QDF_MEM_BAD_SIZE;
285 
286 	if (header->freed == true)
287 		error_bitmap |= QDF_MEM_DOUBLE_FREE;
288 	else if (header->freed)
289 		error_bitmap |= QDF_MEM_BAD_FREED;
290 
291 	if (!qdf_list_node_in_any_list(&header->node))
292 		error_bitmap |= QDF_MEM_BAD_NODE;
293 
294 	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
295 	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
296 		error_bitmap |= QDF_MEM_BAD_DOMAIN;
297 	else if (header->domain != domain)
298 		error_bitmap |= QDF_MEM_WRONG_DOMAIN;
299 
300 	return error_bitmap;
301 }
302 
303 static void
304 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
305 			    enum qdf_debug_domain current_domain,
306 			    enum qdf_mem_validation_bitmap error_bitmap,
307 			    const char *func,
308 			    uint32_t line)
309 {
310 	if (!error_bitmap)
311 		return;
312 
313 	if (error_bitmap & QDF_MEM_BAD_HEADER)
314 		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
315 			header->header, WLAN_MEM_HEADER);
316 
317 	if (error_bitmap & QDF_MEM_BAD_SIZE)
318 		qdf_err("Corrupted memory size %u (expected < %d)",
319 			header->size, QDF_MEM_MAX_MALLOC);
320 
321 	if (error_bitmap & QDF_MEM_BAD_TRAILER)
322 		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
323 			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
324 
325 	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
326 		qdf_err("Memory has previously been freed");
327 
328 	if (error_bitmap & QDF_MEM_BAD_FREED)
329 		qdf_err("Corrupted memory freed flag 0x%x", header->freed);
330 
331 	if (error_bitmap & QDF_MEM_BAD_NODE)
332 		qdf_err("Corrupted memory header node or double free");
333 
334 	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
335 		qdf_err("Corrupted memory domain 0x%x", header->domain);
336 
337 	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
338 		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
339 			qdf_debug_domain_name(header->domain), header->domain,
340 			qdf_debug_domain_name(current_domain), current_domain);
341 
342 	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
343 }
344 
345 /**
346  * struct __qdf_mem_info - memory statistics
347  * @func: the function which allocated memory
348  * @line: the line at which allocation happened
349  * @size: the size of allocation
350  * @caller: Address of the caller function
351  * @count: how many allocations of same type
352  * @time: timestamp at which allocation happened
353  */
354 struct __qdf_mem_info {
355 	char func[QDF_MEM_FUNC_NAME_SIZE];
356 	uint32_t line;
357 	uint32_t size;
358 	void *caller;
359 	uint32_t count;
360 	uint64_t time;
361 };
362 
363 /*
364  * The table depth defines the de-duplication proximity scope.
365  * A deeper table takes more time, so choose an optimal value.
366  */
367 #define QDF_MEM_STAT_TABLE_SIZE 8
368 
369 /**
370  * qdf_mem_debug_print_header() - memory debug header print logic
371  * @print: the print adapter function
372  * @print_priv: the private data to be consumed by @print
373  * @threshold: the threshold value set by user to list top allocations
374  *
375  * Return: None
376  */
377 static void qdf_mem_debug_print_header(qdf_abstract_print print,
378 				       void *print_priv,
379 				       uint32_t threshold)
380 {
381 	if (threshold)
382 		print(print_priv, "APIs requested allocations >= %u no of times",
383 		      threshold);
384 	print(print_priv,
385 	      "--------------------------------------------------------------");
386 	print(print_priv,
387 	      " count    size     total    filename     caller    timestamp");
388 	print(print_priv,
389 	      "--------------------------------------------------------------");
390 }
391 
392 /**
393  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
394  * @table: the memory metadata table to insert into
395  * @meta: the memory metadata to insert
396  *
397  * Return: true if the table is full after inserting, false otherwise
398  */
399 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
400 				      struct qdf_mem_header *meta)
401 {
402 	int i;
403 
404 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
405 		if (!table[i].count) {
406 			qdf_str_lcopy(table[i].func, meta->func,
407 				      QDF_MEM_FUNC_NAME_SIZE);
408 			table[i].line = meta->line;
409 			table[i].size = meta->size;
410 			table[i].count = 1;
411 			table[i].caller = meta->caller;
412 			table[i].time = meta->time;
413 			break;
414 		}
415 
416 		if (qdf_str_eq(table[i].func, meta->func) &&
417 		    table[i].line == meta->line &&
418 		    table[i].size == meta->size &&
419 		    table[i].caller == meta->caller) {
420 			table[i].count++;
421 			break;
422 		}
423 	}
424 
425 	/* return true if the table is now full */
426 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
427 }
428 
429 /**
430  * qdf_mem_domain_print() - output agnostic memory domain print logic
431  * @domain: the memory domain to print
432  * @print: the print adapter function
433  * @print_priv: the private data to be consumed by @print
434  * @threshold: the threshold value set by user to list top allocations
435  * @mem_print: pointer to function which prints the memory allocation data
436  *
437  * Return: None
438  */
439 static void qdf_mem_domain_print(qdf_list_t *domain,
440 				 qdf_abstract_print print,
441 				 void *print_priv,
442 				 uint32_t threshold,
443 				 void (*mem_print)(struct __qdf_mem_info *,
444 						   qdf_abstract_print,
445 						   void *, uint32_t))
446 {
447 	QDF_STATUS status;
448 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
449 	qdf_list_node_t *node;
450 
451 	qdf_mem_zero(table, sizeof(table));
452 	qdf_mem_debug_print_header(print, print_priv, threshold);
453 
454 	/* hold lock while inserting to avoid use-after-free of the metadata */
455 	qdf_spin_lock(&qdf_mem_list_lock);
456 	status = qdf_list_peek_front(domain, &node);
457 	while (QDF_IS_STATUS_SUCCESS(status)) {
458 		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
459 		bool is_full = qdf_mem_meta_table_insert(table, meta);
460 
461 		qdf_spin_unlock(&qdf_mem_list_lock);
462 
463 		if (is_full) {
464 			(*mem_print)(table, print, print_priv, threshold);
465 			qdf_mem_zero(table, sizeof(table));
466 		}
467 
468 		qdf_spin_lock(&qdf_mem_list_lock);
469 		status = qdf_list_peek_next(domain, node, &node);
470 	}
471 	qdf_spin_unlock(&qdf_mem_list_lock);
472 
473 	(*mem_print)(table, print, print_priv, threshold);
474 }
475 
476 /**
477  * qdf_mem_meta_table_print() - memory metadata table print logic
478  * @table: the memory metadata table to print
479  * @print: the print adapter function
480  * @print_priv: the private data to be consumed by @print
481  * @threshold: the threshold value set by user to list top allocations
482  *
483  * Return: None
484  */
485 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
486 				     qdf_abstract_print print,
487 				     void *print_priv,
488 				     uint32_t threshold)
489 {
490 	int i;
491 	char debug_str[QDF_DEBUG_STRING_SIZE];
492 	size_t len = 0;
493 	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
494 
495 	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
496 			     "%s", debug_prefix);
497 
498 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
499 		if (!table[i].count)
500 			break;
501 
502 		print(print_priv,
503 		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
504 		      table[i].count,
505 		      table[i].size,
506 		      table[i].count * table[i].size,
507 		      table[i].func,
508 		      table[i].line, table[i].caller,
509 		      table[i].time);
510 		len += qdf_scnprintf(debug_str + len,
511 				     sizeof(debug_str) - len,
512 				     " @ %s:%u %pS",
513 				     table[i].func,
514 				     table[i].line,
515 				     table[i].caller);
516 	}
517 	print(print_priv, "%s", debug_str);
518 }
519 
520 static int qdf_err_printer(void *priv, const char *fmt, ...)
521 {
522 	va_list args;
523 
524 	va_start(args, fmt);
525 	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
526 	va_end(args);
527 
528 	return 0;
529 }
530 
531 #endif /* MEMORY_DEBUG */
532 
533 bool prealloc_disabled = true;
534 qdf_declare_param(prealloc_disabled, bool);
535 qdf_export_symbol(prealloc_disabled);
536 
537 int qdf_mem_malloc_flags(void)
538 {
539 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
540 		return GFP_ATOMIC;
541 
542 	return GFP_KERNEL;
543 }
544 
545 qdf_export_symbol(qdf_mem_malloc_flags);
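
/*
 * Usage sketch (illustrative, not taken from this file): callers that may
 * run in atomic context can pick the GFP flags at the call site, e.g.
 *
 *	buf = kzalloc(len, qdf_mem_malloc_flags());
 *
 * which selects GFP_ATOMIC from interrupt, non-preemptible or RCU read-side
 * context and GFP_KERNEL otherwise.
 */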
546 
547 /**
548  * qdf_prealloc_disabled_config_get() - Get the user configuration of
549  *                                       prealloc_disabled
550  *
551  * Return: value of prealloc_disabled qdf module argument
552  */
553 bool qdf_prealloc_disabled_config_get(void)
554 {
555 	return prealloc_disabled;
556 }
557 
558 qdf_export_symbol(qdf_prealloc_disabled_config_get);
559 
560 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
561 /**
562  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
563  * @str_value: value of the module param
564  *
565  * This function will set qdf module param prealloc_disabled
566  *
567  * Return: QDF_STATUS_SUCCESS on Success
568  */
569 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
570 {
571 	QDF_STATUS status;
572 
573 	status = qdf_bool_parse(str_value, &prealloc_disabled);
574 	return status;
575 }
576 #endif
577 
578 #if defined WLAN_DEBUGFS
579 
580 /* Debugfs root directory for qdf_mem */
581 static struct dentry *qdf_mem_debugfs_root;
582 
583 #ifdef MEMORY_DEBUG
584 static int seq_printf_printer(void *priv, const char *fmt, ...)
585 {
586 	struct seq_file *file = priv;
587 	va_list args;
588 
589 	va_start(args, fmt);
590 	seq_vprintf(file, fmt, args);
591 	seq_puts(file, "\n");
592 	va_end(args);
593 
594 	return 0;
595 }
596 
597 /**
598  * qdf_print_major_alloc() - memory metadata table print logic
599  * @table: the memory metadata table to print
600  * @print: the print adapter function
601  * @print_priv: the private data to be consumed by @print
602  * @threshold: the threshold value set by user to list top allocations
603  *
604  * Return: None
605  */
606 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
607 				  qdf_abstract_print print,
608 				  void *print_priv,
609 				  uint32_t threshold)
610 {
611 	int i;
612 
613 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
614 		if (!table[i].count)
615 			break;
616 		if (table[i].count >= threshold)
617 			print(print_priv,
618 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
619 			      table[i].count,
620 			      table[i].size,
621 			      table[i].count * table[i].size,
622 			      table[i].func,
623 			      table[i].line, table[i].caller,
624 			      table[i].time);
625 	}
626 }
627 
628 /**
629  * qdf_mem_seq_start() - sequential callback to start
630  * @seq: seq_file handle
631  * @pos: The start position of the sequence
632  *
633  * Return: iterator pointer, or NULL if iteration is complete
634  */
635 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
636 {
637 	enum qdf_debug_domain domain = *pos;
638 
639 	if (!qdf_debug_domain_valid(domain))
640 		return NULL;
641 
642 	/* just use the current position as our iterator */
643 	return pos;
644 }
645 
646 /**
647  * qdf_mem_seq_next() - next sequential callback
648  * @seq: seq_file handle
649  * @v: the current iterator
650  * @pos: the current position
651  *
652  * Get the next node and release previous node.
653  *
654  * Return: iterator pointer, or NULL if iteration is complete
655  */
656 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
657 {
658 	++*pos;
659 
660 	return qdf_mem_seq_start(seq, pos);
661 }
662 
663 /**
664  * qdf_mem_seq_stop() - stop sequential callback
665  * @seq: seq_file handle
666  * @v: current iterator
667  *
668  * Return: None
669  */
670 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
671 
672 /**
673  * qdf_mem_seq_show() - print sequential callback
674  * @seq: seq_file handle
675  * @v: current iterator
676  *
677  * Return: 0 - success
678  */
679 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
680 {
681 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
682 
683 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
684 		   qdf_debug_domain_name(domain_id), domain_id);
685 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
686 			     seq_printf_printer,
687 			     seq,
688 			     0,
689 			     qdf_mem_meta_table_print);
690 
691 	return 0;
692 }
693 
694 /* sequential file operation table */
695 static const struct seq_operations qdf_mem_seq_ops = {
696 	.start = qdf_mem_seq_start,
697 	.next  = qdf_mem_seq_next,
698 	.stop  = qdf_mem_seq_stop,
699 	.show  = qdf_mem_seq_show,
700 };
701 
702 
703 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
704 {
705 	return seq_open(file, &qdf_mem_seq_ops);
706 }
707 
708 /**
709  * qdf_major_alloc_show() - print sequential callback
710  * @seq: seq_file handle
711  * @v: current iterator
712  *
713  * Return: 0 - success
714  */
715 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
716 {
717 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
718 	struct major_alloc_priv *priv;
719 	qdf_list_t *list;
720 
721 	priv = (struct major_alloc_priv *)seq->private;
722 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
723 		   qdf_debug_domain_name(domain_id), domain_id);
724 
725 	switch (priv->type) {
726 	case LIST_TYPE_MEM:
727 		list = qdf_mem_list_get(domain_id);
728 		break;
729 	case LIST_TYPE_DMA:
730 		list = qdf_mem_dma_list(domain_id);
731 		break;
732 	default:
733 		list = NULL;
734 		break;
735 	}
736 
737 	if (list)
738 		qdf_mem_domain_print(list,
739 				     seq_printf_printer,
740 				     seq,
741 				     priv->threshold,
742 				     qdf_print_major_alloc);
743 
744 	return 0;
745 }
746 
747 /* sequential file operation table created to track major allocs */
748 static const struct seq_operations qdf_major_allocs_seq_ops = {
749 	.start = qdf_mem_seq_start,
750 	.next = qdf_mem_seq_next,
751 	.stop = qdf_mem_seq_stop,
752 	.show = qdf_major_alloc_show,
753 };
754 
755 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
756 {
757 	void *private = inode->i_private;
758 	struct seq_file *seq;
759 	int rc;
760 
761 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
762 	if (rc == 0) {
763 		seq = file->private_data;
764 		seq->private = private;
765 	}
766 	return rc;
767 }
768 
769 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
770 					     const char __user *user_buf,
771 					     size_t count,
772 					     loff_t *pos)
773 {
774 	char buf[32];
775 	ssize_t buf_size;
776 	uint32_t threshold;
777 	struct seq_file *seq = file->private_data;
778 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
779 
780 	buf_size = min(count, (sizeof(buf) - 1));
781 	if (buf_size <= 0)
782 		return 0;
783 	if (copy_from_user(buf, user_buf, buf_size))
784 		return -EFAULT;
785 	buf[buf_size] = '\0';
786 	if (!kstrtou32(buf, 10, &threshold))
787 		priv->threshold = threshold;
788 	return buf_size;
789 }
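
/*
 * Usage sketch (illustrative): the threshold can be changed at runtime by
 * writing a decimal value to the corresponding debugfs file, e.g.
 *
 *	echo 100 > /sys/kernel/debug/<qdf root>/mem/major_mem_allocs
 *
 * after which a read of the same file lists only call sites with at least
 * 100 outstanding allocations of the same size. The path below the debugfs
 * mount point depends on the directory created via qdf_debugfs_get_root().
 */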
790 
791 /**
792  * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
793  * @threshold: the threshold value set by user to list top allocations
794  * @print: the print adapter function
795  * @print_priv: the private data to be consumed by @print
796  * @mem_print: pointer to function which prints the memory allocation data
797  *
798  * Return: None
799  */
800 static void
801 qdf_print_major_nbuf_allocs(uint32_t threshold,
802 			    qdf_abstract_print print,
803 			    void *print_priv,
804 			    void (*mem_print)(struct __qdf_mem_info *,
805 					      qdf_abstract_print,
806 					      void *, uint32_t))
807 {
808 	uint32_t nbuf_iter;
809 	unsigned long irq_flag = 0;
810 	QDF_NBUF_TRACK *p_node;
811 	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
812 	struct qdf_mem_header meta;
813 	bool is_full;
814 
815 	qdf_mem_zero(table, sizeof(table));
816 	qdf_mem_debug_print_header(print, print_priv, threshold);
817 
818 	if (is_initial_mem_debug_disabled)
819 		return;
820 
821 	qdf_rl_info("major nbuf print with threshold %u", threshold);
822 
823 	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
824 	     nbuf_iter++) {
825 		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
826 		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
827 		while (p_node) {
828 			meta.line = p_node->line_num;
829 			meta.size = p_node->size;
830 			meta.caller = NULL;
831 			meta.time = p_node->time;
832 			qdf_str_lcopy(meta.func, p_node->func_name,
833 				      QDF_MEM_FUNC_NAME_SIZE);
834 
835 			is_full = qdf_mem_meta_table_insert(table, &meta);
836 
837 			if (is_full) {
838 				(*mem_print)(table, print,
839 					     print_priv, threshold);
840 				qdf_mem_zero(table, sizeof(table));
841 			}
842 
843 			p_node = p_node->p_next;
844 		}
845 		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
846 	}
847 
848 	(*mem_print)(table, print, print_priv, threshold);
849 
850 	qdf_rl_info("major nbuf print end");
851 }
852 
853 /**
854  * qdf_major_nbuf_alloc_show() - print sequential callback
855  * @seq: seq_file handle
856  * @v: current iterator
857  *
858  * Return: 0 - success
859  */
860 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
861 {
862 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
863 
864 	if (!priv) {
865 		qdf_err("priv is null");
866 		return -EINVAL;
867 	}
868 
869 	qdf_print_major_nbuf_allocs(priv->threshold,
870 				    seq_printf_printer,
871 				    seq,
872 				    qdf_print_major_alloc);
873 
874 	return 0;
875 }
876 
877 /**
878  * qdf_nbuf_seq_start() - sequential callback to start
879  * @seq: seq_file handle
880  * @pos: The start position of the sequence
881  *
882  * Return: iterator pointer, or NULL if iteration is complete
883  */
884 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
885 {
886 	enum qdf_debug_domain domain = *pos;
887 
888 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
889 		return NULL;
890 
891 	return pos;
892 }
893 
894 /**
895  * qdf_nbuf_seq_next() - next sequential callback
896  * @seq: seq_file handle
897  * @v: the current iterator
898  * @pos: the current position
899  *
900  * Get the next node and release previous node.
901  *
902  * Return: iterator pointer, or NULL if iteration is complete
903  */
904 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
905 {
906 	++*pos;
907 
908 	return qdf_nbuf_seq_start(seq, pos);
909 }
910 
911 /**
912  * qdf_nbuf_seq_stop() - stop sequential callback
913  * @seq: seq_file handle
914  * @v: current iterator
915  *
916  * Return: None
917  */
918 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
919 
920 /* sequential file operation table created to track major skb allocs */
921 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
922 	.start = qdf_nbuf_seq_start,
923 	.next = qdf_nbuf_seq_next,
924 	.stop = qdf_nbuf_seq_stop,
925 	.show = qdf_major_nbuf_alloc_show,
926 };
927 
928 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
929 {
930 	void *private = inode->i_private;
931 	struct seq_file *seq;
932 	int rc;
933 
934 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
935 	if (rc == 0) {
936 		seq = file->private_data;
937 		seq->private = private;
938 	}
939 	return rc;
940 }
941 
942 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
943 						  const char __user *user_buf,
944 						  size_t count,
945 						  loff_t *pos)
946 {
947 	char buf[32];
948 	ssize_t buf_size;
949 	uint32_t threshold;
950 	struct seq_file *seq = file->private_data;
951 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
952 
953 	buf_size = min(count, (sizeof(buf) - 1));
954 	if (buf_size <= 0)
955 		return 0;
956 	if (copy_from_user(buf, user_buf, buf_size))
957 		return -EFAULT;
958 	buf[buf_size] = '\0';
959 	if (!kstrtou32(buf, 10, &threshold))
960 		priv->threshold = threshold;
961 	return buf_size;
962 }
963 
964 /* file operation table for listing major allocs */
965 static const struct file_operations fops_qdf_major_allocs = {
966 	.owner = THIS_MODULE,
967 	.open = qdf_major_allocs_open,
968 	.read = seq_read,
969 	.llseek = seq_lseek,
970 	.release = seq_release,
971 	.write = qdf_major_alloc_set_threshold,
972 };
973 
974 /* debugfs file operation table */
975 static const struct file_operations fops_qdf_mem_debugfs = {
976 	.owner = THIS_MODULE,
977 	.open = qdf_mem_debugfs_open,
978 	.read = seq_read,
979 	.llseek = seq_lseek,
980 	.release = seq_release,
981 };
982 
983 /* file operation table for listing major allocs */
984 static const struct file_operations fops_qdf_nbuf_major_allocs = {
985 	.owner = THIS_MODULE,
986 	.open = qdf_major_nbuf_allocs_open,
987 	.read = seq_read,
988 	.llseek = seq_lseek,
989 	.release = seq_release,
990 	.write = qdf_major_nbuf_alloc_set_threshold,
991 };
992 
993 static struct major_alloc_priv mem_priv = {
994 	/* List type set to mem */
995 	LIST_TYPE_MEM,
996 	/* initial threshold to list APIs which allocates mem >= 50 times */
997 	50
998 };
999 
1000 static struct major_alloc_priv dma_priv = {
1001 	/* List type set to DMA */
1002 	LIST_TYPE_DMA,
1003 	/* initial threshold to list APIs which allocates dma >= 50 times */
1004 	50
1005 };
1006 
1007 static struct major_alloc_priv nbuf_priv = {
1008 	/* List type set to NBUF */
1009 	LIST_TYPE_NBUF,
1010 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
1011 	50
1012 };
1013 
1014 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1015 {
1016 	if (is_initial_mem_debug_disabled)
1017 		return QDF_STATUS_SUCCESS;
1018 
1019 	if (!qdf_mem_debugfs_root)
1020 		return QDF_STATUS_E_FAILURE;
1021 
1022 	debugfs_create_file("list",
1023 			    S_IRUSR,
1024 			    qdf_mem_debugfs_root,
1025 			    NULL,
1026 			    &fops_qdf_mem_debugfs);
1027 
1028 	debugfs_create_file("major_mem_allocs",
1029 			    0600,
1030 			    qdf_mem_debugfs_root,
1031 			    &mem_priv,
1032 			    &fops_qdf_major_allocs);
1033 
1034 	debugfs_create_file("major_dma_allocs",
1035 			    0600,
1036 			    qdf_mem_debugfs_root,
1037 			    &dma_priv,
1038 			    &fops_qdf_major_allocs);
1039 
1040 	debugfs_create_file("major_nbuf_allocs",
1041 			    0600,
1042 			    qdf_mem_debugfs_root,
1043 			    &nbuf_priv,
1044 			    &fops_qdf_nbuf_major_allocs);
1045 
1046 	return QDF_STATUS_SUCCESS;
1047 }
1048 
1049 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1050 {
1051 	return QDF_STATUS_SUCCESS;
1052 }
1053 
1054 #else /* MEMORY_DEBUG */
1055 
1056 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1057 {
1058 	return QDF_STATUS_E_NOSUPPORT;
1059 }
1060 
1061 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1062 {
1063 	return QDF_STATUS_E_NOSUPPORT;
1064 }
1065 
1066 #endif /* MEMORY_DEBUG */
1067 
1068 
1069 static void qdf_mem_debugfs_exit(void)
1070 {
1071 	debugfs_remove_recursive(qdf_mem_debugfs_root);
1072 	qdf_mem_debugfs_root = NULL;
1073 }
1074 
1075 static QDF_STATUS qdf_mem_debugfs_init(void)
1076 {
1077 	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1078 
1079 	if (!qdf_debugfs_root)
1080 		return QDF_STATUS_E_FAILURE;
1081 
1082 	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1083 
1084 	if (!qdf_mem_debugfs_root)
1085 		return QDF_STATUS_E_FAILURE;
1086 
1087 
1088 	debugfs_create_atomic_t("kmalloc",
1089 				S_IRUSR,
1090 				qdf_mem_debugfs_root,
1091 				&qdf_mem_stat.kmalloc);
1092 
1093 	debugfs_create_atomic_t("dma",
1094 				S_IRUSR,
1095 				qdf_mem_debugfs_root,
1096 				&qdf_mem_stat.dma);
1097 
1098 	debugfs_create_atomic_t("skb",
1099 				S_IRUSR,
1100 				qdf_mem_debugfs_root,
1101 				&qdf_mem_stat.skb);
1102 
1103 	return QDF_STATUS_SUCCESS;
1104 }
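
/*
 * Usage sketch (illustrative): with WLAN_DEBUGFS enabled, the running byte
 * counters exported above can be read directly, e.g.
 *
 *	cat /sys/kernel/debug/<qdf root>/mem/kmalloc
 *	cat /sys/kernel/debug/<qdf root>/mem/dma
 *	cat /sys/kernel/debug/<qdf root>/mem/skb
 *
 * where <qdf root> is whatever directory qdf_debugfs_get_root() points at.
 */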
1105 
1106 #else /* WLAN_DEBUGFS */
1107 
1108 static QDF_STATUS qdf_mem_debugfs_init(void)
1109 {
1110 	return QDF_STATUS_E_NOSUPPORT;
1111 }
1112 static void qdf_mem_debugfs_exit(void) {}
1113 
1114 
1115 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1116 {
1117 	return QDF_STATUS_E_NOSUPPORT;
1118 }
1119 
1120 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1121 {
1122 	return QDF_STATUS_E_NOSUPPORT;
1123 }
1124 
1125 #endif /* WLAN_DEBUGFS */
1126 
1127 void qdf_mem_kmalloc_inc(qdf_size_t size)
1128 {
1129 	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1130 }
1131 
1132 static void qdf_mem_dma_inc(qdf_size_t size)
1133 {
1134 	qdf_atomic_add(size, &qdf_mem_stat.dma);
1135 }
1136 
1137 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1138 void qdf_mem_skb_inc(qdf_size_t size)
1139 {
1140 	qdf_atomic_add(size, &qdf_mem_stat.skb);
1141 }
1142 
1143 void qdf_mem_skb_dec(qdf_size_t size)
1144 {
1145 	qdf_atomic_sub(size, &qdf_mem_stat.skb);
1146 }
1147 
1148 void qdf_mem_skb_total_inc(qdf_size_t size)
1149 {
1150 	int32_t skb_mem_max = 0;
1151 
1152 	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1153 	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1154 	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1155 		qdf_mem_stat.skb_mem_max = skb_mem_max;
1156 }
1157 
1158 void qdf_mem_skb_total_dec(qdf_size_t size)
1159 {
1160 	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1161 }
1162 
1163 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1164 {
1165 	int32_t curr_dp_tx_skb_mem_max = 0;
1166 
1167 	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1168 	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1169 	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1170 		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1171 }
1172 
1173 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1174 {
1175 	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1176 }
1177 
1178 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1179 {
1180 	int32_t curr_dp_rx_skb_mem_max = 0;
1181 
1182 	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1183 	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1184 	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1185 		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1186 }
1187 
1188 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1189 {
1190 	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1191 }
1192 
1193 void qdf_mem_dp_tx_skb_cnt_inc(void)
1194 {
1195 	int32_t curr_dp_tx_skb_count_max = 0;
1196 
1197 	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1198 	curr_dp_tx_skb_count_max =
1199 		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1200 	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1201 		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1202 }
1203 
1204 void qdf_mem_dp_tx_skb_cnt_dec(void)
1205 {
1206 	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1207 }
1208 
1209 void qdf_mem_dp_rx_skb_cnt_inc(void)
1210 {
1211 	int32_t curr_dp_rx_skb_count_max = 0;
1212 
1213 	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1214 	curr_dp_rx_skb_count_max =
1215 		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1216 	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1217 		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1218 }
1219 
1220 void qdf_mem_dp_rx_skb_cnt_dec(void)
1221 {
1222 	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1223 }
1224 #endif
1225 
1226 void qdf_mem_kmalloc_dec(qdf_size_t size)
1227 {
1228 	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1229 }
1230 
1231 static inline void qdf_mem_dma_dec(qdf_size_t size)
1232 {
1233 	qdf_atomic_sub(size, &qdf_mem_stat.dma);
1234 }
1235 
1236 /**
1237  * __qdf_mempool_init() - Create and initialize memory pool
1238  *
1239  * @osdev: platform device object
1240  * @pool_addr: address of the pool created
1241  * @elem_cnt: no. of elements in pool
1242  * @elem_size: size of each pool element in bytes
1243  * @flags: flags
1244  *
1245  * Return: 0 on success; error code on failure
1246  */
1247 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1248 		       int elem_cnt, size_t elem_size, u_int32_t flags)
1249 {
1250 	__qdf_mempool_ctxt_t *new_pool = NULL;
1251 	u_int32_t align = L1_CACHE_BYTES;
1252 	unsigned long aligned_pool_mem;
1253 	int pool_id;
1254 	int i;
1255 
1256 	if (prealloc_disabled) {
1257 		/* TBD: We can maintain a list of pools in qdf_device_t
1258 		 * to help debugging
1259 		 * when pre-allocation is not enabled
1260 		 */
1261 		new_pool = (__qdf_mempool_ctxt_t *)
1262 			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1263 		if (!new_pool)
1264 			return QDF_STATUS_E_NOMEM;
1265 
1266 		memset(new_pool, 0, sizeof(*new_pool));
1267 		/* TBD: define flags for zeroing buffers etc */
1268 		new_pool->flags = flags;
1269 		new_pool->elem_size = elem_size;
1270 		new_pool->max_elem = elem_cnt;
1271 		*pool_addr = new_pool;
1272 		return 0;
1273 	}
1274 
1275 	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1276 		if (!osdev->mem_pool[pool_id])
1277 			break;
1278 	}
1279 
1280 	if (pool_id == MAX_MEM_POOLS)
1281 		return -ENOMEM;
1282 
1283 	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1284 		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1285 	if (!new_pool)
1286 		return -ENOMEM;
1287 
1288 	memset(new_pool, 0, sizeof(*new_pool));
1289 	/* TBD: define flags for zeroing buffers etc */
1290 	new_pool->flags = flags;
1291 	new_pool->pool_id = pool_id;
1292 
1293 	/* Round up the element size to cacheline */
1294 	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1295 	new_pool->mem_size = elem_cnt * new_pool->elem_size +
1296 				((align)?(align - 1):0);
1297 
1298 	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1299 	if (!new_pool->pool_mem) {
1300 			/* TBD: Check if we need get_free_pages above */
1301 		kfree(new_pool);
1302 		osdev->mem_pool[pool_id] = NULL;
1303 		return -ENOMEM;
1304 	}
1305 
1306 	spin_lock_init(&new_pool->lock);
1307 
1308 	/* Initialize free list */
1309 	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1310 			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1311 	STAILQ_INIT(&new_pool->free_list);
1312 
1313 	for (i = 0; i < elem_cnt; i++)
1314 		STAILQ_INSERT_TAIL(&(new_pool->free_list),
1315 			(mempool_elem_t *)(aligned_pool_mem +
1316 			(new_pool->elem_size * i)), mempool_entry);
1317 
1318 
1319 	new_pool->free_cnt = elem_cnt;
1320 	*pool_addr = new_pool;
1321 	return 0;
1322 }
1323 qdf_export_symbol(__qdf_mempool_init);
1324 
1325 /**
1326  * __qdf_mempool_destroy() - Destroy memory pool
1327  * @osdev: platform device object
1328  * @pool: Handle to the memory pool
1329  *
1330  * Returns: none
1331  */
1332 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1333 {
1334 	int pool_id = 0;
1335 
1336 	if (!pool)
1337 		return;
1338 
1339 	if (prealloc_disabled) {
1340 		kfree(pool);
1341 		return;
1342 	}
1343 
1344 	pool_id = pool->pool_id;
1345 
1346 	/* TBD: Check if free count matches elem_cnt if debug is enabled */
1347 	kfree(pool->pool_mem);
1348 	kfree(pool);
1349 	osdev->mem_pool[pool_id] = NULL;
1350 }
1351 qdf_export_symbol(__qdf_mempool_destroy);
1352 
1353 /**
1354  * __qdf_mempool_alloc() - Allocate an element from the memory pool
1355  *
1356  * @osdev: platform device object
1357  * @pool: Handle to the memory pool
1358  *
1359  * Return: Pointer to the allocated element or NULL if the pool is empty
1360  */
1361 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1362 {
1363 	void *buf = NULL;
1364 
1365 	if (!pool)
1366 		return NULL;
1367 
1368 	if (prealloc_disabled)
1369 		return  qdf_mem_malloc(pool->elem_size);
1370 
1371 	spin_lock_bh(&pool->lock);
1372 
1373 	buf = STAILQ_FIRST(&pool->free_list);
1374 	if (buf) {
1375 		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1376 		pool->free_cnt--;
1377 	}
1378 
1379 	/* TBD: Update free count if debug is enabled */
1380 	spin_unlock_bh(&pool->lock);
1381 
1382 	return buf;
1383 }
1384 qdf_export_symbol(__qdf_mempool_alloc);
1385 
1386 /**
1387  * __qdf_mempool_free() - Free a memory pool element
1388  * @osdev: Platform device object
1389  * @pool: Handle to memory pool
1390  * @buf: Element to be freed
1391  *
1392  * Returns: none
1393  */
1394 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1395 {
1396 	if (!pool)
1397 		return;
1398 
1399 
1400 	if (prealloc_disabled)
1401 		return qdf_mem_free(buf);
1402 
1403 	spin_lock_bh(&pool->lock);
1404 	pool->free_cnt++;
1405 
1406 	STAILQ_INSERT_TAIL
1407 		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1408 	spin_unlock_bh(&pool->lock);
1409 }
1410 qdf_export_symbol(__qdf_mempool_free);
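
/*
 * Usage sketch (illustrative, caller side): a typical mempool lifecycle with
 * this API looks like
 *
 *	__qdf_mempool_t pool;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *		return -ENOMEM;
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	...
 *	__qdf_mempool_free(osdev, pool, elem);
 *	__qdf_mempool_destroy(osdev, pool);
 *
 * "struct foo" is a placeholder; when prealloc_disabled is set the same
 * calls fall back to per-element qdf_mem_malloc()/qdf_mem_free().
 */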
1411 
1412 #ifdef CNSS_MEM_PRE_ALLOC
1413 static bool qdf_might_be_prealloc(void *ptr)
1414 {
1415 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1416 		return true;
1417 	else
1418 		return false;
1419 }
1420 
1421 /**
1422  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1423  * @size: the number of bytes to allocate
1424  *
1425  * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1426  * a chunk of pre-allocated memory. If size is less than or equal to
1427  * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1428  *
1429  * Return: NULL on failure, non-NULL on success
1430  */
1431 static void *qdf_mem_prealloc_get(size_t size)
1432 {
1433 	void *ptr;
1434 
1435 	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1436 		return NULL;
1437 
1438 	ptr = wcnss_prealloc_get(size);
1439 	if (!ptr)
1440 		return NULL;
1441 
1442 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1443 		ptr += sizeof(void *);
1444 
1445 	memset(ptr, 0, size);
1446 
1447 	return ptr;
1448 }
1449 
1450 static inline bool qdf_mem_prealloc_put(void *ptr)
1451 {
1452 	return wcnss_prealloc_put(ptr);
1453 }
1454 #else
1455 static bool qdf_might_be_prealloc(void *ptr)
1456 {
1457 	return false;
1458 }
1459 
1460 static inline void *qdf_mem_prealloc_get(size_t size)
1461 {
1462 	return NULL;
1463 }
1464 
1465 static inline bool qdf_mem_prealloc_put(void *ptr)
1466 {
1467 	return false;
1468 }
1469 #endif /* CNSS_MEM_PRE_ALLOC */
1470 
1471 /* External Function implementation */
1472 #ifdef MEMORY_DEBUG
1473 /**
1474  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
1475  *
1476  * Return: value of mem_debug_disabled qdf module argument
1477  */
1478 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1479 bool qdf_mem_debug_config_get(void)
1480 {
1481 	/* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1482 	return false;
1483 }
1484 #else
1485 bool qdf_mem_debug_config_get(void)
1486 {
1487 	return mem_debug_disabled;
1488 }
1489 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1490 
1491 /**
1492  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
1493  * @str_value: value of the module param
1494  *
1495  * This function will set the qdf module param mem_debug_disabled
1496  *
1497  * Return: QDF_STATUS_SUCCESS on Success
1498  */
1499 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1500 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1501 {
1502 	QDF_STATUS status;
1503 
1504 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1505 	return status;
1506 }
1507 #endif
1508 
1509 /**
1510  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1511  *
1512  * Return: none
1513  */
1514 static void qdf_mem_debug_init(void)
1515 {
1516 	int i;
1517 
1518 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1519 
1520 	if (is_initial_mem_debug_disabled)
1521 		return;
1522 
1523 	/* Initializing the list with maximum size of 60000 */
1524 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1525 		qdf_list_create(&qdf_mem_domains[i], 60000);
1526 	qdf_spinlock_create(&qdf_mem_list_lock);
1527 
1528 	/* dma */
1529 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1530 		qdf_list_create(&qdf_mem_dma_domains[i], 0);
1531 	qdf_spinlock_create(&qdf_mem_dma_list_lock);
1532 }
1533 
1534 static uint32_t
1535 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1536 			       qdf_list_t *mem_list)
1537 {
1538 	if (is_initial_mem_debug_disabled)
1539 		return 0;
1540 
1541 	if (qdf_list_empty(mem_list))
1542 		return 0;
1543 
1544 	qdf_err("Memory leaks detected in %s domain!",
1545 		qdf_debug_domain_name(domain));
1546 	qdf_mem_domain_print(mem_list,
1547 			     qdf_err_printer,
1548 			     NULL,
1549 			     0,
1550 			     qdf_mem_meta_table_print);
1551 
1552 	return mem_list->count;
1553 }
1554 
1555 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1556 {
1557 	uint32_t leak_count = 0;
1558 	int i;
1559 
1560 	if (is_initial_mem_debug_disabled)
1561 		return;
1562 
1563 	/* detect and print leaks */
1564 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1565 		leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1566 
1567 	if (leak_count)
1568 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1569 				   leak_count);
1570 }
1571 
1572 /**
1573  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1574  *
1575  * Return: none
1576  */
1577 static void qdf_mem_debug_exit(void)
1578 {
1579 	int i;
1580 
1581 	if (is_initial_mem_debug_disabled)
1582 		return;
1583 
1584 	/* mem */
1585 	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1586 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1587 		qdf_list_destroy(qdf_mem_list_get(i));
1588 
1589 	qdf_spinlock_destroy(&qdf_mem_list_lock);
1590 
1591 	/* dma */
1592 	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1593 	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1594 		qdf_list_destroy(&qdf_mem_dma_domains[i]);
1595 	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1596 }
1597 
1598 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1599 			   void *caller, uint32_t flag)
1600 {
1601 	QDF_STATUS status;
1602 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1603 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1604 	struct qdf_mem_header *header;
1605 	void *ptr;
1606 	unsigned long start, duration;
1607 
1608 	if (is_initial_mem_debug_disabled)
1609 		return __qdf_mem_malloc(size, func, line);
1610 
1611 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1612 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1613 		return NULL;
1614 	}
1615 
1616 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1617 		size += sizeof(void *);
1618 
1619 	ptr = qdf_mem_prealloc_get(size);
1620 	if (ptr)
1621 		return ptr;
1622 
1623 	if (!flag)
1624 		flag = qdf_mem_malloc_flags();
1625 
1626 	start = qdf_mc_timer_get_system_time();
1627 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1628 	duration = qdf_mc_timer_get_system_time() - start;
1629 
1630 	if (duration > QDF_MEM_WARN_THRESHOLD)
1631 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1632 			 duration, size, func, line);
1633 
1634 	if (!header) {
1635 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1636 		return NULL;
1637 	}
1638 
1639 	qdf_mem_header_init(header, size, func, line, caller);
1640 	qdf_mem_trailer_init(header);
1641 	ptr = qdf_mem_get_ptr(header);
1642 
1643 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1644 	status = qdf_list_insert_front(mem_list, &header->node);
1645 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1646 	if (QDF_IS_STATUS_ERROR(status))
1647 		qdf_err("Failed to insert memory header; status %d", status);
1648 
1649 	qdf_mem_kmalloc_inc(ksize(header));
1650 
1651 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1652 		ptr += sizeof(void *);
1653 
1654 	return ptr;
1655 }
1656 qdf_export_symbol(qdf_mem_malloc_debug);
1657 
1658 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1659 				  uint32_t line, void *caller)
1660 {
1661 	QDF_STATUS status;
1662 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1663 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1664 	struct qdf_mem_header *header;
1665 	void *ptr;
1666 	unsigned long start, duration;
1667 
1668 	if (is_initial_mem_debug_disabled)
1669 		return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1670 
1671 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1672 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1673 		return NULL;
1674 	}
1675 
1676 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1677 		size += sizeof(void *);
1678 
1679 	ptr = qdf_mem_prealloc_get(size);
1680 	if (ptr)
1681 		return ptr;
1682 
1683 	start = qdf_mc_timer_get_system_time();
1684 	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1685 	duration = qdf_mc_timer_get_system_time() - start;
1686 
1687 	if (duration > QDF_MEM_WARN_THRESHOLD)
1688 		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1689 			 duration, size, func, line);
1690 
1691 	if (!header) {
1692 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1693 		return NULL;
1694 	}
1695 
1696 	qdf_mem_header_init(header, size, func, line, caller);
1697 	qdf_mem_trailer_init(header);
1698 	ptr = qdf_mem_get_ptr(header);
1699 
1700 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1701 	status = qdf_list_insert_front(mem_list, &header->node);
1702 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1703 	if (QDF_IS_STATUS_ERROR(status))
1704 		qdf_err("Failed to insert memory header; status %d", status);
1705 
1706 	qdf_mem_kmalloc_inc(ksize(header));
1707 
1708 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1709 		ptr += sizeof(void *);
1710 
1711 	return ptr;
1712 }
1713 
1714 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1715 
1716 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1717 				     uint32_t line)
1718 {
1719 	void *ptr;
1720 
1721 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1722 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1723 			     line);
1724 		return NULL;
1725 	}
1726 
1727 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1728 		size += sizeof(void *);
1729 
1730 	ptr = qdf_mem_prealloc_get(size);
1731 	if (ptr)
1732 		return ptr;
1733 
1734 	ptr = kzalloc(size, GFP_ATOMIC);
1735 	if (!ptr) {
1736 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1737 			      size, func, line);
1738 		return NULL;
1739 	}
1740 
1741 	qdf_mem_kmalloc_inc(ksize(ptr));
1742 
1743 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1744 		ptr += sizeof(void *);
1745 
1746 	return ptr;
1747 }
1748 
1749 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1750 
1751 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1752 {
1753 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1754 	struct qdf_mem_header *header;
1755 	enum qdf_mem_validation_bitmap error_bitmap;
1756 
1757 	if (is_initial_mem_debug_disabled) {
1758 		__qdf_mem_free(ptr);
1759 		return;
1760 	}
1761 
1762 	/* freeing a null pointer is valid */
1763 	if (qdf_unlikely(!ptr))
1764 		return;
1765 
1766 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1767 		ptr = ptr - sizeof(void *);
1768 
1769 	if (qdf_mem_prealloc_put(ptr))
1770 		return;
1771 
1772 	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1773 		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1774 				   ptr);
1775 
1776 	qdf_talloc_assert_no_children_fl(ptr, func, line);
1777 
1778 	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1779 	header = qdf_mem_get_header(ptr);
1780 	error_bitmap = qdf_mem_header_validate(header, current_domain);
1781 	error_bitmap |= qdf_mem_trailer_validate(header);
1782 
1783 	if (!error_bitmap) {
1784 		header->freed = true;
1785 		qdf_list_remove_node(qdf_mem_list_get(header->domain),
1786 				     &header->node);
1787 	}
1788 	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1789 
1790 	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1791 				    func, line);
1792 
1793 	qdf_mem_kmalloc_dec(ksize(header));
1794 	kfree(header);
1795 }
1796 qdf_export_symbol(qdf_mem_free_debug);
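
/*
 * Usage sketch (illustrative, assuming the usual wrappers in qdf_mem.h):
 * drivers normally do not call the *_debug variants directly; with
 * MEMORY_DEBUG enabled the qdf_mem_malloc()/qdf_mem_free() macros expand to
 * them and record func/line/caller automatically, e.g.
 *
 *	buf = qdf_mem_malloc(len);
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(buf);
 *
 * Any allocation still on the domain list when qdf_mem_check_for_leaks()
 * runs is reported with its recorded call site and triggers a panic.
 */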
1797 
1798 void qdf_mem_check_for_leaks(void)
1799 {
1800 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1801 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1802 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1803 	uint32_t leaks_count = 0;
1804 
1805 	if (is_initial_mem_debug_disabled)
1806 		return;
1807 
1808 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1809 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1810 
1811 	if (leaks_count)
1812 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1813 				   leaks_count);
1814 }
1815 
1816 /**
1817  * qdf_mem_multi_pages_alloc_debug() - Debug version of
1818  * qdf_mem_multi_pages_alloc
1819  * @osdev: OS device handle pointer
1820  * @pages: Multi page information storage
1821  * @element_size: Each element size
1822  * @element_num: Total number of elements to be allocated
1823  * @memctxt: Memory context
1824  * @cacheable: Coherent memory or cacheable memory
1825  * @func: Caller of this allocator
1826  * @line: Line number of the caller
1827  * @caller: Return address of the caller
1828  *
1829  * This function allocates a large amount of memory over multiple pages.
1830  * Large contiguous allocations fail frequently, so instead of allocating
1831  * the memory in one shot, it is allocated as multiple non-contiguous pages
1832  * that are combined at the point of actual usage.
1833  *
1834  * Return: None
1835  */
1836 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1837 				     struct qdf_mem_multi_page_t *pages,
1838 				     size_t element_size, uint32_t element_num,
1839 				     qdf_dma_context_t memctxt, bool cacheable,
1840 				     const char *func, uint32_t line,
1841 				     void *caller)
1842 {
1843 	uint16_t page_idx;
1844 	struct qdf_mem_dma_page_t *dma_pages;
1845 	void **cacheable_pages = NULL;
1846 	uint16_t i;
1847 
1848 	if (!pages->page_size)
1849 		pages->page_size = qdf_page_size;
1850 
1851 	pages->num_element_per_page = pages->page_size / element_size;
1852 	if (!pages->num_element_per_page) {
1853 		qdf_print("Invalid page %d or element size %d",
1854 			  (int)pages->page_size, (int)element_size);
1855 		goto out_fail;
1856 	}
1857 
1858 	pages->num_pages = element_num / pages->num_element_per_page;
1859 	if (element_num % pages->num_element_per_page)
1860 		pages->num_pages++;
1861 
1862 	if (cacheable) {
1863 		/* Pages information storage */
1864 		pages->cacheable_pages = qdf_mem_malloc_debug(
1865 			pages->num_pages * sizeof(pages->cacheable_pages),
1866 			func, line, caller, 0);
1867 		if (!pages->cacheable_pages)
1868 			goto out_fail;
1869 
1870 		cacheable_pages = pages->cacheable_pages;
1871 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1872 			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1873 				pages->page_size, func, line, caller, 0);
1874 			if (!cacheable_pages[page_idx])
1875 				goto page_alloc_fail;
1876 		}
1877 		pages->dma_pages = NULL;
1878 	} else {
1879 		pages->dma_pages = qdf_mem_malloc_debug(
1880 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1881 			func, line, caller, 0);
1882 		if (!pages->dma_pages)
1883 			goto out_fail;
1884 
1885 		dma_pages = pages->dma_pages;
1886 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1887 			dma_pages->page_v_addr_start =
1888 				qdf_mem_alloc_consistent_debug(
1889 					osdev, osdev->dev, pages->page_size,
1890 					&dma_pages->page_p_addr,
1891 					func, line, caller);
1892 			if (!dma_pages->page_v_addr_start) {
1893 				qdf_print("dmaable page alloc fail pi %d",
1894 					  page_idx);
1895 				goto page_alloc_fail;
1896 			}
1897 			dma_pages->page_v_addr_end =
1898 				dma_pages->page_v_addr_start + pages->page_size;
1899 			dma_pages++;
1900 		}
1901 		pages->cacheable_pages = NULL;
1902 	}
1903 	return;
1904 
1905 page_alloc_fail:
1906 	if (cacheable) {
1907 		for (i = 0; i < page_idx; i++)
1908 			qdf_mem_free_debug(pages->cacheable_pages[i],
1909 					   func, line);
1910 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1911 	} else {
1912 		dma_pages = pages->dma_pages;
1913 		for (i = 0; i < page_idx; i++) {
1914 			qdf_mem_free_consistent_debug(
1915 				osdev, osdev->dev,
1916 				pages->page_size, dma_pages->page_v_addr_start,
1917 				dma_pages->page_p_addr, memctxt, func, line);
1918 			dma_pages++;
1919 		}
1920 		qdf_mem_free_debug(pages->dma_pages, func, line);
1921 	}
1922 
1923 out_fail:
1924 	pages->cacheable_pages = NULL;
1925 	pages->dma_pages = NULL;
1926 	pages->num_pages = 0;
1927 }
1928 
1929 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
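
/*
 * Result layout sketch (illustrative): on success the page table describes
 * either plain kernel pages or DMA-coherent pages, never both:
 *
 *	cacheable:
 *		pages->cacheable_pages[0..num_pages - 1]  per-page buffers
 *		pages->dma_pages == NULL
 *
 *	non-cacheable:
 *		pages->dma_pages[i].page_v_addr_start  CPU address of page i
 *		pages->dma_pages[i].page_p_addr        DMA address of page i
 *		pages->cacheable_pages == NULL
 *
 * Each page is allocated through the debug allocators above, so every page
 * is tracked individually. On failure everything is rolled back and
 * pages->num_pages is left at 0, which callers can use as the error check.
 */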
1930 
1931 /**
1932  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
1933  * @osdev: OS device handle pointer
1934  * @pages: Multi page information storage
1935  * @memctxt: Memory context
1936  * @cacheable: Coherent memory or cacheable memory
1937  * @func: Caller of this allocator
1938  * @line: Line number of the caller
1939  *
1940  * This function frees memory that was allocated across multiple pages.
1941  *
1942  * Return: None
1943  */
1944 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1945 				    struct qdf_mem_multi_page_t *pages,
1946 				    qdf_dma_context_t memctxt, bool cacheable,
1947 				    const char *func, uint32_t line)
1948 {
1949 	unsigned int page_idx;
1950 	struct qdf_mem_dma_page_t *dma_pages;
1951 
1952 	if (!pages->page_size)
1953 		pages->page_size = qdf_page_size;
1954 
1955 	if (cacheable) {
1956 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1957 			qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1958 					   func, line);
1959 		qdf_mem_free_debug(pages->cacheable_pages, func, line);
1960 	} else {
1961 		dma_pages = pages->dma_pages;
1962 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1963 			qdf_mem_free_consistent_debug(
1964 				osdev, osdev->dev, pages->page_size,
1965 				dma_pages->page_v_addr_start,
1966 				dma_pages->page_p_addr, memctxt, func, line);
1967 			dma_pages++;
1968 		}
1969 		qdf_mem_free_debug(pages->dma_pages, func, line);
1970 	}
1971 
1972 	pages->cacheable_pages = NULL;
1973 	pages->dma_pages = NULL;
1974 	pages->num_pages = 0;
1975 }
1976 
1977 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1978 
1979 #else
1980 static void qdf_mem_debug_init(void) {}
1981 
1982 static void qdf_mem_debug_exit(void) {}
1983 
1984 void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1985 {
1986 	void *ptr;
1987 
1988 	if (!size || size > QDF_MEM_MAX_MALLOC) {
1989 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1990 			     line);
1991 		return NULL;
1992 	}
1993 
1994 	if (add_headroom_for_cnss_prealloc_cache_ptr())
1995 		size += sizeof(void *);
1996 
1997 	ptr = qdf_mem_prealloc_get(size);
1998 	if (ptr)
1999 		return ptr;
2000 
2001 	ptr = kzalloc(size, GFP_ATOMIC);
2002 	if (!ptr) {
2003 		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
2004 			      size, func, line);
2005 		return NULL;
2006 	}
2007 
2008 	qdf_mem_kmalloc_inc(ksize(ptr));
2009 
2010 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2011 		ptr += sizeof(void *);
2012 
2013 	return ptr;
2014 }
2015 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
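
/*
 * Headroom layout sketch (illustrative): when
 * add_headroom_for_cnss_prealloc_cache_ptr() returns true, the request is
 * grown by sizeof(void *) and the returned pointer is advanced past that
 * slot so cnss prealloc can later store the slab cache pointer there:
 *
 *	+------------------+--------------------------------------------+
 *	| void * headroom  | buffer returned to the caller (size bytes) |
 *	+------------------+--------------------------------------------+
 *	^                  ^
 *	raw allocation     pointer returned by this function
 *
 * __qdf_mem_free() reverses the offset before handing the buffer back to
 * the prealloc pool or kfree().
 */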
2016 
2017 /**
2018  * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
2019  * @osdev: OS device handle pointer
2020  * @pages: Multi page information storage
2021  * @element_size: Each element size
2022  * @element_num: Total number of elements to be allocated
2023  * @memctxt: Memory context
2024  * @cacheable: Coherent memory or cacheable memory
2025  *
2026  * This function allocates a large amount of memory across multiple pages.
2027  * Large contiguous allocations fail frequently, so instead of allocating
2028  * the whole block in one shot, the memory is allocated as multiple
2029  * non-contiguous pages that are combined at the point of actual use.
2030  *
2031  * Return: None
2032  */
2033 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
2034 			       struct qdf_mem_multi_page_t *pages,
2035 			       size_t element_size, uint32_t element_num,
2036 			       qdf_dma_context_t memctxt, bool cacheable)
2037 {
2038 	uint16_t page_idx;
2039 	struct qdf_mem_dma_page_t *dma_pages;
2040 	void **cacheable_pages = NULL;
2041 	uint16_t i;
2042 
2043 	if (!pages->page_size)
2044 		pages->page_size = qdf_page_size;
2045 
2046 	pages->num_element_per_page = pages->page_size / element_size;
2047 	if (!pages->num_element_per_page) {
2048 		qdf_print("Invalid page %d or element size %d",
2049 			  (int)pages->page_size, (int)element_size);
2050 		goto out_fail;
2051 	}
2052 
2053 	pages->num_pages = element_num / pages->num_element_per_page;
2054 	if (element_num % pages->num_element_per_page)
2055 		pages->num_pages++;
2056 
2057 	if (cacheable) {
2058 		/* Pages information storage */
2059 		pages->cacheable_pages = qdf_mem_malloc(
2060 			pages->num_pages * sizeof(pages->cacheable_pages));
2061 		if (!pages->cacheable_pages)
2062 			goto out_fail;
2063 
2064 		cacheable_pages = pages->cacheable_pages;
2065 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2066 			cacheable_pages[page_idx] =
2067 				qdf_mem_malloc(pages->page_size);
2068 			if (!cacheable_pages[page_idx])
2069 				goto page_alloc_fail;
2070 		}
2071 		pages->dma_pages = NULL;
2072 	} else {
2073 		pages->dma_pages = qdf_mem_malloc(
2074 			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
2075 		if (!pages->dma_pages)
2076 			goto out_fail;
2077 
2078 		dma_pages = pages->dma_pages;
2079 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2080 			dma_pages->page_v_addr_start =
2081 				qdf_mem_alloc_consistent(osdev, osdev->dev,
2082 					 pages->page_size,
2083 					&dma_pages->page_p_addr);
2084 			if (!dma_pages->page_v_addr_start) {
2085 				qdf_print("dmaable page alloc fail pi %d",
2086 					page_idx);
2087 				goto page_alloc_fail;
2088 			}
2089 			dma_pages->page_v_addr_end =
2090 				dma_pages->page_v_addr_start + pages->page_size;
2091 			dma_pages++;
2092 		}
2093 		pages->cacheable_pages = NULL;
2094 	}
2095 	return;
2096 
2097 page_alloc_fail:
2098 	if (cacheable) {
2099 		for (i = 0; i < page_idx; i++)
2100 			qdf_mem_free(pages->cacheable_pages[i]);
2101 		qdf_mem_free(pages->cacheable_pages);
2102 	} else {
2103 		dma_pages = pages->dma_pages;
2104 		for (i = 0; i < page_idx; i++) {
2105 			qdf_mem_free_consistent(
2106 				osdev, osdev->dev, pages->page_size,
2107 				dma_pages->page_v_addr_start,
2108 				dma_pages->page_p_addr, memctxt);
2109 			dma_pages++;
2110 		}
2111 		qdf_mem_free(pages->dma_pages);
2112 	}
2113 
2114 out_fail:
2115 	pages->cacheable_pages = NULL;
2116 	pages->dma_pages = NULL;
2117 	pages->num_pages = 0;
2118 	return;
2119 }
2120 qdf_export_symbol(qdf_mem_multi_pages_alloc);
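
/*
 * Minimal usage sketch (illustrative; struct my_desc, num_descs, memctxt
 * and osdev are hypothetical): allocate a descriptor pool as cacheable
 * multi-page memory, clear it and release it again:
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_desc),
 *				  num_descs, memctxt, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&pages, true);
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, memctxt, true);
 */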
2121 
2122 /**
2123  * qdf_mem_multi_pages_free() - free large size of kernel memory
2124  * @osdev: OS device handle pointer
2125  * @pages: Multi page information storage
2126  * @memctxt: Memory context
2127  * @cacheable: Coherent memory or cacheable memory
2128  *
2129  * This function frees memory that was allocated across multiple pages.
2130  *
2131  * Return: None
2132  */
2133 void qdf_mem_multi_pages_free(qdf_device_t osdev,
2134 			      struct qdf_mem_multi_page_t *pages,
2135 			      qdf_dma_context_t memctxt, bool cacheable)
2136 {
2137 	unsigned int page_idx;
2138 	struct qdf_mem_dma_page_t *dma_pages;
2139 
2140 	if (!pages->page_size)
2141 		pages->page_size = qdf_page_size;
2142 
2143 	if (cacheable) {
2144 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2145 			qdf_mem_free(pages->cacheable_pages[page_idx]);
2146 		qdf_mem_free(pages->cacheable_pages);
2147 	} else {
2148 		dma_pages = pages->dma_pages;
2149 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2150 			qdf_mem_free_consistent(
2151 				osdev, osdev->dev, pages->page_size,
2152 				dma_pages->page_v_addr_start,
2153 				dma_pages->page_p_addr, memctxt);
2154 			dma_pages++;
2155 		}
2156 		qdf_mem_free(pages->dma_pages);
2157 	}
2158 
2159 	pages->cacheable_pages = NULL;
2160 	pages->dma_pages = NULL;
2161 	pages->num_pages = 0;
2162 	return;
2163 }
2164 qdf_export_symbol(qdf_mem_multi_pages_free);
2165 #endif
2166 
2167 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2168 			      bool cacheable)
2169 {
2170 	unsigned int page_idx;
2171 	struct qdf_mem_dma_page_t *dma_pages;
2172 
2173 	if (!pages->page_size)
2174 		pages->page_size = qdf_page_size;
2175 
2176 	if (cacheable) {
2177 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2178 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2179 				     pages->page_size);
2180 	} else {
2181 		dma_pages = pages->dma_pages;
2182 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2183 			qdf_mem_zero(dma_pages->page_v_addr_start,
2184 				     pages->page_size);
2185 			dma_pages++;
2186 		}
2187 	}
2188 }
2189 
2190 qdf_export_symbol(qdf_mem_multi_pages_zero);
2191 
2192 void __qdf_mem_free(void *ptr)
2193 {
2194 	if (!ptr)
2195 		return;
2196 
2197 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2198 		ptr = ptr - sizeof(void *);
2199 
2200 	if (qdf_might_be_prealloc(ptr)) {
2201 		if (qdf_mem_prealloc_put(ptr))
2202 			return;
2203 	}
2204 
2205 	qdf_mem_kmalloc_dec(ksize(ptr));
2206 
2207 	kfree(ptr);
2208 }
2209 
2210 qdf_export_symbol(__qdf_mem_free);
2211 
2212 void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2213 {
2214 	void *ptr;
2215 
2216 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2217 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2218 			     line);
2219 		return NULL;
2220 	}
2221 
2222 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2223 		size += sizeof(void *);
2224 
2225 	ptr = qdf_mem_prealloc_get(size);
2226 	if (ptr)
2227 		return ptr;
2228 
2229 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2230 	if (!ptr)
2231 		return NULL;
2232 
2233 	qdf_mem_kmalloc_inc(ksize(ptr));
2234 
2235 	if (add_headroom_for_cnss_prealloc_cache_ptr())
2236 		ptr += sizeof(void *);
2237 
2238 	return ptr;
2239 }
2240 
2241 qdf_export_symbol(__qdf_mem_malloc);
2242 
2243 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
2244 void __qdf_untracked_mem_free(void *ptr)
2245 {
2246 	if (!ptr)
2247 		return;
2248 
2249 	kfree(ptr);
2250 }
2251 
2252 void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2253 {
2254 	void *ptr;
2255 
2256 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2257 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2258 			     line);
2259 		return NULL;
2260 	}
2261 
2262 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2263 	if (!ptr)
2264 		return NULL;
2265 
2266 	return ptr;
2267 }
2268 #endif
2269 
2270 void *qdf_aligned_malloc_fl(uint32_t *size,
2271 			    void **vaddr_unaligned,
2272 				qdf_dma_addr_t *paddr_unaligned,
2273 				qdf_dma_addr_t *paddr_aligned,
2274 				uint32_t align,
2275 			    const char *func, uint32_t line)
2276 {
2277 	void *vaddr_aligned;
2278 	uint32_t align_alloc_size;
2279 
2280 	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2281 			line);
2282 	if (!*vaddr_unaligned) {
2283 		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2284 		return NULL;
2285 	}
2286 
2287 	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2288 
2289 	/* Re-allocate with additional bytes to align the base address only
2290 	 * if the allocation above returned an unaligned address. The reason
2291 	 * for trying an exact-size allocation first is that the OS allocates
2292 	 * blocks in power-of-2 pages and then frees the extra pages.
2293 	 * For example, for a ring size of 1 MB, the allocation below
2294 	 * requests 1 MB plus 7 bytes for alignment, which causes a 2 MB
2295 	 * block allocation that sometimes fails due to memory
2296 	 * fragmentation.
2297 	 */
2298 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2299 		align_alloc_size = *size + align - 1;
2300 
2301 		qdf_mem_free(*vaddr_unaligned);
2302 		*vaddr_unaligned = qdf_mem_malloc_fl(
2303 				(qdf_size_t)align_alloc_size, func, line);
2304 		if (!*vaddr_unaligned) {
2305 			qdf_warn("Failed to alloc %uB @ %s:%d",
2306 				 align_alloc_size, func, line);
2307 			return NULL;
2308 		}
2309 
2310 		*paddr_unaligned = qdf_mem_virt_to_phys(
2311 				*vaddr_unaligned);
2312 		*size = align_alloc_size;
2313 	}
2314 
2315 	*paddr_aligned = (qdf_dma_addr_t)qdf_align
2316 		((unsigned long)(*paddr_unaligned), align);
2317 
2318 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2319 			((unsigned long)(*paddr_aligned) -
2320 			 (unsigned long)(*paddr_unaligned)));
2321 
2322 	return vaddr_aligned;
2323 }
2324 
2325 qdf_export_symbol(qdf_aligned_malloc_fl);
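
/*
 * Worked example (illustrative numbers): with align = 8 and an initial
 * physical address of 0x1003, the first allocation is unaligned, so the
 * buffer is re-allocated with align - 1 = 7 bytes of slack and the aligned
 * addresses are derived as:
 *
 *	paddr_aligned = qdf_align(0x1003, 8)            = 0x1008
 *	offset        = paddr_aligned - paddr_unaligned = 5
 *	vaddr_aligned = vaddr_unaligned + offset
 *
 * The virtual pointer is advanced by the same offset that aligns the
 * physical address, so both views of the buffer stay consistent.
 */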
2326 
2327 #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
2328 /**
2329  * qdf_tx_desc_pool_free_bufs() - Go through elements and call the registered cb
2330  * @ctxt: Context to be passed to the cb
2331  * @pages: Multi page information storage
2332  * @elem_size: Each element size
2333  * @elem_count: Total number of elements in the pool.
2334  * @cacheable: Coherent memory or cacheable memory
2335  * @cb: Callback to free the elements
2336  * @elem_list: elem list for delayed free
2337  *
2338  * Return: 0 on success, or error code
2339  */
2340 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2341 			       uint32_t elem_size, uint32_t elem_count,
2342 			       uint8_t cacheable, qdf_mem_release_cb cb,
2343 			       void *elem_list)
2344 {
2345 	uint16_t i, i_int;
2346 	void *page_info;
2347 	void *elem;
2348 	uint32_t num_elem = 0;
2349 
2350 	for (i = 0; i < pages->num_pages; i++) {
2351 		if (cacheable)
2352 			page_info = pages->cacheable_pages[i];
2353 		else
2354 			page_info = pages->dma_pages[i].page_v_addr_start;
2355 
2356 		if (!page_info)
2357 			return -ENOMEM;
2358 
2359 		elem = page_info;
2360 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2361 			cb(ctxt, elem, elem_list);
2362 			elem = ((char *)elem + elem_size);
2363 			num_elem++;
2364 
2365 			/* Number of desc pool elements reached */
2366 			if (num_elem == (elem_count - 1))
2367 				break;
2368 		}
2369 	}
2370 
2371 	return 0;
2372 }
2373 
2374 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
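
/*
 * Callback sketch (illustrative; my_release_cb, my_pool, my_desc and
 * free_list are hypothetical, and a void-returning qdf_mem_release_cb is
 * assumed): the registered callback is invoked once per descriptor,
 * walking each page in elem_size strides:
 *
 *	static void my_release_cb(void *ctxt, void *elem, void *elem_list)
 *	{
 *		struct my_pool *pool = ctxt;
 *
 *		... return elem to pool or queue it on elem_list ...
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(pool, &pool->pages,
 *				   sizeof(struct my_desc), pool->num_descs,
 *				   true, my_release_cb, free_list);
 */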
2375 #endif
2376 
2377 /**
2378  * qdf_mem_multi_page_link() - Make links for multi page elements
2379  * @osdev: OS device handle pointer
2380  * @pages: Multi page information storage
2381  * @elem_size: Single element size
2382  * @elem_count: Number of elements to be linked
2383  * @cacheable: Coherent memory or cacheable memory
2384  *
2385  * This function links the elements of a multi-page allocation into a chain
2386  *
2387  * Return: 0 on success
2388  */
2389 int qdf_mem_multi_page_link(qdf_device_t osdev,
2390 		struct qdf_mem_multi_page_t *pages,
2391 		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
2392 {
2393 	uint16_t i, i_int;
2394 	void *page_info;
2395 	void **c_elem = NULL;
2396 	uint32_t num_link = 0;
2397 
2398 	for (i = 0; i < pages->num_pages; i++) {
2399 		if (cacheable)
2400 			page_info = pages->cacheable_pages[i];
2401 		else
2402 			page_info = pages->dma_pages[i].page_v_addr_start;
2403 
2404 		if (!page_info)
2405 			return -ENOMEM;
2406 
2407 		c_elem = (void **)page_info;
2408 		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2409 			if (i_int == (pages->num_element_per_page - 1)) {
2410 				if ((i + 1) == pages->num_pages)
2411 					break;
2412 				if (cacheable)
2413 					*c_elem = pages->
2414 						cacheable_pages[i + 1];
2415 				else
2416 					*c_elem = pages->
2417 						dma_pages[i + 1].
2418 							page_v_addr_start;
2419 				num_link++;
2420 				break;
2421 			} else {
2422 				*c_elem =
2423 					(void *)(((char *)c_elem) + elem_size);
2424 			}
2425 			num_link++;
2426 			c_elem = (void **)*c_elem;
2427 
2428 			/* Last link established exit */
2429 			if (num_link == (elem_count - 1))
2430 				break;
2431 		}
2432 	}
2433 
2434 	if (c_elem)
2435 		*c_elem = NULL;
2436 
2437 	return 0;
2438 }
2439 qdf_export_symbol(qdf_mem_multi_page_link);
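
/*
 * Usage sketch (illustrative): after linking, the first word of every
 * element points at the next element, so the whole pool can be walked as a
 * singly linked free list starting from the first page:
 *
 *	void **elem;
 *
 *	qdf_mem_multi_page_link(osdev, &pages, elem_size, elem_count, true);
 *
 *	for (elem = pages.cacheable_pages[0]; elem; elem = (void **)*elem) {
 *		... hand out the element as a free descriptor ...
 *	}
 */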
2440 
2441 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2442 {
2443 	/* special case where dst_addr or src_addr can be NULL */
2444 	if (!num_bytes)
2445 		return;
2446 
2447 	QDF_BUG(dst_addr);
2448 	QDF_BUG(src_addr);
2449 	if (!dst_addr || !src_addr)
2450 		return;
2451 
2452 	memcpy(dst_addr, src_addr, num_bytes);
2453 }
2454 qdf_export_symbol(qdf_mem_copy);
2455 
2456 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2457 {
2458 	qdf_shared_mem_t *shared_mem;
2459 	qdf_dma_addr_t dma_addr, paddr;
2460 	int ret;
2461 
2462 	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2463 	if (!shared_mem)
2464 		return NULL;
2465 
2466 	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2467 				size, qdf_mem_get_dma_addr_ptr(osdev,
2468 						&shared_mem->mem_info));
2469 	if (!shared_mem->vaddr) {
2470 		qdf_err("Unable to allocate DMA memory for shared resource");
2471 		qdf_mem_free(shared_mem);
2472 		return NULL;
2473 	}
2474 
2475 	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2476 	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2477 
2478 	qdf_mem_zero(shared_mem->vaddr, size);
2479 	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2480 	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2481 
2482 	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2483 	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2484 				      shared_mem->vaddr, dma_addr, size);
2485 	if (ret) {
2486 		qdf_err("Unable to get DMA sgtable");
2487 		qdf_mem_free_consistent(osdev, osdev->dev,
2488 					shared_mem->mem_info.size,
2489 					shared_mem->vaddr,
2490 					dma_addr,
2491 					qdf_get_dma_mem_context(shared_mem,
2492 								memctx));
2493 		qdf_mem_free(shared_mem);
2494 		return NULL;
2495 	}
2496 
2497 	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2498 
2499 	return shared_mem;
2500 }
2501 
2502 qdf_export_symbol(qdf_mem_shared_mem_alloc);
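
/*
 * Usage sketch (illustrative; ring_size is hypothetical): the returned
 * descriptor bundles the CPU address, the DMA metadata and a scatter-gather
 * table for the same buffer:
 *
 *	qdf_shared_mem_t *shmem;
 *
 *	shmem = qdf_mem_shared_mem_alloc(osdev, ring_size);
 *	if (!shmem)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... access the buffer through shmem->vaddr; the DMA address, size
 *	    and physical address are recorded in shmem->mem_info ...
 */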
2503 
2504 /**
2505  * qdf_mem_copy_toio() - copy memory
2506  * @dst_addr: Pointer to destination memory location (to copy to)
2507  * @src_addr: Pointer to source memory location (to copy from)
2508  * @num_bytes: Number of bytes to copy.
2509  *
2510  * Return: none
2511  */
2512 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2513 {
2514 	if (0 == num_bytes) {
2515 		/* special case where dst_addr or src_addr can be NULL */
2516 		return;
2517 	}
2518 
2519 	if ((!dst_addr) || (!src_addr)) {
2520 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2521 			  "%s called with NULL parameter, source:%pK destination:%pK",
2522 			  __func__, src_addr, dst_addr);
2523 		QDF_ASSERT(0);
2524 		return;
2525 	}
2526 	memcpy_toio(dst_addr, src_addr, num_bytes);
2527 }
2528 
2529 qdf_export_symbol(qdf_mem_copy_toio);
2530 
2531 /**
2532  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
2533  * @ptr: Pointer to memory that will be set
2534  * @value: Byte set in memory
2535  * @num_bytes: Number of bytes to be set
2536  *
2537  * Return: None
2538  */
2539 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2540 {
2541 	if (!ptr) {
2542 		qdf_print("%s called with NULL parameter ptr", __func__);
2543 		return;
2544 	}
2545 	memset_io(ptr, value, num_bytes);
2546 }
2547 
2548 qdf_export_symbol(qdf_mem_set_io);
2549 
2550 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2551 {
2552 	QDF_BUG(ptr);
2553 	if (!ptr)
2554 		return;
2555 
2556 	memset(ptr, value, num_bytes);
2557 }
2558 qdf_export_symbol(qdf_mem_set);
2559 
2560 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2561 {
2562 	/* special case where dst_addr or src_addr can be NULL */
2563 	if (!num_bytes)
2564 		return;
2565 
2566 	QDF_BUG(dst_addr);
2567 	QDF_BUG(src_addr);
2568 	if (!dst_addr || !src_addr)
2569 		return;
2570 
2571 	memmove(dst_addr, src_addr, num_bytes);
2572 }
2573 qdf_export_symbol(qdf_mem_move);
2574 
2575 int qdf_mem_cmp(const void *left, const void *right, size_t size)
2576 {
2577 	QDF_BUG(left);
2578 	QDF_BUG(right);
2579 
2580 	return memcmp(left, right, size);
2581 }
2582 qdf_export_symbol(qdf_mem_cmp);
2583 
2584 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2585 /**
2586  * qdf_mem_dma_alloc() - allocates memory for dma
2587  * @osdev: OS device handle
2588  * @dev: Pointer to device handle
2589  * @size: Size to be allocated
2590  * @phy_addr: Physical address
2591  *
2592  * Return: pointer to allocated memory, or NULL if the allocation fails
2593  */
2594 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2595 				      qdf_size_t size,
2596 				      qdf_dma_addr_t *phy_addr)
2597 {
2598 	void *vaddr;
2599 
2600 	vaddr = qdf_mem_malloc(size);
2601 	*phy_addr = ((uintptr_t) vaddr);
2602 	/* using this type conversion to suppress "cast from pointer to integer
2603 	 * of different size" warning on some platforms
2604 	 */
2605 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2606 	return vaddr;
2607 }
2608 
2609 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2610 	!defined(QCA_WIFI_QCN9000)
2611 
2612 #define QCA8074_RAM_BASE 0x50000000
2613 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
2614 void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2615 			qdf_dma_addr_t *phy_addr)
2616 {
2617 	void *vaddr = NULL;
2618 	int i;
2619 
2620 	*phy_addr = 0;
2621 
2622 	for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2623 		vaddr = dma_alloc_coherent(dev, size, phy_addr,
2624 					   qdf_mem_malloc_flags());
2625 
2626 		if (!vaddr) {
2627 			qdf_err("%s failed, size: %zu!", __func__, size);
2628 			return NULL;
2629 		}
2630 
2631 		if (*phy_addr >= QCA8074_RAM_BASE)
2632 			return vaddr;
2633 
2634 		dma_free_coherent(dev, size, vaddr, *phy_addr);
2635 	}
2636 
2637 	return NULL;
2638 }
2639 
2640 #else
2641 static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2642 				      qdf_size_t size, qdf_dma_addr_t *paddr)
2643 {
2644 	return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2645 }
2646 #endif
2647 
2648 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2649 static inline void
2650 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2651 {
2652 	qdf_mem_free(vaddr);
2653 }
2654 #else
2655 
2656 static inline void
2657 qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2658 {
2659 	dma_free_coherent(dev, size, vaddr, paddr);
2660 }
2661 #endif
2662 
2663 #ifdef MEMORY_DEBUG
2664 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2665 				     qdf_size_t size, qdf_dma_addr_t *paddr,
2666 				     const char *func, uint32_t line,
2667 				     void *caller)
2668 {
2669 	QDF_STATUS status;
2670 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2671 	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2672 	struct qdf_mem_header *header;
2673 	void *vaddr;
2674 
2675 	if (is_initial_mem_debug_disabled)
2676 		return __qdf_mem_alloc_consistent(osdev, dev,
2677 						  size, paddr,
2678 						  func, line);
2679 
2680 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2681 		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2682 		return NULL;
2683 	}
2684 
2685 	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2686 				   paddr);
2687 
2688 	if (!vaddr) {
2689 		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2690 		return NULL;
2691 	}
2692 
2693 	header = qdf_mem_dma_get_header(vaddr, size);
2694 	/* For DMA buffers we only add trailers; this call initializes the
2695 	 * header structure at the tail of the buffer.
2696 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2697 	 * do not prefix headers to DMA buffers.
2698 	 */
2699 	qdf_mem_header_init(header, size, func, line, caller);
2700 
2701 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2702 	status = qdf_list_insert_front(mem_list, &header->node);
2703 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2704 	if (QDF_IS_STATUS_ERROR(status))
2705 		qdf_err("Failed to insert memory header; status %d", status);
2706 
2707 	qdf_mem_dma_inc(size);
2708 
2709 	return vaddr;
2710 }
2711 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2712 
2713 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2714 				   qdf_size_t size, void *vaddr,
2715 				   qdf_dma_addr_t paddr,
2716 				   qdf_dma_context_t memctx,
2717 				   const char *func, uint32_t line)
2718 {
2719 	enum qdf_debug_domain domain = qdf_debug_domain_get();
2720 	struct qdf_mem_header *header;
2721 	enum qdf_mem_validation_bitmap error_bitmap;
2722 
2723 	if (is_initial_mem_debug_disabled) {
2724 		__qdf_mem_free_consistent(
2725 					  osdev, dev,
2726 					  size, vaddr,
2727 					  paddr, memctx);
2728 		return;
2729 	}
2730 
2731 	/* freeing a null pointer is valid */
2732 	if (qdf_unlikely(!vaddr))
2733 		return;
2734 
2735 	qdf_talloc_assert_no_children_fl(vaddr, func, line);
2736 
2737 	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2738 	/* For DMA buffers we only add trailers; this call retrieves the
2739 	 * header structure from the tail of the buffer.
2740 	 * Prefixing the header to a DMA buffer causes SMMU faults, so
2741 	 * do not prefix headers to DMA buffers.
2742 	 */
2743 	header = qdf_mem_dma_get_header(vaddr, size);
2744 	error_bitmap = qdf_mem_header_validate(header, domain);
2745 	if (!error_bitmap) {
2746 		header->freed = true;
2747 		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2748 				     &header->node);
2749 	}
2750 	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2751 
2752 	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2753 
2754 	qdf_mem_dma_dec(header->size);
2755 	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2756 }
2757 qdf_export_symbol(qdf_mem_free_consistent_debug);
2758 #endif /* MEMORY_DEBUG */
2759 
2760 void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2761 			       qdf_size_t size, void *vaddr,
2762 			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2763 {
2764 	qdf_mem_dma_dec(size);
2765 	qdf_mem_dma_free(dev, size, vaddr, paddr);
2766 }
2767 
2768 qdf_export_symbol(__qdf_mem_free_consistent);
2769 
2770 void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2771 				 qdf_size_t size, qdf_dma_addr_t *paddr,
2772 				 const char *func, uint32_t line)
2773 {
2774 	void *vaddr;
2775 
2776 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2777 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2778 			     size, func, line);
2779 		return NULL;
2780 	}
2781 
2782 	vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2783 
2784 	if (vaddr)
2785 		qdf_mem_dma_inc(size);
2786 
2787 	return vaddr;
2788 }
2789 
2790 qdf_export_symbol(__qdf_mem_alloc_consistent);
2791 
2792 void *qdf_aligned_mem_alloc_consistent_fl(
2793 	qdf_device_t osdev, uint32_t *size,
2794 	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2795 	qdf_dma_addr_t *paddr_aligned, uint32_t align,
2796 	const char *func, uint32_t line)
2797 {
2798 	void *vaddr_aligned;
2799 	uint32_t align_alloc_size;
2800 
2801 	*vaddr_unaligned = qdf_mem_alloc_consistent(
2802 			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2803 	if (!*vaddr_unaligned) {
2804 		qdf_warn("Failed to alloc %uB @ %s:%d",
2805 			 *size, func, line);
2806 		return NULL;
2807 	}
2808 
2809 	/* Re-allocate with additional bytes to align the base address only
2810 	 * if the allocation above returned an unaligned address. The reason
2811 	 * for trying an exact-size allocation first is that the OS allocates
2812 	 * blocks in power-of-2 pages and then frees the extra pages.
2813 	 * For example, for a ring size of 1 MB, the allocation below
2814 	 * requests 1 MB plus 7 bytes for alignment, which causes a 2 MB
2815 	 * block allocation that sometimes fails due to memory
2816 	 * fragmentation.
2817 	 */
2818 	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2819 		align_alloc_size = *size + align - 1;
2820 
2821 		qdf_mem_free_consistent(osdev, osdev->dev, *size,
2822 					*vaddr_unaligned,
2823 					*paddr_unaligned, 0);
2824 
2825 		*vaddr_unaligned = qdf_mem_alloc_consistent(
2826 				osdev, osdev->dev, align_alloc_size,
2827 				paddr_unaligned);
2828 		if (!*vaddr_unaligned) {
2829 			qdf_warn("Failed to alloc %uB @ %s:%d",
2830 				 align_alloc_size, func, line);
2831 			return NULL;
2832 		}
2833 
2834 		*size = align_alloc_size;
2835 	}
2836 
2837 	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
2838 			(unsigned long)(*paddr_unaligned), align);
2839 
2840 	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2841 				 ((unsigned long)(*paddr_aligned) -
2842 				  (unsigned long)(*paddr_unaligned)));
2843 
2844 	return vaddr_aligned;
2845 }
2846 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
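
/*
 * Usage sketch (illustrative; the ring fields are hypothetical): allocate a
 * DMA ring whose base must be 8-byte aligned, keeping the unaligned address
 * and final size so the buffer can later be passed back to
 * qdf_mem_free_consistent():
 *
 *	uint32_t alloc_size = ring_size;
 *
 *	ring->base_vaddr = qdf_aligned_mem_alloc_consistent_fl(
 *			osdev, &alloc_size, &ring->vaddr_unaligned,
 *			&ring->paddr_unaligned, &ring->paddr_aligned, 8,
 *			__func__, __LINE__);
 *	if (!ring->base_vaddr)
 *		return QDF_STATUS_E_NOMEM;
 */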
2847 
2848 /**
2849  * qdf_mem_dma_sync_single_for_device() - assign memory to device
2850  * @osdev: OS device handle
2851  * @bus_addr: dma address to give to the device
2852  * @size: Size of the memory block
2853  * @direction: direction data will be DMAed
2854  *
2855  * Assign memory to the remote device.
2856  * The cache lines are flushed to RAM or invalidated as needed.
2857  *
2858  * Return: none
2859  */
2860 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2861 					qdf_dma_addr_t bus_addr,
2862 					qdf_size_t size,
2863 					enum dma_data_direction direction)
2864 {
2865 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
2866 }
2867 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2868 
2869 /**
2870  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
2871  * @osdev: OS device handle
2872  * @bus_addr: dma address to give to the cpu
2873  * @size: Size of the memory block
2874  * @direction: direction data will be DMAed
2875  *
2876  * Assign memory to the CPU.
2877  *
2878  * Return: none
2879  */
2880 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2881 				     qdf_dma_addr_t bus_addr,
2882 				     qdf_size_t size,
2883 				     enum dma_data_direction direction)
2884 {
2885 	dma_sync_single_for_cpu(osdev->dev, bus_addr,  size, direction);
2886 }
2887 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
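
/*
 * Ownership sketch (illustrative): for streaming DMA the buffer has to be
 * handed back and forth between device and CPU around every transfer:
 *
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, len,
 *					DMA_FROM_DEVICE);
 *	... CPU reads the data the device just wrote ...
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, len,
 *					   DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */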
2888 
2889 void qdf_mem_init(void)
2890 {
2891 	qdf_mem_debug_init();
2892 	qdf_net_buf_debug_init();
2893 	qdf_frag_debug_init();
2894 	qdf_mem_debugfs_init();
2895 	qdf_mem_debug_debugfs_init();
2896 }
2897 qdf_export_symbol(qdf_mem_init);
2898 
2899 void qdf_mem_exit(void)
2900 {
2901 	qdf_mem_debug_debugfs_exit();
2902 	qdf_mem_debugfs_exit();
2903 	qdf_frag_debug_exit();
2904 	qdf_net_buf_debug_exit();
2905 	qdf_mem_debug_exit();
2906 }
2907 qdf_export_symbol(qdf_mem_exit);
2908 
2909 /**
2910  * qdf_ether_addr_copy() - copy an Ethernet address
2911  *
2912  * @dst_addr: A six-byte array Ethernet address destination
2913  * @src_addr: A six-byte array Ethernet address source
2914  *
2915  * Please note: dst & src must both be aligned to u16.
2916  *
2917  * Return: none
2918  */
2919 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2920 {
2921 	if ((!dst_addr) || (!src_addr)) {
2922 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2923 			  "%s called with NULL parameter, source:%pK destination:%pK",
2924 			  __func__, src_addr, dst_addr);
2925 		QDF_ASSERT(0);
2926 		return;
2927 	}
2928 	ether_addr_copy(dst_addr, src_addr);
2929 }
2930 qdf_export_symbol(qdf_ether_addr_copy);
2931 
2932 int32_t qdf_dma_mem_stats_read(void)
2933 {
2934 	return qdf_atomic_read(&qdf_mem_stat.dma);
2935 }
2936 
2937 qdf_export_symbol(qdf_dma_mem_stats_read);
2938 
2939 int32_t qdf_heap_mem_stats_read(void)
2940 {
2941 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2942 }
2943 
2944 qdf_export_symbol(qdf_heap_mem_stats_read);
2945 
2946 int32_t qdf_skb_mem_stats_read(void)
2947 {
2948 	return qdf_atomic_read(&qdf_mem_stat.skb);
2949 }
2950 
2951 qdf_export_symbol(qdf_skb_mem_stats_read);
2952 
2953 int32_t qdf_skb_total_mem_stats_read(void)
2954 {
2955 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2956 }
2957 
2958 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2959 
2960 int32_t qdf_skb_max_mem_stats_read(void)
2961 {
2962 	return qdf_mem_stat.skb_mem_max;
2963 }
2964 
2965 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2966 
2967 int32_t qdf_dp_tx_skb_mem_stats_read(void)
2968 {
2969 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2970 }
2971 
2972 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2973 
2974 int32_t qdf_dp_rx_skb_mem_stats_read(void)
2975 {
2976 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2977 }
2978 
2979 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2980 
2981 int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2982 {
2983 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2984 }
2985 
2986 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2987 
2988 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2989 {
2990 	return qdf_mem_stat.dp_tx_skb_count_max;
2991 }
2992 
2993 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
2994 
2995 int32_t qdf_mem_dp_rx_skb_cnt_read(void)
2996 {
2997 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
2998 }
2999 
3000 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
3001 
3002 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
3003 {
3004 	return qdf_mem_stat.dp_rx_skb_count_max;
3005 }
3006 
3007 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
3008 
3009 int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
3010 {
3011 	return qdf_mem_stat.dp_tx_skb_mem_max;
3012 }
3013 
3014 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
3015 
3016 int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
3017 {
3018 	return qdf_mem_stat.dp_rx_skb_mem_max;
3019 }
3020 
3021 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
3022 
3023 int32_t qdf_mem_tx_desc_cnt_read(void)
3024 {
3025 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
3026 }
3027 
3028 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
3029 
3030 int32_t qdf_mem_tx_desc_max_read(void)
3031 {
3032 	return qdf_mem_stat.tx_descs_max;
3033 }
3034 
3035 qdf_export_symbol(qdf_mem_tx_desc_max_read);
3036 
3037 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
3038 				int32_t tx_descs_max)
3039 {
3040 	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
3041 	qdf_mem_stat.tx_descs_max = tx_descs_max;
3042 }
3043 
3044 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
3045 
3046 void qdf_mem_stats_init(void)
3047 {
3048 	qdf_mem_stat.skb_mem_max = 0;
3049 	qdf_mem_stat.dp_tx_skb_mem_max = 0;
3050 	qdf_mem_stat.dp_rx_skb_mem_max = 0;
3051 	qdf_mem_stat.dp_tx_skb_count_max = 0;
3052 	qdf_mem_stat.dp_rx_skb_count_max = 0;
3053 	qdf_mem_stat.tx_descs_max = 0;
3054 }
3055 
3056 qdf_export_symbol(qdf_mem_stats_init);
3057 
3058 void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
3059 {
3060 	void *ptr;
3061 
3062 	if (!size) {
3063 		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
3064 		return NULL;
3065 	}
3066 
3067 	ptr = vzalloc(size);
3068 
3069 	return ptr;
3070 }
3071 
3072 qdf_export_symbol(__qdf_mem_valloc);
3073 
3074 void __qdf_mem_vfree(void *ptr)
3075 {
3076 	if (qdf_unlikely(!ptr))
3077 		return;
3078 
3079 	vfree(ptr);
3080 }
3081 
3082 qdf_export_symbol(__qdf_mem_vfree);
3083 
3084 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
3085 int
3086 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
3087 			  enum qdf_iommu_attr attr, void *data)
3088 {
3089 	return __qdf_iommu_domain_get_attr(domain, attr, data);
3090 }
3091 
3092 qdf_export_symbol(qdf_iommu_domain_get_attr);
3093 #endif
3094 
3095 #ifdef ENHANCED_OS_ABSTRACTION
3096 void qdf_update_mem_map_table(qdf_device_t osdev,
3097 			      qdf_mem_info_t *mem_info,
3098 			      qdf_dma_addr_t dma_addr,
3099 			      uint32_t mem_size)
3100 {
3101 	if (!mem_info) {
3102 		qdf_nofl_err("%s: NULL mem_info", __func__);
3103 		return;
3104 	}
3105 
3106 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
3107 }
3108 
3109 qdf_export_symbol(qdf_update_mem_map_table);
3110 
3111 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
3112 					  qdf_dma_addr_t dma_addr)
3113 {
3114 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
3115 }
3116 
3117 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
3118 #endif
3119 
3120 #ifdef QCA_KMEM_CACHE_SUPPORT
3121 qdf_kmem_cache_t
3122 __qdf_kmem_cache_create(const char *cache_name,
3123 			qdf_size_t size)
3124 {
3125 	struct kmem_cache *cache;
3126 
3127 	cache = kmem_cache_create(cache_name, size,
3128 				  0, 0, NULL);
3129 
3130 	if (!cache)
3131 		return NULL;
3132 
3133 	return cache;
3134 }
3135 qdf_export_symbol(__qdf_kmem_cache_create);
3136 
3137 void
3138 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3139 {
3140 	kmem_cache_destroy(cache);
3141 }
3142 
3143 qdf_export_symbol(__qdf_kmem_cache_destroy);
3144 
3145 void*
3146 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3147 {
3148 	int flags = GFP_KERNEL;
3149 
3150 	if (in_interrupt() || irqs_disabled() || in_atomic())
3151 		flags = GFP_ATOMIC;
3152 
3153 	return kmem_cache_alloc(cache, flags);
3154 }
3155 
3156 qdf_export_symbol(__qdf_kmem_cache_alloc);
3157 
3158 void
3159 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3160 
3161 {
3162 	kmem_cache_free(cache, node);
3163 }
3164 
3165 qdf_export_symbol(__qdf_kmem_cache_free);
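
/*
 * Usage sketch (illustrative; struct my_node is hypothetical): a cache is
 * created once, objects are carved out of it, and it is destroyed when no
 * objects remain outstanding:
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = __qdf_kmem_cache_create("my_node_cache",
 *					sizeof(struct my_node));
 *	node = __qdf_kmem_cache_alloc(cache);
 *	...
 *	__qdf_kmem_cache_free(cache, node);
 *	__qdf_kmem_cache_destroy(cache);
 */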
3166 #else
3167 qdf_kmem_cache_t
3168 __qdf_kmem_cache_create(const char *cache_name,
3169 			qdf_size_t size)
3170 {
3171 	return NULL;
3172 }
3173 
3174 void
3175 __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3176 {
3177 }
3178 
3179 void *
3180 __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3181 {
3182 	return NULL;
3183 }
3184 
3185 void
3186 __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3187 {
3188 }
3189 #endif
3190