/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf_frag.c
 * QCA driver framework (QDF) network nbuf frag management APIs
 */

#include <qdf_atomic.h>
#include <qdf_list.h>
#include <qdf_debugfs.h>
#include <qdf_module.h>
#include <qdf_nbuf_frag.h>
#include <qdf_trace.h>
#include "qdf_str.h"

#ifdef QDF_NBUF_FRAG_GLOBAL_COUNT
#define FRAG_DEBUGFS_NAME    "frag_counters"
static qdf_atomic_t frag_count;
#endif

#if defined(NBUF_FRAG_MEMORY_DEBUG) || defined(QDF_NBUF_FRAG_GLOBAL_COUNT)
static bool is_initial_mem_debug_disabled;
#endif

#ifdef QDF_NBUF_FRAG_GLOBAL_COUNT

uint32_t __qdf_frag_count_get(void)
{
	return qdf_atomic_read(&frag_count);
}

qdf_export_symbol(__qdf_frag_count_get);

void __qdf_frag_count_inc(uint32_t value)
{
	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	qdf_atomic_add(value, &frag_count);
}

qdf_export_symbol(__qdf_frag_count_inc);

void __qdf_frag_count_dec(uint32_t value)
{
	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	qdf_atomic_sub(value, &frag_count);
}

qdf_export_symbol(__qdf_frag_count_dec);
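
/*
 * Illustrative note (not from the original file): callers are expected to
 * reach these counters through the qdf_frag_count_inc()/qdf_frag_count_dec()
 * wrappers declared in the qdf headers, bumping the count once per frag that
 * is attached to or detached from an nbuf, e.g. (hypothetical caller):
 *
 *	qdf_frag_count_inc(1);	// frag handed to an nbuf
 *	...
 *	qdf_frag_count_dec(1);	// frag released
 *
 * so that __qdf_frag_count_get(), exposed through the "frag_counters"
 * debugfs entry created in __qdf_frag_mod_init() below, reflects the number
 * of frags currently outstanding.
 */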

void __qdf_frag_mod_init(void)
{
	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
	qdf_atomic_init(&frag_count);
	qdf_debugfs_create_atomic(FRAG_DEBUGFS_NAME, S_IRUSR, NULL,
				  &frag_count);
}

void __qdf_frag_mod_exit(void)
{
}
#endif /* QDF_NBUF_FRAG_GLOBAL_COUNT */

#ifdef NBUF_FRAG_MEMORY_DEBUG

#define QDF_FRAG_TRACK_MAX_SIZE    1024

/**
 * struct qdf_frag_track_node_t - Network frag tracking node structure
 * @hnode: list_head for next and prev pointers
 * @p_frag: Pointer to frag
 * @alloc_func_name: Function where the frag was allocated
 * @alloc_func_line: Allocation function line no.
 * @refcount: No. of references to the frag
 * @last_func_name: Function where the frag was most recently accessed
 * @last_func_line: Line no. of the last accessing function
 */
struct qdf_frag_track_node_t {
	qdf_list_node_t hnode;
	qdf_frag_t p_frag;
	char alloc_func_name[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t alloc_func_line;
	uint8_t refcount;
	char last_func_name[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t last_func_line;
};

/**
 * struct qdf_frag_tracking_list_t - Frag node tracking list
 * @track_list: qdf_list_t for maintaining the list
 * @list_lock: Lock over the list
 */
typedef struct qdf_frag_tracking_list_t {
	qdf_list_t track_list;
	qdf_spinlock_t list_lock;
} qdf_frag_tracking_list;

typedef struct qdf_frag_track_node_t QDF_FRAG_TRACK;

/*
 * Array of tracking lists for maintaining the allocated
 * debug frag nodes, indexed by the calculated hash value.
 */
static qdf_frag_tracking_list gp_qdf_frag_track_tbl[QDF_FRAG_TRACK_MAX_SIZE];

static struct kmem_cache *frag_tracking_cache;

/* Tracking list for maintaining the free debug frag nodes */
static qdf_frag_tracking_list qdf_frag_track_free_list;

/*
 * Parameters for statistics
 * qdf_frag_track_free_list_count: No. of free nodes
 * qdf_frag_track_used_list_count: No. of nodes used
 * qdf_frag_track_max_used: Max no. of nodes used during execution
 * qdf_frag_track_max_free: Max free nodes observed during execution
 * qdf_frag_track_max_allocated: Max no. of allocated nodes
 */
static uint32_t qdf_frag_track_free_list_count;
static uint32_t qdf_frag_track_used_list_count;
static uint32_t qdf_frag_track_max_used;
static uint32_t qdf_frag_track_max_free;
static uint32_t qdf_frag_track_max_allocated;

/**
 * qdf_frag_update_max_used() - Update qdf_frag_track_max_used tracking variable
 *
 * Tracks the max number of frags that the wlan driver was tracking at any one
 * time.
 *
 * Return: none
 */
static inline void qdf_frag_update_max_used(void)
{
	int sum;

	/* Update max_used if it is less than the used list count */
	if (qdf_frag_track_max_used < qdf_frag_track_used_list_count)
		qdf_frag_track_max_used = qdf_frag_track_used_list_count;

	/* Calculate the no. of allocated nodes */
	sum = qdf_frag_track_used_list_count + qdf_frag_track_free_list_count;

	/* Update max_allocated if it is less than the no. of allocated nodes */
	if (qdf_frag_track_max_allocated < sum)
		qdf_frag_track_max_allocated = sum;
}

/**
 * qdf_frag_update_max_free() - Update qdf_frag_track_max_free
 *
 * Tracks the max number of tracking buffers kept in the freelist.
 *
 * Return: none
 */
static inline void qdf_frag_update_max_free(void)
{
	if (qdf_frag_track_max_free < qdf_frag_track_free_list_count)
		qdf_frag_track_max_free = qdf_frag_track_free_list_count;
}

/**
 * qdf_frag_track_alloc() - Allocate a cookie to track frags allocated by wlan
 *
 * This function pulls from the freelist if possible, otherwise it uses
 * kmem_cache_alloc(). This also adds flexibility to adjust the allocation
 * and freelist schemes.
 *
 * Return: Pointer to an unused QDF_FRAG_TRACK structure, which may not be
 * zeroed
 */
static QDF_FRAG_TRACK *qdf_frag_track_alloc(void)
{
	int flags = GFP_KERNEL;
	QDF_FRAG_TRACK *frag_track_node = NULL;
	qdf_list_node_t *temp_list_node;

	qdf_spin_lock_irqsave(&qdf_frag_track_free_list.list_lock);
	qdf_frag_track_used_list_count++;

	if (!qdf_list_empty(&qdf_frag_track_free_list.track_list)) {
		qdf_list_remove_front(&qdf_frag_track_free_list.track_list,
				      &temp_list_node);
		frag_track_node = qdf_container_of(temp_list_node,
						   struct qdf_frag_track_node_t,
						   hnode);
		qdf_frag_track_free_list_count--;
	}

	qdf_frag_update_max_used();
	qdf_spin_unlock_irqrestore(&qdf_frag_track_free_list.list_lock);

	if (frag_track_node)
		return frag_track_node;

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	frag_track_node = kmem_cache_alloc(frag_tracking_cache, flags);
	if (frag_track_node)
		qdf_init_list_head(&frag_track_node->hnode);

	return frag_track_node;
}

/* FREEQ_POOLSIZE: initial and minimum desired freelist pool size */
#define FREEQ_POOLSIZE    2048

/**
 * qdf_frag_track_free() - Free the frag tracking cookie
 * @frag_track_node: Debug frag node address
 *
 * Matches calls to qdf_frag_track_alloc().
 * Either frees the tracking cookie back to the kernel or returns it to an
 * internal freelist, based on the size of the freelist.
 *
 * Return: none
 */
static void qdf_frag_track_free(QDF_FRAG_TRACK *frag_track_node)
{
	if (!frag_track_node)
		return;

	/*
	 * Try to shrink the freelist if free_list_count is greater than
	 * FREEQ_POOLSIZE; only shrink it if it is also bigger than twice the
	 * number of frags in use. Otherwise add the frag debug track node to
	 * the front of qdf_frag_track_free_list.
	 */
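	/*
	 * Worked example for the check below (illustrative numbers only):
	 * with FREEQ_POOLSIZE = 2048, free = 2100 and used = 900 the node is
	 * returned to the kernel (2100 > 2048 and 2100 > 2 * 900), whereas
	 * with free = 2100 and used = 1200 it stays on the freelist
	 * (2100 <= 2 * 1200).
	 */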

	qdf_spin_lock_irqsave(&qdf_frag_track_free_list.list_lock);

	qdf_frag_track_used_list_count--;
	if (qdf_frag_track_free_list_count > FREEQ_POOLSIZE &&
	    (qdf_frag_track_free_list_count >
	    qdf_frag_track_used_list_count << 1)) {
		kmem_cache_free(frag_tracking_cache, frag_track_node);
	} else {
		qdf_list_insert_front(&qdf_frag_track_free_list.track_list,
				      &frag_track_node->hnode);
		qdf_frag_track_free_list_count++;
	}
	qdf_frag_update_max_free();
	qdf_spin_unlock_irqrestore(&qdf_frag_track_free_list.list_lock);
}

/**
 * qdf_frag_track_prefill() - Prefill the frag tracking cookie freelist
 *
 * Return: none
 */
static void qdf_frag_track_prefill(void)
{
	int index;
	QDF_FRAG_TRACK *curr_node, *next_node;
	qdf_list_t temp_list;

	qdf_list_create(&temp_list, 0);

	/* Prepopulate the freelist */
	for (index = 0; index < FREEQ_POOLSIZE; index++) {
		curr_node = qdf_frag_track_alloc();
		if (!curr_node)
			continue;
		qdf_list_insert_front(&temp_list, &curr_node->hnode);
	}

	curr_node = NULL;
	next_node = NULL;

	qdf_list_for_each_del(&temp_list, curr_node, next_node, hnode) {
		qdf_list_remove_node(&temp_list, &curr_node->hnode);
		qdf_frag_track_free(curr_node);
	}

	/* prefilled buffers should not count as used */
	qdf_frag_track_max_used = 0;

	qdf_list_destroy(&temp_list);
}

/**
 * qdf_frag_track_memory_manager_create() - Create the memory manager for
 * frag tracking cookies
 *
 * This initializes the memory manager for the frag tracking cookies. Because
 * these cookies are all the same size and only used in this feature, we can
 * use a kmem_cache to provide tracking as well as to speed up allocations.
 * To avoid the overhead of allocating and freeing the buffers (including SLUB
 * features) a freelist is prepopulated here.
 *
 * Return: none
 */
static void qdf_frag_track_memory_manager_create(void)
{
	qdf_spinlock_create(&qdf_frag_track_free_list.list_lock);
	qdf_list_create(&qdf_frag_track_free_list.track_list, 0);
	frag_tracking_cache = kmem_cache_create("qdf_frag_tracking_cache",
						sizeof(QDF_FRAG_TRACK),
						0, 0, NULL);

	qdf_frag_track_prefill();
}

/**
 * qdf_frag_track_memory_manager_destroy() - Destroy the memory manager for
 * frag tracking cookies
 *
 * Empty the freelist and print out usage statistics when it is no longer
 * needed. The kmem_cache is also destroyed here so that it can warn if any
 * frag tracking cookies were leaked.
 *
 * Return: none
 */
static void qdf_frag_track_memory_manager_destroy(void)
{
	QDF_FRAG_TRACK *curr_node, *next_node;

	curr_node = next_node = NULL;

	qdf_spin_lock_irqsave(&qdf_frag_track_free_list.list_lock);

	if (qdf_frag_track_max_used > FREEQ_POOLSIZE * 4)
		qdf_info("Unexpectedly large max_used count %d",
			 qdf_frag_track_max_used);

	if (qdf_frag_track_max_used < qdf_frag_track_max_allocated)
		qdf_info("%d Unused trackers were allocated",
			 qdf_frag_track_max_allocated -
			 qdf_frag_track_max_used);

	if (qdf_frag_track_free_list_count > FREEQ_POOLSIZE &&
	    qdf_frag_track_free_list_count > 3 * qdf_frag_track_max_used / 4)
		qdf_info("Check freelist shrinking functionality");

	qdf_info("%d Residual freelist size", qdf_frag_track_free_list_count);

	qdf_info("%d Max freelist size observed", qdf_frag_track_max_free);

	qdf_info("%d Max buffers used observed", qdf_frag_track_max_used);

	qdf_info("%d Max buffers allocated observed",
		 qdf_frag_track_max_allocated);

	qdf_list_for_each_del(&qdf_frag_track_free_list.track_list,
			      curr_node, next_node, hnode) {
		qdf_list_remove_node(&qdf_frag_track_free_list.track_list,
				     &curr_node->hnode);
		kmem_cache_free(frag_tracking_cache, curr_node);
		qdf_frag_track_free_list_count--;
	}

	if (qdf_frag_track_free_list_count != 0)
		qdf_info("%d Unfreed tracking memory lost in freelist",
			 qdf_frag_track_free_list_count);

	if (qdf_frag_track_used_list_count != 0)
		qdf_info("%d Unfreed tracking memory still in use",
			 qdf_frag_track_used_list_count);

	qdf_spin_unlock_irqrestore(&qdf_frag_track_free_list.list_lock);
	kmem_cache_destroy(frag_tracking_cache);

	qdf_list_destroy(&qdf_frag_track_free_list.track_list);
	qdf_spinlock_destroy(&qdf_frag_track_free_list.list_lock);
}

/**
 * qdf_frag_debug_init() - Initialize network frag debug functionality
 *
 * The QDF frag buffer debug feature tracks all frags allocated by the WLAN
 * driver in a hash table, and when the driver is unloaded it reports the
 * leaked frags.
 *
 * Return: none
 */
void qdf_frag_debug_init(void)
{
	uint32_t index;

	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();

	if (is_initial_mem_debug_disabled)
		return;

	qdf_frag_track_memory_manager_create();

	for (index = 0; index < QDF_FRAG_TRACK_MAX_SIZE; index++) {
		qdf_list_create(&gp_qdf_frag_track_tbl[index].track_list, 0);
		qdf_spinlock_create(&gp_qdf_frag_track_tbl[index].list_lock);
	}
}

qdf_export_symbol(qdf_frag_debug_init);
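
/*
 * Typical lifecycle of a tracked frag (illustrative sketch; the actual call
 * sites live in the nbuf rx/tx paths, not in this file):
 *
 *	qdf_frag_debug_init();			// at driver load
 *	p = qdf_frag_alloc_debug(cache, sz, __func__, __LINE__);
 *	qdf_frag_debug_refcount_inc(p, __func__, __LINE__);
 *	qdf_frag_debug_refcount_dec(p, __func__, __LINE__);
 *	qdf_frag_debug_exit();			// at unload, leaks reported
 *
 * qdf_frag_alloc_debug() adds the node with refcount 0, refcount_inc() marks
 * the frag as attached to an nbuf, and the node is removed once the refcount
 * drops back to 0. A frag that is never attached is instead removed via
 * qdf_frag_free_debug() when it is freed.
 */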

void qdf_frag_debug_exit(void)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;
	QDF_FRAG_TRACK *p_prev;

	if (is_initial_mem_debug_disabled)
		return;

	for (index = 0; index < QDF_FRAG_TRACK_MAX_SIZE; index++) {
		qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[index].list_lock);
		qdf_list_for_each_del(&gp_qdf_frag_track_tbl[index].track_list,
				      p_prev, p_node, hnode) {
			qdf_list_remove_node(
				&gp_qdf_frag_track_tbl[index].track_list,
				&p_prev->hnode);
			qdf_info("******Frag Memory Leak******");
			qdf_info("@Frag Address: %pK", p_prev->p_frag);
			qdf_info("@Refcount: %u", p_prev->refcount);
			qdf_info("@Alloc Func Name: %s, @Alloc Func Line: %d",
				 p_prev->alloc_func_name,
				 p_prev->alloc_func_line);
			qdf_info("@Last Func Name: %s, @Last Func Line: %d",
				 p_prev->last_func_name,
				 p_prev->last_func_line);
			qdf_info("****************************");

			qdf_frag_track_free(p_prev);
		}
		qdf_list_destroy(&gp_qdf_frag_track_tbl[index].track_list);
		qdf_spin_unlock_irqrestore(
				&gp_qdf_frag_track_tbl[index].list_lock);
		qdf_spinlock_destroy(&gp_qdf_frag_track_tbl[index].list_lock);
	}

	qdf_frag_track_memory_manager_destroy();
}

qdf_export_symbol(qdf_frag_debug_exit);

/**
 * qdf_frag_debug_hash() - Hash network frag pointer
 * @p_frag: Frag address
 *
 * Return: hash value
 */
static uint32_t qdf_frag_debug_hash(qdf_frag_t p_frag)
{
	uint32_t index;

	index = (uint32_t)(((uintptr_t)p_frag) >> 4);
	index += (uint32_t)(((uintptr_t)p_frag) >> 14);
	index &= (QDF_FRAG_TRACK_MAX_SIZE - 1);

	return index;
}
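
/*
 * Worked example (illustrative): for p_frag == (qdf_frag_t)0x5000,
 * (0x5000 >> 4) + (0x5000 >> 14) = 0x500 + 0x1 = 0x501, and masking with
 * (QDF_FRAG_TRACK_MAX_SIZE - 1) == 0x3ff gives bucket 0x101 (257). The two
 * shifts mix higher address bits into the index so that nearby frag
 * addresses spread across the 1024 buckets of gp_qdf_frag_track_tbl.
 */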

/**
 * qdf_frag_debug_look_up() - Look up network frag in debug hash table
 * @p_frag: Frag address
 *
 * Return: Pointer to the tracking node if the frag is found in the hash
 *	table, else NULL
 */
static QDF_FRAG_TRACK *qdf_frag_debug_look_up(qdf_frag_t p_frag)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;

	index = qdf_frag_debug_hash(p_frag);

	qdf_list_for_each(&gp_qdf_frag_track_tbl[index].track_list, p_node,
			  hnode) {
		if (p_node->p_frag == p_frag)
			return p_node;
	}

	return NULL;
}

/**
 * __qdf_frag_debug_add_node() - Add frag node to debug tracker
 * @fragp: Frag pointer
 * @idx: Index
 * @func_name: Caller function name
 * @line_num: Caller function line no.
 *
 * Return: Allocated frag tracker node address
 */
static QDF_FRAG_TRACK *__qdf_frag_debug_add_node(qdf_frag_t fragp,
						 uint32_t idx,
						 const char *func_name,
						 uint32_t line_num)
{
	QDF_FRAG_TRACK *p_node;

	p_node = qdf_frag_track_alloc();

	if (p_node) {
		p_node->p_frag = fragp;
		qdf_str_lcopy(p_node->alloc_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		p_node->alloc_func_line = line_num;
		p_node->refcount = QDF_NBUF_FRAG_DEBUG_COUNT_ZERO;

		qdf_str_lcopy(p_node->last_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		p_node->last_func_line = line_num;

		qdf_list_insert_front(&gp_qdf_frag_track_tbl[idx].track_list,
				      &p_node->hnode);
	}
	return p_node;
}

/**
 * __qdf_frag_debug_delete_node() - Remove frag node from debug tracker
 * @p_node: Frag node address in debug tracker
 * @idx: Index
 *
 * Return: none
 */
static void __qdf_frag_debug_delete_node(QDF_FRAG_TRACK *p_node, uint32_t idx)
{
	if (idx < QDF_FRAG_TRACK_MAX_SIZE) {
		qdf_list_remove_node(&gp_qdf_frag_track_tbl[idx].track_list,
				     &p_node->hnode);
		qdf_frag_track_free(p_node);
	} else {
		qdf_info("Index value exceeds %d for delete node operation",
			 QDF_FRAG_TRACK_MAX_SIZE);
	}
}

void qdf_frag_debug_add_node(qdf_frag_t fragp, const char *func_name,
			     uint32_t line_num)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;

	if (is_initial_mem_debug_disabled)
		return;

	index = qdf_frag_debug_hash(fragp);

	qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[index].list_lock);

	p_node = qdf_frag_debug_look_up(fragp);

	if (p_node) {
		qdf_info("Double addition of frag %pK to debug tracker!!",
			 fragp);
		qdf_info("Already added from %s %d Current addition from %s %d",
			 p_node->alloc_func_name,
			 p_node->alloc_func_line, func_name, line_num);
	} else {
		p_node = __qdf_frag_debug_add_node(fragp, index, func_name,
						   line_num);
		if (!p_node)
			qdf_info("Memory allocation failed !! "
				 "Add node oprt failed for frag %pK from %s %d",
				 fragp, func_name, line_num);
	}
	qdf_spin_unlock_irqrestore(&gp_qdf_frag_track_tbl[index].list_lock);
}

void qdf_frag_debug_refcount_inc(qdf_frag_t fragp, const char *func_name,
				 uint32_t line_num)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;

	if (is_initial_mem_debug_disabled)
		return;

	index = qdf_frag_debug_hash(fragp);

	qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[index].list_lock);

	p_node = qdf_frag_debug_look_up(fragp);

	if (p_node) {
		(p_node->refcount)++;

		qdf_str_lcopy(p_node->last_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		p_node->last_func_line = line_num;
	} else {
		p_node = __qdf_frag_debug_add_node(fragp, index, func_name,
						   line_num);
		if (p_node)
			p_node->refcount = QDF_NBUF_FRAG_DEBUG_COUNT_ONE;
		else
			qdf_info("Memory allocation failed !! "
				 "Refcount inc failed for frag %pK from %s %d",
				 fragp, func_name, line_num);
	}
	qdf_spin_unlock_irqrestore(&gp_qdf_frag_track_tbl[index].list_lock);
}

void qdf_frag_debug_refcount_dec(qdf_frag_t fragp, const char *func_name,
				 uint32_t line_num)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;

	if (is_initial_mem_debug_disabled)
		return;

	index = qdf_frag_debug_hash(fragp);

	qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[index].list_lock);

	p_node = qdf_frag_debug_look_up(fragp);

	if (p_node) {
		if (!(p_node->refcount)) {
			qdf_info("Refcount dec oprt for frag %pK not permitted "
				 "as refcount=0", fragp);
			goto done;
		}
		(p_node->refcount)--;

		if (!(p_node->refcount)) {
			/* Remove frag debug node when refcount reaches 0 */
			__qdf_frag_debug_delete_node(p_node, index);
		} else {
			qdf_str_lcopy(p_node->last_func_name, func_name,
				      QDF_MEM_FUNC_NAME_SIZE);
			p_node->last_func_line = line_num;
		}
	} else {
		qdf_info("Unallocated frag !! Could not track frag %pK", fragp);
		qdf_info("Refcount dec oprt failed for frag %pK from %s %d",
			 fragp, func_name, line_num);
	}
done:
	qdf_spin_unlock_irqrestore(&gp_qdf_frag_track_tbl[index].list_lock);
}

void qdf_frag_debug_delete_node(qdf_frag_t fragp, const char *func_name,
				uint32_t line_num)
{
	uint32_t index;
	QDF_FRAG_TRACK *p_node;

	if (is_initial_mem_debug_disabled)
		return;

	index = qdf_frag_debug_hash(fragp);

	qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[index].list_lock);

	p_node = qdf_frag_debug_look_up(fragp);

	if (p_node) {
		if (p_node->refcount) {
			qdf_info("Frag %pK has refcount %d", fragp,
				 p_node->refcount);
			qdf_info("Delete oprt failed for frag %pK from %s %d",
				 fragp, func_name, line_num);
		} else {
			/* Remove node from tracker as refcount=0 */
			__qdf_frag_debug_delete_node(p_node, index);
		}
	} else {
		qdf_info("Unallocated frag !! Double free of frag %pK", fragp);
		qdf_info("Could not track frag %pK for delete oprt from %s %d",
			 fragp, func_name, line_num);
	}

	qdf_spin_unlock_irqrestore(&gp_qdf_frag_track_tbl[index].list_lock);
}

void qdf_frag_debug_update_addr(qdf_frag_t p_fragp, qdf_frag_t n_fragp,
				const char *func_name, uint32_t line_num)
{
	uint32_t prev_index, new_index;
	QDF_FRAG_TRACK *p_node;

	if (is_initial_mem_debug_disabled)
		return;

	prev_index = qdf_frag_debug_hash(p_fragp);

	new_index = qdf_frag_debug_hash(n_fragp);

	qdf_spin_lock_irqsave(&gp_qdf_frag_track_tbl[prev_index].list_lock);

	p_node = qdf_frag_debug_look_up(p_fragp);

	if (!p_node) {
		qdf_info("Unallocated frag !! Could not track frag %pK",
			 p_fragp);
		qdf_info("Update address oprt failed for frag %pK from %s %d",
			 p_fragp, func_name, line_num);
		qdf_spin_unlock_irqrestore(
				&gp_qdf_frag_track_tbl[prev_index].list_lock);
	} else {
		/* Update frag address */
		p_node->p_frag = n_fragp;

		qdf_str_lcopy(p_node->last_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		p_node->last_func_line = line_num;

		if (prev_index != new_index) {
			qdf_list_remove_node(
				&gp_qdf_frag_track_tbl[prev_index].track_list,
				&p_node->hnode);

			qdf_spin_unlock_irqrestore(
				&gp_qdf_frag_track_tbl[prev_index].list_lock);

			qdf_spin_lock_irqsave(
				&gp_qdf_frag_track_tbl[new_index].list_lock);

			qdf_list_insert_front(
				&gp_qdf_frag_track_tbl[new_index].track_list,
				&p_node->hnode);

			qdf_spin_unlock_irqrestore(
				&gp_qdf_frag_track_tbl[new_index].list_lock);
		} else {
			qdf_spin_unlock_irqrestore(
				&gp_qdf_frag_track_tbl[prev_index].list_lock);
		}
	}
}

qdf_frag_t qdf_frag_alloc_debug(qdf_frag_cache_t *pf_cache,
				unsigned int frag_size,
				const char *func_name,
				uint32_t line_num)
{
	qdf_frag_t p_frag;

	if (is_initial_mem_debug_disabled)
		return __qdf_frag_alloc(pf_cache, frag_size);

	p_frag = __qdf_frag_alloc(pf_cache, frag_size);

	/* Store frag in QDF Frag Tracking Table */
	if (qdf_likely(p_frag))
		qdf_frag_debug_add_node(p_frag, func_name, line_num);

	return p_frag;
}

qdf_export_symbol(qdf_frag_alloc_debug);

void qdf_frag_free_debug(qdf_frag_t vaddr, const char *func_name,
			 uint32_t line_num)
{
	if (qdf_unlikely(!vaddr))
		return;

	if (is_initial_mem_debug_disabled)
		goto free_frag;

	qdf_frag_debug_delete_node(vaddr, func_name, line_num);
free_frag:
	__qdf_frag_free(vaddr);
}

qdf_export_symbol(qdf_frag_free_debug);
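
/*
 * Note (illustrative, not from the original file): callers do not normally
 * invoke the *_debug() variants directly. Under NBUF_FRAG_MEMORY_DEBUG the
 * qdf_frag_alloc()/qdf_frag_free() wrappers in qdf_nbuf_frag.h are expected
 * to expand to something like:
 *
 *	#define qdf_frag_alloc(c, s) \
 *		qdf_frag_alloc_debug(c, s, __func__, __LINE__)
 *	#define qdf_frag_free(p) \
 *		qdf_frag_free_debug(p, __func__, __LINE__)
 *
 * so every allocation and free is recorded with its caller and line number.
 */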

#endif /* NBUF_FRAG_MEMORY_DEBUG */

#if defined(HIF_PCI)
QDF_STATUS __qdf_mem_map_page(qdf_device_t osdev, __qdf_frag_t buf,
			      qdf_dma_dir_t dir, size_t nbytes,
			      qdf_dma_addr_t *phy_addr)
{
	struct page *page;
	unsigned long offset;

	page = virt_to_head_page(buf);
	offset = buf - page_address(page);
	*phy_addr = dma_map_page(osdev->dev, page, offset, nbytes,
				 __qdf_dma_dir_to_os(dir));

	return dma_mapping_error(osdev->dev, *phy_addr) ?
		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS __qdf_mem_map_page(qdf_device_t osdev, __qdf_frag_t buf,
			      qdf_dma_dir_t dir, size_t nbytes,
			      qdf_dma_addr_t *phy_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

qdf_export_symbol(__qdf_mem_map_page);

#if defined(HIF_PCI)
void __qdf_mem_unmap_page(qdf_device_t osdev, qdf_dma_addr_t paddr,
			  size_t nbytes, qdf_dma_dir_t dir)
{
	dma_unmap_page(osdev->dev, paddr, nbytes,
		       __qdf_dma_dir_to_os(dir));
}
#else
void __qdf_mem_unmap_page(qdf_device_t osdev, qdf_dma_addr_t paddr,
			  size_t nbytes, qdf_dma_dir_t dir)
{
}
#endif

qdf_export_symbol(__qdf_mem_unmap_page);
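
/*
 * Usage sketch (illustrative, assuming the qdf_mem_map_page()/
 * qdf_mem_unmap_page() wrappers in the qdf headers; "frag" and "frag_len"
 * are hypothetical caller variables): a frag is mapped for device access and
 * later unmapped with the same length and direction.
 *
 *	qdf_dma_addr_t paddr;
 *
 *	if (qdf_mem_map_page(osdev, frag, QDF_DMA_TO_DEVICE,
 *			     frag_len, &paddr) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	// ... hand paddr to the hardware ...
 *	qdf_mem_unmap_page(osdev, paddr, frag_len, QDF_DMA_TO_DEVICE);
 */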

#if defined(QDF_FRAG_CACHE_SUPPORT)
void __qdf_frag_cache_drain(qdf_frag_cache_t *pf_cache)
{
	struct page *page;

	if (!pf_cache->va)
		return;

	page = virt_to_page(pf_cache->va);
	__page_frag_cache_drain(page, pf_cache->pagecnt_bias);
	memset(pf_cache, 0, sizeof(*pf_cache));
}
#else
void __qdf_frag_cache_drain(qdf_frag_cache_t *pf_cache)
{
}
#endif

qdf_export_symbol(__qdf_frag_cache_drain);