xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: qdf_nbuf.c
21  * QCA driver framework(QDF) network buffer management APIs
22  */
23 
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <qdf_atomic.h>
31 #include <qdf_debugfs.h>
32 #include <qdf_lock.h>
33 #include <qdf_mem.h>
34 #include <qdf_module.h>
35 #include <qdf_nbuf.h>
36 #include <qdf_status.h>
37 #include "qdf_str.h"
38 #include <qdf_trace.h>
39 #include "qdf_tracker.h"
40 #include <qdf_types.h>
41 #include <net/ieee80211_radiotap.h>
42 #include <pld_common.h>
43 
44 #if defined(FEATURE_TSO)
45 #include <net/ipv6.h>
46 #include <linux/ipv6.h>
47 #include <linux/tcp.h>
48 #include <linux/if_vlan.h>
49 #include <linux/ip.h>
50 #endif /* FEATURE_TSO */
51 
52 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
53 
54 #define qdf_nbuf_users_inc atomic_inc
55 #define qdf_nbuf_users_dec atomic_dec
56 #define qdf_nbuf_users_set atomic_set
57 #define qdf_nbuf_users_read atomic_read
58 #else
59 #define qdf_nbuf_users_inc refcount_inc
60 #define qdf_nbuf_users_dec refcount_dec
61 #define qdf_nbuf_users_set refcount_set
62 #define qdf_nbuf_users_read refcount_read
63 #endif /* KERNEL_VERSION(4, 13, 0) */
64 
65 #define IEEE80211_RADIOTAP_VHT_BW_20	0
66 #define IEEE80211_RADIOTAP_VHT_BW_40	1
67 #define IEEE80211_RADIOTAP_VHT_BW_80	2
68 #define IEEE80211_RADIOTAP_VHT_BW_160	3
69 
70 #define RADIOTAP_VHT_BW_20	0
71 #define RADIOTAP_VHT_BW_40	1
72 #define RADIOTAP_VHT_BW_80	4
73 #define RADIOTAP_VHT_BW_160	11
74 
75 /* channel number to freq conversion */
76 #define CHANNEL_NUM_14 14
77 #define CHANNEL_NUM_15 15
78 #define CHANNEL_NUM_27 27
79 #define CHANNEL_NUM_35 35
80 #define CHANNEL_NUM_182 182
81 #define CHANNEL_NUM_197 197
82 #define CHANNEL_FREQ_2484 2484
83 #define CHANNEL_FREQ_2407 2407
84 #define CHANNEL_FREQ_2512 2512
85 #define CHANNEL_FREQ_5000 5000
86 #define CHANNEL_FREQ_4000 4000
87 #define CHANNEL_FREQ_5150 5150
88 #define FREQ_MULTIPLIER_CONST_5MHZ 5
89 #define FREQ_MULTIPLIER_CONST_20MHZ 20
90 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
91 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
92 #define RADIOTAP_CCK_CHANNEL 0x0020
93 #define RADIOTAP_OFDM_CHANNEL 0x0040
94 
95 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
96 #include <qdf_mc_timer.h>
97 
98 struct qdf_track_timer {
99 	qdf_mc_timer_t track_timer;
100 	qdf_atomic_t alloc_fail_cnt;
101 };
102 
103 static struct qdf_track_timer alloc_track_timer;
104 
105 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
106 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
107 #endif
108 
109 #ifdef NBUF_MEMORY_DEBUG
110 /* SMMU crash indication*/
111 static qdf_atomic_t smmu_crashed;
112 /* Number of nbuf not added to history*/
113 unsigned long g_histroy_add_drop;
114 #endif
115 
116 /* Packet Counter */
117 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
118 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
119 #ifdef QDF_NBUF_GLOBAL_COUNT
120 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
121 static qdf_atomic_t nbuf_count;
122 #endif
123 
124 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
125 static bool is_initial_mem_debug_disabled;
126 #endif
127 
128 /**
129  *  __qdf_nbuf_get_ip_offset - Get IPV4/V6 header offset
130  * @data: Pointer to network data buffer
131  *
132  * Get the IP header offset in case of 8021Q and 8021AD
133  * tag is present in L2 header.
134  *
135  * Return: IP header offset
136  */
137 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
138 {
139 	uint16_t ether_type;
140 
141 	ether_type = *(uint16_t *)(data +
142 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
143 
144 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
145 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
146 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
147 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
148 
149 	return QDF_NBUF_TRAC_IP_OFFSET;
150 }
151 
152 /**
153  *  __qdf_nbuf_get_ether_type - Get the ether type
154  * @data: Pointer to network data buffer
155  *
156  * Get the ether type in case of 8021Q and 8021AD tag
157  * is present in L2 header, e.g for the returned ether type
158  * value, if IPV4 data ether type 0x0800, return 0x0008.
159  *
160  * Return ether type.
161  */
162 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
163 {
164 	uint16_t ether_type;
165 
166 	ether_type = *(uint16_t *)(data +
167 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
168 
169 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
170 		ether_type = *(uint16_t *)(data +
171 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
172 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
173 		ether_type = *(uint16_t *)(data +
174 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
175 
176 	return ether_type;
177 }
178 
179 /**
180  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
181  *
182  * Return: none
183  */
184 void qdf_nbuf_tx_desc_count_display(void)
185 {
186 	qdf_debug("Current Snapshot of the Driver:");
187 	qdf_debug("Data Packets:");
188 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
189 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
190 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
191 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
192 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
193 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
194 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
195 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
196 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
198 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
199 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
205 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
207 	qdf_debug("Mgmt Packets:");
208 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
209 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
210 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
211 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
212 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
213 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
214 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
215 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
216 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
222 }
223 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
224 
225 /**
226  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
227  * @packet_type   : packet type either mgmt/data
228  * @current_state : layer at which the packet currently present
229  *
230  * Return: none
231  */
232 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
233 			uint8_t current_state)
234 {
235 	switch (packet_type) {
236 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
237 		nbuf_tx_mgmt[current_state]++;
238 		break;
239 	case QDF_NBUF_TX_PKT_DATA_TRACK:
240 		nbuf_tx_data[current_state]++;
241 		break;
242 	default:
243 		break;
244 	}
245 }
246 
247 /**
248  * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
249  *
250  * Return: none
251  */
252 void qdf_nbuf_tx_desc_count_clear(void)
253 {
254 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
255 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
256 }
257 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
258 
259 /**
260  * qdf_nbuf_set_state() - Updates the packet state
261  * @nbuf:            network buffer
262  * @current_state :  layer at which the packet currently is
263  *
264  * This function updates the packet state to the layer at which the packet
265  * currently is
266  *
267  * Return: none
268  */
269 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
270 {
271 	/*
272 	 * Only Mgmt, Data Packets are tracked. WMI messages
273 	 * such as scan commands are not tracked
274 	 */
275 	uint8_t packet_type;
276 
277 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
278 
279 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
280 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
281 		return;
282 	}
283 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
284 	qdf_nbuf_tx_desc_count_update(packet_type,
285 					current_state);
286 }
287 qdf_export_symbol(qdf_nbuf_set_state);
288 
289 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
290 /**
291  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
292  *
293  * This function starts the alloc fail replenish timer.
294  *
295  * Return: void
296  */
297 static void __qdf_nbuf_start_replenish_timer(void)
298 {
299 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
300 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
301 	    QDF_TIMER_STATE_RUNNING)
302 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
303 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
304 }
305 
306 /**
307  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
308  *
309  * This function stops the alloc fail replenish timer.
310  *
311  * Return: void
312  */
313 static void __qdf_nbuf_stop_replenish_timer(void)
314 {
315 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
316 		return;
317 
318 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
319 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
320 	    QDF_TIMER_STATE_RUNNING)
321 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
322 }
323 
324 /**
325  * qdf_replenish_expire_handler - Replenish expire handler
326  *
327  * This function triggers when the alloc fail replenish timer expires.
328  *
329  * Return: void
330  */
331 static void qdf_replenish_expire_handler(void *arg)
332 {
333 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
334 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
335 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
336 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
337 
338 		/* Error handling here */
339 	}
340 }
341 
342 /**
343  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
344  *
345  * This function initializes the nbuf alloc fail replenish timer.
346  *
347  * Return: void
348  */
349 void __qdf_nbuf_init_replenish_timer(void)
350 {
351 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
352 			  qdf_replenish_expire_handler, NULL);
353 }
354 
355 /**
356  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
357  *
358  * This function deinitializes the nbuf alloc fail replenish timer.
359  *
360  * Return: void
361  */
362 void __qdf_nbuf_deinit_replenish_timer(void)
363 {
364 	__qdf_nbuf_stop_replenish_timer();
365 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
366 }
367 #else
368 
369 static inline void __qdf_nbuf_start_replenish_timer(void) {}
370 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
371 #endif
372 
373 /* globals do not need to be initialized to NULL/0 */
374 qdf_nbuf_trace_update_t qdf_trace_update_cb;
375 qdf_nbuf_free_t nbuf_free_cb;
376 
377 #ifdef QDF_NBUF_GLOBAL_COUNT
378 
379 /**
380  * __qdf_nbuf_count_get() - get nbuf global count
381  *
382  * Return: nbuf global count
383  */
384 int __qdf_nbuf_count_get(void)
385 {
386 	return qdf_atomic_read(&nbuf_count);
387 }
388 qdf_export_symbol(__qdf_nbuf_count_get);
389 
390 /**
391  * __qdf_nbuf_count_inc() - increment nbuf global count
392  *
393  * @buf: sk buff
394  *
395  * Return: void
396  */
397 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
398 {
399 	int num_nbuf = 1;
400 	qdf_nbuf_t ext_list;
401 
402 	if (qdf_likely(is_initial_mem_debug_disabled))
403 		return;
404 
405 	ext_list = qdf_nbuf_get_ext_list(nbuf);
406 
407 	/* Take care to account for frag_list */
408 	while (ext_list) {
409 		++num_nbuf;
410 		ext_list = qdf_nbuf_queue_next(ext_list);
411 	}
412 
413 	qdf_atomic_add(num_nbuf, &nbuf_count);
414 }
415 qdf_export_symbol(__qdf_nbuf_count_inc);
416 
417 /**
418  * __qdf_nbuf_count_dec() - decrement nbuf global count
419  *
420  * @buf: sk buff
421  *
422  * Return: void
423  */
424 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
425 {
426 	qdf_nbuf_t ext_list;
427 	int num_nbuf;
428 
429 	if (qdf_likely(is_initial_mem_debug_disabled))
430 		return;
431 
432 	if (qdf_nbuf_get_users(nbuf) > 1)
433 		return;
434 
435 	num_nbuf = 1;
436 
437 	/* Take care to account for frag_list */
438 	ext_list = qdf_nbuf_get_ext_list(nbuf);
439 	while (ext_list) {
440 		if (qdf_nbuf_get_users(ext_list) == 1)
441 			++num_nbuf;
442 		ext_list = qdf_nbuf_queue_next(ext_list);
443 	}
444 
445 	qdf_atomic_sub(num_nbuf, &nbuf_count);
446 }
447 qdf_export_symbol(__qdf_nbuf_count_dec);
448 #endif
449 
450 #ifdef NBUF_FRAG_MEMORY_DEBUG
451 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
452 {
453 	qdf_nbuf_t ext_list;
454 	uint32_t num_nr_frags;
455 	uint32_t total_num_nr_frags;
456 
457 	if (qdf_likely(is_initial_mem_debug_disabled))
458 		return;
459 
460 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
461 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
462 
463 	total_num_nr_frags = num_nr_frags;
464 
465 	/* Take into account the frags attached to frag_list */
466 	ext_list = qdf_nbuf_get_ext_list(nbuf);
467 	while (ext_list) {
468 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
469 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
470 		total_num_nr_frags += num_nr_frags;
471 		ext_list = qdf_nbuf_queue_next(ext_list);
472 	}
473 
474 	qdf_frag_count_inc(total_num_nr_frags);
475 }
476 
477 qdf_export_symbol(qdf_nbuf_frag_count_inc);
478 
479 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
480 {
481 	qdf_nbuf_t ext_list;
482 	uint32_t num_nr_frags;
483 	uint32_t total_num_nr_frags;
484 
485 	if (qdf_likely(is_initial_mem_debug_disabled))
486 		return;
487 
488 	if (qdf_nbuf_get_users(nbuf) > 1)
489 		return;
490 
491 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
492 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
493 
494 	total_num_nr_frags = num_nr_frags;
495 
496 	/* Take into account the frags attached to frag_list */
497 	ext_list = qdf_nbuf_get_ext_list(nbuf);
498 	while (ext_list) {
499 		if (qdf_nbuf_get_users(ext_list) == 1) {
500 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
501 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
502 			total_num_nr_frags += num_nr_frags;
503 		}
504 		ext_list = qdf_nbuf_queue_next(ext_list);
505 	}
506 
507 	qdf_frag_count_dec(total_num_nr_frags);
508 }
509 
510 qdf_export_symbol(qdf_nbuf_frag_count_dec);
511 
512 #endif
513 
514 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
515 	!defined(QCA_WIFI_QCN9000)
516 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
517 				 int align, int prio, const char *func,
518 				 uint32_t line)
519 {
520 	struct sk_buff *skb;
521 	unsigned long offset;
522 	uint32_t lowmem_alloc_tries = 0;
523 
524 	if (align)
525 		size += (align - 1);
526 
527 realloc:
528 	skb = dev_alloc_skb(size);
529 
530 	if (skb)
531 		goto skb_alloc;
532 
533 	skb = pld_nbuf_pre_alloc(size);
534 
535 	if (!skb) {
536 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
537 				size, func, line);
538 		return NULL;
539 	}
540 
541 skb_alloc:
542 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
543 	 * Though we are trying to reserve low memory upfront to prevent this,
544 	 * we sometimes see SKBs allocated from low memory.
545 	 */
546 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
547 		lowmem_alloc_tries++;
548 		if (lowmem_alloc_tries > 100) {
549 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
550 				     size, func, line);
551 			return NULL;
552 		} else {
553 			/* Not freeing to make sure it
554 			 * will not get allocated again
555 			 */
556 			goto realloc;
557 		}
558 	}
559 	memset(skb->cb, 0x0, sizeof(skb->cb));
560 
561 	/*
562 	 * The default is for netbuf fragments to be interpreted
563 	 * as wordstreams rather than bytestreams.
564 	 */
565 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
566 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
567 
568 	/*
569 	 * XXX:how about we reserve first then align
570 	 * Align & make sure that the tail & data are adjusted properly
571 	 */
572 
573 	if (align) {
574 		offset = ((unsigned long)skb->data) % align;
575 		if (offset)
576 			skb_reserve(skb, align - offset);
577 	}
578 
579 	/*
580 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
581 	 * pointer
582 	 */
583 	skb_reserve(skb, reserve);
584 	qdf_nbuf_count_inc(skb);
585 
586 	return skb;
587 }
588 #else
589 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
590 				 int align, int prio, const char *func,
591 				 uint32_t line)
592 {
593 	struct sk_buff *skb;
594 	unsigned long offset;
595 	int flags = GFP_KERNEL;
596 
597 	if (align)
598 		size += (align - 1);
599 
600 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
601 		flags = GFP_ATOMIC;
602 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
603 		/*
604 		 * Observed that kcompactd burns out CPU to make order-3 page.
605 		 *__netdev_alloc_skb has 4k page fallback option just in case of
606 		 * failing high order page allocation so we don't need to be
607 		 * hard. Make kcompactd rest in piece.
608 		 */
609 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
610 #endif
611 	}
612 
613 	skb = __netdev_alloc_skb(NULL, size, flags);
614 
615 	if (skb)
616 		goto skb_alloc;
617 
618 	skb = pld_nbuf_pre_alloc(size);
619 
620 	if (!skb) {
621 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
622 				size, func, line);
623 		__qdf_nbuf_start_replenish_timer();
624 		return NULL;
625 	} else {
626 		__qdf_nbuf_stop_replenish_timer();
627 	}
628 
629 skb_alloc:
630 	memset(skb->cb, 0x0, sizeof(skb->cb));
631 
632 	/*
633 	 * The default is for netbuf fragments to be interpreted
634 	 * as wordstreams rather than bytestreams.
635 	 */
636 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
637 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
638 
639 	/*
640 	 * XXX:how about we reserve first then align
641 	 * Align & make sure that the tail & data are adjusted properly
642 	 */
643 
644 	if (align) {
645 		offset = ((unsigned long)skb->data) % align;
646 		if (offset)
647 			skb_reserve(skb, align - offset);
648 	}
649 
650 	/*
651 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
652 	 * pointer
653 	 */
654 	skb_reserve(skb, reserve);
655 	qdf_nbuf_count_inc(skb);
656 
657 	return skb;
658 }
659 #endif
660 qdf_export_symbol(__qdf_nbuf_alloc);
661 
662 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
663 					  const char *func, uint32_t line)
664 {
665 	qdf_nbuf_t nbuf;
666 	unsigned long offset;
667 
668 	if (align)
669 		size += (align - 1);
670 
671 	nbuf = alloc_skb(size, GFP_ATOMIC);
672 	if (!nbuf)
673 		goto ret_nbuf;
674 
675 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
676 
677 	skb_reserve(nbuf, reserve);
678 
679 	if (align) {
680 		offset = ((unsigned long)nbuf->data) % align;
681 		if (offset)
682 			skb_reserve(nbuf, align - offset);
683 	}
684 
685 	qdf_nbuf_count_inc(nbuf);
686 
687 ret_nbuf:
688 	return nbuf;
689 }
690 
691 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
692 
693 /**
694  * __qdf_nbuf_free() - free the nbuf its interrupt safe
695  * @skb: Pointer to network buffer
696  *
697  * Return: none
698  */
699 
700 void __qdf_nbuf_free(struct sk_buff *skb)
701 {
702 	if (pld_nbuf_pre_alloc_free(skb))
703 		return;
704 
705 	qdf_nbuf_frag_count_dec(skb);
706 
707 	qdf_nbuf_count_dec(skb);
708 	if (nbuf_free_cb)
709 		nbuf_free_cb(skb);
710 	else
711 		dev_kfree_skb_any(skb);
712 }
713 
714 qdf_export_symbol(__qdf_nbuf_free);
715 
716 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
717 {
718 	qdf_nbuf_t skb_new = NULL;
719 
720 	skb_new = skb_clone(skb, GFP_ATOMIC);
721 	if (skb_new) {
722 		qdf_nbuf_frag_count_inc(skb_new);
723 		qdf_nbuf_count_inc(skb_new);
724 	}
725 	return skb_new;
726 }
727 
728 qdf_export_symbol(__qdf_nbuf_clone);
729 
730 #ifdef NBUF_MEMORY_DEBUG
731 enum qdf_nbuf_event_type {
732 	QDF_NBUF_ALLOC,
733 	QDF_NBUF_ALLOC_CLONE,
734 	QDF_NBUF_ALLOC_COPY,
735 	QDF_NBUF_ALLOC_FAILURE,
736 	QDF_NBUF_FREE,
737 	QDF_NBUF_MAP,
738 	QDF_NBUF_UNMAP,
739 	QDF_NBUF_ALLOC_COPY_EXPAND,
740 };
741 
742 struct qdf_nbuf_event {
743 	qdf_nbuf_t nbuf;
744 	char func[QDF_MEM_FUNC_NAME_SIZE];
745 	uint32_t line;
746 	enum qdf_nbuf_event_type type;
747 	uint64_t timestamp;
748 	qdf_dma_addr_t iova;
749 };
750 
751 #ifndef QDF_NBUF_HISTORY_SIZE
752 #define QDF_NBUF_HISTORY_SIZE 4096
753 #endif
754 static qdf_atomic_t qdf_nbuf_history_index;
755 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
756 
757 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
758 {
759 	int32_t next = qdf_atomic_inc_return(index);
760 
761 	if (next == size)
762 		qdf_atomic_sub(size, index);
763 
764 	return next % size;
765 }
766 
767 static void
768 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
769 		     enum qdf_nbuf_event_type type)
770 {
771 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
772 						   QDF_NBUF_HISTORY_SIZE);
773 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
774 
775 	if (qdf_atomic_read(&smmu_crashed)) {
776 		g_histroy_add_drop++;
777 		return;
778 	}
779 
780 	event->nbuf = nbuf;
781 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
782 	event->line = line;
783 	event->type = type;
784 	event->timestamp = qdf_get_log_timestamp();
785 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP)
786 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
787 	else
788 		event->iova = 0;
789 }
790 
791 void qdf_set_smmu_fault_state(bool smmu_fault_state)
792 {
793 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
794 	if (!smmu_fault_state)
795 		g_histroy_add_drop = 0;
796 }
797 qdf_export_symbol(qdf_set_smmu_fault_state);
798 #endif /* NBUF_MEMORY_DEBUG */
799 
800 #ifdef NBUF_MAP_UNMAP_DEBUG
801 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
802 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
803 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
804 
805 static void qdf_nbuf_map_tracking_init(void)
806 {
807 	qdf_tracker_init(&qdf_nbuf_map_tracker);
808 }
809 
810 static void qdf_nbuf_map_tracking_deinit(void)
811 {
812 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
813 }
814 
815 static QDF_STATUS
816 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
817 {
818 	if (is_initial_mem_debug_disabled)
819 		return QDF_STATUS_SUCCESS;
820 
821 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
822 }
823 
824 static void
825 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
826 {
827 	if (is_initial_mem_debug_disabled)
828 		return;
829 
830 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
831 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
832 }
833 
834 void qdf_nbuf_map_check_for_leaks(void)
835 {
836 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
837 }
838 
839 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
840 			      qdf_nbuf_t buf,
841 			      qdf_dma_dir_t dir,
842 			      const char *func,
843 			      uint32_t line)
844 {
845 	QDF_STATUS status;
846 
847 	status = qdf_nbuf_track_map(buf, func, line);
848 	if (QDF_IS_STATUS_ERROR(status))
849 		return status;
850 
851 	status = __qdf_nbuf_map(osdev, buf, dir);
852 	if (QDF_IS_STATUS_ERROR(status)) {
853 		qdf_nbuf_untrack_map(buf, func, line);
854 	} else {
855 		if (!is_initial_mem_debug_disabled)
856 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
857 		qdf_net_buf_debug_update_map_node(buf, func, line);
858 	}
859 
860 	return status;
861 }
862 
863 qdf_export_symbol(qdf_nbuf_map_debug);
864 
865 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
866 			  qdf_nbuf_t buf,
867 			  qdf_dma_dir_t dir,
868 			  const char *func,
869 			  uint32_t line)
870 {
871 	qdf_nbuf_untrack_map(buf, func, line);
872 	__qdf_nbuf_unmap_single(osdev, buf, dir);
873 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
874 }
875 
876 qdf_export_symbol(qdf_nbuf_unmap_debug);
877 
878 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
879 				     qdf_nbuf_t buf,
880 				     qdf_dma_dir_t dir,
881 				     const char *func,
882 				     uint32_t line)
883 {
884 	QDF_STATUS status;
885 
886 	status = qdf_nbuf_track_map(buf, func, line);
887 	if (QDF_IS_STATUS_ERROR(status))
888 		return status;
889 
890 	status = __qdf_nbuf_map_single(osdev, buf, dir);
891 	if (QDF_IS_STATUS_ERROR(status)) {
892 		qdf_nbuf_untrack_map(buf, func, line);
893 	} else {
894 		if (!is_initial_mem_debug_disabled)
895 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
896 		qdf_net_buf_debug_update_map_node(buf, func, line);
897 	}
898 
899 	return status;
900 }
901 
902 qdf_export_symbol(qdf_nbuf_map_single_debug);
903 
904 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
905 				 qdf_nbuf_t buf,
906 				 qdf_dma_dir_t dir,
907 				 const char *func,
908 				 uint32_t line)
909 {
910 	qdf_nbuf_untrack_map(buf, func, line);
911 	__qdf_nbuf_unmap_single(osdev, buf, dir);
912 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
913 }
914 
915 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
916 
917 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
918 				     qdf_nbuf_t buf,
919 				     qdf_dma_dir_t dir,
920 				     int nbytes,
921 				     const char *func,
922 				     uint32_t line)
923 {
924 	QDF_STATUS status;
925 
926 	status = qdf_nbuf_track_map(buf, func, line);
927 	if (QDF_IS_STATUS_ERROR(status))
928 		return status;
929 
930 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
931 	if (QDF_IS_STATUS_ERROR(status)) {
932 		qdf_nbuf_untrack_map(buf, func, line);
933 	} else {
934 		if (!is_initial_mem_debug_disabled)
935 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
936 		qdf_net_buf_debug_update_map_node(buf, func, line);
937 	}
938 
939 	return status;
940 }
941 
942 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
943 
944 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
945 				 qdf_nbuf_t buf,
946 				 qdf_dma_dir_t dir,
947 				 int nbytes,
948 				 const char *func,
949 				 uint32_t line)
950 {
951 	qdf_nbuf_untrack_map(buf, func, line);
952 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
953 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
954 }
955 
956 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
957 
958 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
959 					    qdf_nbuf_t buf,
960 					    qdf_dma_dir_t dir,
961 					    int nbytes,
962 					    const char *func,
963 					    uint32_t line)
964 {
965 	QDF_STATUS status;
966 
967 	status = qdf_nbuf_track_map(buf, func, line);
968 	if (QDF_IS_STATUS_ERROR(status))
969 		return status;
970 
971 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
972 	if (QDF_IS_STATUS_ERROR(status)) {
973 		qdf_nbuf_untrack_map(buf, func, line);
974 	} else {
975 		if (!is_initial_mem_debug_disabled)
976 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
977 		qdf_net_buf_debug_update_map_node(buf, func, line);
978 	}
979 
980 	return status;
981 }
982 
983 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
984 
985 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
986 					qdf_nbuf_t buf,
987 					qdf_dma_dir_t dir,
988 					int nbytes,
989 					const char *func,
990 					uint32_t line)
991 {
992 	qdf_nbuf_untrack_map(buf, func, line);
993 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
994 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
995 }
996 
997 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
998 
999 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1000 					      qdf_nbuf_t buf,
1001 					      qdf_dma_addr_t phy_addr,
1002 					      qdf_dma_dir_t dir, int nbytes,
1003 					      const char *func, uint32_t line)
1004 {
1005 	qdf_nbuf_untrack_map(buf, func, line);
1006 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1007 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1008 }
1009 
1010 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1011 
1012 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1013 					     const char *func,
1014 					     uint32_t line)
1015 {
1016 	char map_func[QDF_TRACKER_FUNC_SIZE];
1017 	uint32_t map_line;
1018 
1019 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1020 				&map_func, &map_line))
1021 		return;
1022 
1023 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1024 			   func, line, map_func, map_line);
1025 }
1026 #else
1027 static inline void qdf_nbuf_map_tracking_init(void)
1028 {
1029 }
1030 
1031 static inline void qdf_nbuf_map_tracking_deinit(void)
1032 {
1033 }
1034 
1035 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1036 						    const char *func,
1037 						    uint32_t line)
1038 {
1039 }
1040 #endif /* NBUF_MAP_UNMAP_DEBUG */
1041 
1042 /**
1043  * __qdf_nbuf_map() - map a buffer to local bus address space
1044  * @osdev: OS device
1045  * @bmap: Bitmap
1046  * @skb: Pointer to network buffer
1047  * @dir: Direction
1048  *
1049  * Return: QDF_STATUS
1050  */
1051 #ifdef QDF_OS_DEBUG
1052 QDF_STATUS
1053 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1054 {
1055 	struct skb_shared_info *sh = skb_shinfo(skb);
1056 
1057 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1058 			|| (dir == QDF_DMA_FROM_DEVICE));
1059 
1060 	/*
1061 	 * Assume there's only a single fragment.
1062 	 * To support multiple fragments, it would be necessary to change
1063 	 * qdf_nbuf_t to be a separate object that stores meta-info
1064 	 * (including the bus address for each fragment) and a pointer
1065 	 * to the underlying sk_buff.
1066 	 */
1067 	qdf_assert(sh->nr_frags == 0);
1068 
1069 	return __qdf_nbuf_map_single(osdev, skb, dir);
1070 }
1071 qdf_export_symbol(__qdf_nbuf_map);
1072 
1073 #else
1074 QDF_STATUS
1075 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1076 {
1077 	return __qdf_nbuf_map_single(osdev, skb, dir);
1078 }
1079 qdf_export_symbol(__qdf_nbuf_map);
1080 #endif
1081 /**
1082  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1083  * @osdev: OS device
1084  * @skb: Pointer to network buffer
1085  * @dir: dma direction
1086  *
1087  * Return: none
1088  */
1089 void
1090 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1091 			qdf_dma_dir_t dir)
1092 {
1093 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1094 		   || (dir == QDF_DMA_FROM_DEVICE));
1095 
1096 	/*
1097 	 * Assume there's a single fragment.
1098 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1099 	 */
1100 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1101 }
1102 qdf_export_symbol(__qdf_nbuf_unmap);
1103 
1104 /**
1105  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1106  * @osdev: OS device
1107  * @skb: Pointer to network buffer
1108  * @dir: Direction
1109  *
1110  * Return: QDF_STATUS
1111  */
1112 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1113 QDF_STATUS
1114 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1115 {
1116 	qdf_dma_addr_t paddr;
1117 
1118 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1119 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1120 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1121 	return QDF_STATUS_SUCCESS;
1122 }
1123 qdf_export_symbol(__qdf_nbuf_map_single);
1124 #else
1125 QDF_STATUS
1126 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1127 {
1128 	qdf_dma_addr_t paddr;
1129 
1130 	/* assume that the OS only provides a single fragment */
1131 	QDF_NBUF_CB_PADDR(buf) = paddr =
1132 		dma_map_single(osdev->dev, buf->data,
1133 				skb_end_pointer(buf) - buf->data,
1134 				__qdf_dma_dir_to_os(dir));
1135 	__qdf_record_nbuf_nbytes(
1136 		__qdf_nbuf_get_end_offset(buf), dir, true);
1137 	return dma_mapping_error(osdev->dev, paddr)
1138 		? QDF_STATUS_E_FAILURE
1139 		: QDF_STATUS_SUCCESS;
1140 }
1141 qdf_export_symbol(__qdf_nbuf_map_single);
1142 #endif
1143 /**
1144  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1145  * @osdev: OS device
1146  * @skb: Pointer to network buffer
1147  * @dir: Direction
1148  *
1149  * Return: none
1150  */
1151 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1152 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1153 				qdf_dma_dir_t dir)
1154 {
1155 }
1156 #else
1157 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1158 					qdf_dma_dir_t dir)
1159 {
1160 	if (QDF_NBUF_CB_PADDR(buf)) {
1161 		__qdf_record_nbuf_nbytes(
1162 			__qdf_nbuf_get_end_offset(buf), dir, false);
1163 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1164 			skb_end_pointer(buf) - buf->data,
1165 			__qdf_dma_dir_to_os(dir));
1166 	}
1167 }
1168 #endif
1169 qdf_export_symbol(__qdf_nbuf_unmap_single);
1170 
1171 /**
1172  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1173  * @skb: Pointer to network buffer
1174  * @cksum: Pointer to checksum value
1175  *
1176  * Return: QDF_STATUS
1177  */
1178 QDF_STATUS
1179 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1180 {
1181 	switch (cksum->l4_result) {
1182 	case QDF_NBUF_RX_CKSUM_NONE:
1183 		skb->ip_summed = CHECKSUM_NONE;
1184 		break;
1185 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1186 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1187 		break;
1188 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1189 		skb->ip_summed = CHECKSUM_PARTIAL;
1190 		skb->csum = cksum->val;
1191 		break;
1192 	default:
1193 		pr_err("Unknown checksum type\n");
1194 		qdf_assert(0);
1195 		return QDF_STATUS_E_NOSUPPORT;
1196 	}
1197 	return QDF_STATUS_SUCCESS;
1198 }
1199 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1200 
1201 /**
1202  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1203  * @skb: Pointer to network buffer
1204  *
1205  * Return: TX checksum value
1206  */
1207 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1208 {
1209 	switch (skb->ip_summed) {
1210 	case CHECKSUM_NONE:
1211 		return QDF_NBUF_TX_CKSUM_NONE;
1212 	case CHECKSUM_PARTIAL:
1213 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1214 	case CHECKSUM_COMPLETE:
1215 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1216 	default:
1217 		return QDF_NBUF_TX_CKSUM_NONE;
1218 	}
1219 }
1220 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1221 
1222 /**
1223  * __qdf_nbuf_get_tid() - get tid
1224  * @skb: Pointer to network buffer
1225  *
1226  * Return: tid
1227  */
1228 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1229 {
1230 	return skb->priority;
1231 }
1232 qdf_export_symbol(__qdf_nbuf_get_tid);
1233 
1234 /**
1235  * __qdf_nbuf_set_tid() - set tid
1236  * @skb: Pointer to network buffer
1237  *
1238  * Return: none
1239  */
1240 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1241 {
1242 	skb->priority = tid;
1243 }
1244 qdf_export_symbol(__qdf_nbuf_set_tid);
1245 
1246 /**
1247  * __qdf_nbuf_set_tid() - set tid
1248  * @skb: Pointer to network buffer
1249  *
1250  * Return: none
1251  */
1252 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1253 {
1254 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1255 }
1256 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1257 
1258 /**
1259  * __qdf_nbuf_reg_trace_cb() - register trace callback
1260  * @cb_func_ptr: Pointer to trace callback function
1261  *
1262  * Return: none
1263  */
1264 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1265 {
1266 	qdf_trace_update_cb = cb_func_ptr;
1267 }
1268 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1269 
1270 /**
1271  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1272  *              of DHCP packet.
1273  * @data: Pointer to DHCP packet data buffer
1274  *
1275  * This func. returns the subtype of DHCP packet.
1276  *
1277  * Return: subtype of the DHCP packet.
1278  */
1279 enum qdf_proto_subtype
1280 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1281 {
1282 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1283 
1284 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1285 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1286 					QDF_DHCP_OPTION53_LENGTH)) {
1287 
1288 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1289 		case QDF_DHCP_DISCOVER:
1290 			subtype = QDF_PROTO_DHCP_DISCOVER;
1291 			break;
1292 		case QDF_DHCP_REQUEST:
1293 			subtype = QDF_PROTO_DHCP_REQUEST;
1294 			break;
1295 		case QDF_DHCP_OFFER:
1296 			subtype = QDF_PROTO_DHCP_OFFER;
1297 			break;
1298 		case QDF_DHCP_ACK:
1299 			subtype = QDF_PROTO_DHCP_ACK;
1300 			break;
1301 		case QDF_DHCP_NAK:
1302 			subtype = QDF_PROTO_DHCP_NACK;
1303 			break;
1304 		case QDF_DHCP_RELEASE:
1305 			subtype = QDF_PROTO_DHCP_RELEASE;
1306 			break;
1307 		case QDF_DHCP_INFORM:
1308 			subtype = QDF_PROTO_DHCP_INFORM;
1309 			break;
1310 		case QDF_DHCP_DECLINE:
1311 			subtype = QDF_PROTO_DHCP_DECLINE;
1312 			break;
1313 		default:
1314 			break;
1315 		}
1316 	}
1317 
1318 	return subtype;
1319 }
1320 
1321 /**
1322  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1323  *            of EAPOL packet.
1324  * @data: Pointer to EAPOL packet data buffer
1325  *
1326  * This func. returns the subtype of EAPOL packet.
1327  *
1328  * Return: subtype of the EAPOL packet.
1329  */
1330 enum qdf_proto_subtype
1331 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1332 {
1333 	uint16_t eapol_key_info;
1334 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1335 	uint16_t mask;
1336 
1337 	eapol_key_info = (uint16_t)(*(uint16_t *)
1338 			(data + EAPOL_KEY_INFO_OFFSET));
1339 
1340 	mask = eapol_key_info & EAPOL_MASK;
1341 	switch (mask) {
1342 	case EAPOL_M1_BIT_MASK:
1343 		subtype = QDF_PROTO_EAPOL_M1;
1344 		break;
1345 	case EAPOL_M2_BIT_MASK:
1346 		subtype = QDF_PROTO_EAPOL_M2;
1347 		break;
1348 	case EAPOL_M3_BIT_MASK:
1349 		subtype = QDF_PROTO_EAPOL_M3;
1350 		break;
1351 	case EAPOL_M4_BIT_MASK:
1352 		subtype = QDF_PROTO_EAPOL_M4;
1353 		break;
1354 	default:
1355 		break;
1356 	}
1357 
1358 	return subtype;
1359 }
1360 
1361 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1362 
1363 /**
1364  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1365  *            of ARP packet.
1366  * @data: Pointer to ARP packet data buffer
1367  *
1368  * This func. returns the subtype of ARP packet.
1369  *
1370  * Return: subtype of the ARP packet.
1371  */
1372 enum qdf_proto_subtype
1373 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1374 {
1375 	uint16_t subtype;
1376 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1377 
1378 	subtype = (uint16_t)(*(uint16_t *)
1379 			(data + ARP_SUB_TYPE_OFFSET));
1380 
1381 	switch (QDF_SWAP_U16(subtype)) {
1382 	case ARP_REQUEST:
1383 		proto_subtype = QDF_PROTO_ARP_REQ;
1384 		break;
1385 	case ARP_RESPONSE:
1386 		proto_subtype = QDF_PROTO_ARP_RES;
1387 		break;
1388 	default:
1389 		break;
1390 	}
1391 
1392 	return proto_subtype;
1393 }
1394 
1395 /**
1396  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1397  *            of IPV4 ICMP packet.
1398  * @data: Pointer to IPV4 ICMP packet data buffer
1399  *
1400  * This func. returns the subtype of ICMP packet.
1401  *
1402  * Return: subtype of the ICMP packet.
1403  */
1404 enum qdf_proto_subtype
1405 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1406 {
1407 	uint8_t subtype;
1408 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1409 
1410 	subtype = (uint8_t)(*(uint8_t *)
1411 			(data + ICMP_SUBTYPE_OFFSET));
1412 
1413 	switch (subtype) {
1414 	case ICMP_REQUEST:
1415 		proto_subtype = QDF_PROTO_ICMP_REQ;
1416 		break;
1417 	case ICMP_RESPONSE:
1418 		proto_subtype = QDF_PROTO_ICMP_RES;
1419 		break;
1420 	default:
1421 		break;
1422 	}
1423 
1424 	return proto_subtype;
1425 }
1426 
1427 /**
1428  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1429  *            of IPV6 ICMPV6 packet.
1430  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1431  *
1432  * This func. returns the subtype of ICMPV6 packet.
1433  *
1434  * Return: subtype of the ICMPV6 packet.
1435  */
1436 enum qdf_proto_subtype
1437 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1438 {
1439 	uint8_t subtype;
1440 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1441 
1442 	subtype = (uint8_t)(*(uint8_t *)
1443 			(data + ICMPV6_SUBTYPE_OFFSET));
1444 
1445 	switch (subtype) {
1446 	case ICMPV6_REQUEST:
1447 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1448 		break;
1449 	case ICMPV6_RESPONSE:
1450 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1451 		break;
1452 	case ICMPV6_RS:
1453 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1454 		break;
1455 	case ICMPV6_RA:
1456 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1457 		break;
1458 	case ICMPV6_NS:
1459 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1460 		break;
1461 	case ICMPV6_NA:
1462 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1463 		break;
1464 	default:
1465 		break;
1466 	}
1467 
1468 	return proto_subtype;
1469 }
1470 
1471 /**
1472  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1473  *            of IPV4 packet.
1474  * @data: Pointer to IPV4 packet data buffer
1475  *
1476  * This func. returns the proto type of IPV4 packet.
1477  *
1478  * Return: proto type of IPV4 packet.
1479  */
1480 uint8_t
1481 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1482 {
1483 	uint8_t proto_type;
1484 
1485 	proto_type = (uint8_t)(*(uint8_t *)(data +
1486 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1487 	return proto_type;
1488 }
1489 
1490 /**
1491  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1492  *            of IPV6 packet.
1493  * @data: Pointer to IPV6 packet data buffer
1494  *
1495  * This func. returns the proto type of IPV6 packet.
1496  *
1497  * Return: proto type of IPV6 packet.
1498  */
1499 uint8_t
1500 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1501 {
1502 	uint8_t proto_type;
1503 
1504 	proto_type = (uint8_t)(*(uint8_t *)(data +
1505 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1506 	return proto_type;
1507 }
1508 
1509 /**
1510  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1511  * @data: Pointer to network data
1512  *
1513  * This api is for Tx packets.
1514  *
1515  * Return: true if packet is ipv4 packet
1516  *	   false otherwise
1517  */
1518 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1519 {
1520 	uint16_t ether_type;
1521 
1522 	ether_type = (uint16_t)(*(uint16_t *)(data +
1523 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1524 
1525 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1526 		return true;
1527 	else
1528 		return false;
1529 }
1530 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1531 
1532 /**
1533  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1534  * @data: Pointer to network data buffer
1535  *
1536  * This api is for ipv4 packet.
1537  *
1538  * Return: true if packet is DHCP packet
1539  *	   false otherwise
1540  */
1541 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1542 {
1543 	uint16_t sport;
1544 	uint16_t dport;
1545 	uint8_t ipv4_offset;
1546 	uint8_t ipv4_hdr_len;
1547 	struct iphdr *iphdr;
1548 
1549 	if (__qdf_nbuf_get_ether_type(data) !=
1550 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1551 		return false;
1552 
1553 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1554 	iphdr = (struct iphdr *)(data + ipv4_offset);
1555 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1556 
1557 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1558 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1559 			      sizeof(uint16_t));
1560 
1561 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1562 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1563 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1564 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1565 		return true;
1566 	else
1567 		return false;
1568 }
1569 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1570 
1571 /**
1572  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1573  * @data: Pointer to network data buffer
1574  *
1575  * This api is for ipv4 packet.
1576  *
1577  * Return: true if packet is EAPOL packet
1578  *	   false otherwise.
1579  */
1580 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1581 {
1582 	uint16_t ether_type;
1583 
1584 	ether_type = __qdf_nbuf_get_ether_type(data);
1585 
1586 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1587 		return true;
1588 	else
1589 		return false;
1590 }
1591 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1592 
1593 /**
1594  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1595  * @skb: Pointer to network buffer
1596  *
1597  * This api is for ipv4 packet.
1598  *
1599  * Return: true if packet is WAPI packet
1600  *	   false otherwise.
1601  */
1602 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1603 {
1604 	uint16_t ether_type;
1605 
1606 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1607 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1608 
1609 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1610 		return true;
1611 	else
1612 		return false;
1613 }
1614 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1615 
1616 /**
1617  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1618  * @skb: Pointer to network buffer
1619  *
1620  * This api is for ipv4 packet.
1621  *
1622  * Return: true if packet is tdls packet
1623  *	   false otherwise.
1624  */
1625 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1626 {
1627 	uint16_t ether_type;
1628 
1629 	ether_type = *(uint16_t *)(skb->data +
1630 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1631 
1632 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1633 		return true;
1634 	else
1635 		return false;
1636 }
1637 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1638 
1639 /**
1640  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1641  * @data: Pointer to network data buffer
1642  *
1643  * This api is for ipv4 packet.
1644  *
1645  * Return: true if packet is ARP packet
1646  *	   false otherwise.
1647  */
1648 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1649 {
1650 	uint16_t ether_type;
1651 
1652 	ether_type = __qdf_nbuf_get_ether_type(data);
1653 
1654 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1655 		return true;
1656 	else
1657 		return false;
1658 }
1659 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1660 
1661 /**
1662  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1663  * @data: Pointer to network data buffer
1664  *
1665  * This api is for ipv4 packet.
1666  *
1667  * Return: true if packet is ARP request
1668  *	   false otherwise.
1669  */
1670 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1671 {
1672 	uint16_t op_code;
1673 
1674 	op_code = (uint16_t)(*(uint16_t *)(data +
1675 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1676 
1677 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1678 		return true;
1679 	return false;
1680 }
1681 
1682 /**
1683  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1684  * @data: Pointer to network data buffer
1685  *
1686  * This api is for ipv4 packet.
1687  *
1688  * Return: true if packet is ARP response
1689  *	   false otherwise.
1690  */
1691 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1692 {
1693 	uint16_t op_code;
1694 
1695 	op_code = (uint16_t)(*(uint16_t *)(data +
1696 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1697 
1698 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1699 		return true;
1700 	return false;
1701 }
1702 
1703 /**
1704  * __qdf_nbuf_data_get_arp_src_ip() - get arp src IP
1705  * @data: Pointer to network data buffer
1706  *
1707  * This api is for ipv4 packet.
1708  *
1709  * Return: ARP packet source IP value.
1710  */
1711 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1712 {
1713 	uint32_t src_ip;
1714 
1715 	src_ip = (uint32_t)(*(uint32_t *)(data +
1716 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1717 
1718 	return src_ip;
1719 }
1720 
1721 /**
1722  * __qdf_nbuf_data_get_arp_tgt_ip() - get arp target IP
1723  * @data: Pointer to network data buffer
1724  *
1725  * This api is for ipv4 packet.
1726  *
1727  * Return: ARP packet target IP value.
1728  */
1729 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1730 {
1731 	uint32_t tgt_ip;
1732 
1733 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1734 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1735 
1736 	return tgt_ip;
1737 }
1738 
1739 /**
1740  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1741  * @data: Pointer to network data buffer
1742  * @len: length to copy
1743  *
1744  * This api is for dns domain name
1745  *
1746  * Return: dns domain name.
1747  */
1748 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1749 {
1750 	uint8_t *domain_name;
1751 
1752 	domain_name = (uint8_t *)
1753 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1754 	return domain_name;
1755 }
1756 
1757 
1758 /**
1759  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1760  * @data: Pointer to network data buffer
1761  *
1762  * This api is for dns query packet.
1763  *
1764  * Return: true if packet is dns query packet.
1765  *	   false otherwise.
1766  */
1767 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1768 {
1769 	uint16_t op_code;
1770 	uint16_t tgt_port;
1771 
1772 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1773 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1774 	/* Standard DNS query always happen on Dest Port 53. */
1775 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1776 		op_code = (uint16_t)(*(uint16_t *)(data +
1777 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1778 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1779 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1780 			return true;
1781 	}
1782 	return false;
1783 }
1784 
1785 /**
1786  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1787  * @data: Pointer to network data buffer
1788  *
1789  * This api is for dns query response.
1790  *
1791  * Return: true if packet is dns response packet.
1792  *	   false otherwise.
1793  */
1794 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1795 {
1796 	uint16_t op_code;
1797 	uint16_t src_port;
1798 
1799 	src_port = (uint16_t)(*(uint16_t *)(data +
1800 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1801 	/* Standard DNS response always comes on Src Port 53. */
1802 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1803 		op_code = (uint16_t)(*(uint16_t *)(data +
1804 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1805 
1806 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1807 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1808 			return true;
1809 	}
1810 	return false;
1811 }
1812 
1813 /**
1814  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1815  * @data: Pointer to network data buffer
1816  *
1817  * This api is for tcp syn packet.
1818  *
1819  * Return: true if packet is tcp syn packet.
1820  *	   false otherwise.
1821  */
1822 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1823 {
1824 	uint8_t op_code;
1825 
1826 	op_code = (uint8_t)(*(uint8_t *)(data +
1827 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1828 
1829 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1830 		return true;
1831 	return false;
1832 }
1833 
1834 /**
1835  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1836  * @data: Pointer to network data buffer
1837  *
1838  * This api is for tcp syn ack packet.
1839  *
1840  * Return: true if packet is tcp syn ack packet.
1841  *	   false otherwise.
1842  */
1843 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1844 {
1845 	uint8_t op_code;
1846 
1847 	op_code = (uint8_t)(*(uint8_t *)(data +
1848 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1849 
1850 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1851 		return true;
1852 	return false;
1853 }
1854 
1855 /**
1856  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1857  * @data: Pointer to network data buffer
1858  *
1859  * This api is for tcp ack packet.
1860  *
1861  * Return: true if packet is tcp ack packet.
1862  *	   false otherwise.
1863  */
1864 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1865 {
1866 	uint8_t op_code;
1867 
1868 	op_code = (uint8_t)(*(uint8_t *)(data +
1869 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1870 
1871 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1872 		return true;
1873 	return false;
1874 }
1875 
1876 /**
1877  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
1878  * @data: Pointer to network data buffer
1879  *
1880  * This api is for tcp packet.
1881  *
1882  * Return: tcp source port value.
1883  */
1884 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
1885 {
1886 	uint16_t src_port;
1887 
1888 	src_port = (uint16_t)(*(uint16_t *)(data +
1889 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
1890 
1891 	return src_port;
1892 }
1893 
1894 /**
1895  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
1896  * @data: Pointer to network data buffer
1897  *
1898  * This api is for tcp packet.
1899  *
1900  * Return: tcp destination port value.
1901  */
1902 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
1903 {
1904 	uint16_t tgt_port;
1905 
1906 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1907 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
1908 
1909 	return tgt_port;
1910 }
1911 
1912 /**
1913  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is a icmpv4 request
1914  * @data: Pointer to network data buffer
1915  *
1916  * This api is for ipv4 req packet.
1917  *
1918  * Return: true if packet is icmpv4 request
1919  *	   false otherwise.
1920  */
1921 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
1922 {
1923 	uint8_t op_code;
1924 
1925 	op_code = (uint8_t)(*(uint8_t *)(data +
1926 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1927 
1928 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
1929 		return true;
1930 	return false;
1931 }
1932 
1933 /**
1934  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is a icmpv4 res
1935  * @data: Pointer to network data buffer
1936  *
1937  * This api is for ipv4 res packet.
1938  *
1939  * Return: true if packet is icmpv4 response
1940  *	   false otherwise.
1941  */
1942 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
1943 {
1944 	uint8_t op_code;
1945 
1946 	op_code = (uint8_t)(*(uint8_t *)(data +
1947 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
1948 
1949 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
1950 		return true;
1951 	return false;
1952 }
1953 
1954 /**
1955  * __qdf_nbuf_data_get_icmpv4_src_ip() - get icmpv4 src IP
1956  * @data: Pointer to network data buffer
1957  *
1958  * This api is for ipv4 packet.
1959  *
1960  * Return: icmpv4 packet source IP value.
1961  */
1962 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
1963 {
1964 	uint32_t src_ip;
1965 
1966 	src_ip = (uint32_t)(*(uint32_t *)(data +
1967 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
1968 
1969 	return src_ip;
1970 }
1971 
1972 /**
1973  * __qdf_nbuf_data_get_icmpv4_tgt_ip() - get icmpv4 target IP
1974  * @data: Pointer to network data buffer
1975  *
1976  * This api is for ipv4 packet.
1977  *
1978  * Return: icmpv4 packet target IP value.
1979  */
1980 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
1981 {
1982 	uint32_t tgt_ip;
1983 
1984 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1985 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
1986 
1987 	return tgt_ip;
1988 }
1989 
1990 
1991 /**
1992  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
1993  * @data: Pointer to IPV6 packet data buffer
1994  *
1995  * This func. checks whether it is a IPV6 packet or not.
1996  *
1997  * Return: TRUE if it is a IPV6 packet
1998  *         FALSE if not
1999  */
2000 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2001 {
2002 	uint16_t ether_type;
2003 
2004 	ether_type = (uint16_t)(*(uint16_t *)(data +
2005 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2006 
2007 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2008 		return true;
2009 	else
2010 		return false;
2011 }
2012 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2013 
2014 /**
2015  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2016  * @data: Pointer to network data buffer
2017  *
2018  * This api is for ipv6 packet.
2019  *
2020  * Return: true if packet is DHCP packet
2021  *	   false otherwise
2022  */
2023 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2024 {
2025 	uint16_t sport;
2026 	uint16_t dport;
2027 	uint8_t ipv6_offset;
2028 
2029 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2030 	sport = *(uint16_t *)(data + ipv6_offset +
2031 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2032 	dport = *(uint16_t *)(data + ipv6_offset +
2033 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2034 			      sizeof(uint16_t));
2035 
2036 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2037 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2038 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2039 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2040 		return true;
2041 	else
2042 		return false;
2043 }
2044 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2045 
2046 /**
2047  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2048  * @data: Pointer to network data buffer
2049  *
2050  * This api is for ipv6 packet.
2051  *
2052  * Return: true if packet is MDNS packet
2053  *	   false otherwise
2054  */
2055 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2056 {
2057 	uint16_t sport;
2058 	uint16_t dport;
2059 
2060 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2061 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2062 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2063 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2064 					sizeof(uint16_t));
2065 
2066 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2067 	    dport == sport)
2068 		return true;
2069 	else
2070 		return false;
2071 }
2072 
2073 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2074 
2075 /**
2076  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2077  * @data: Pointer to IPV4 packet data buffer
2078  *
2079  * This func. checks whether it is a IPV4 multicast packet or not.
2080  *
2081  * Return: TRUE if it is a IPV4 multicast packet
2082  *         FALSE if not
2083  */
2084 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2085 {
2086 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2087 		uint32_t *dst_addr =
2088 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2089 
2090 		/*
2091 		 * Check first word of the IPV4 address and if it is
2092 		 * equal to 0xE then it represents multicast IP.
2093 		 */
2094 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2095 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2096 			return true;
2097 		else
2098 			return false;
2099 	} else
2100 		return false;
2101 }
2102 
2103 /**
2104  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2105  * @data: Pointer to IPV6 packet data buffer
2106  *
2107  * This func. checks whether it is a IPV6 multicast packet or not.
2108  *
2109  * Return: TRUE if it is a IPV6 multicast packet
2110  *         FALSE if not
2111  */
2112 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2113 {
2114 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2115 		uint16_t *dst_addr;
2116 
2117 		dst_addr = (uint16_t *)
2118 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2119 
2120 		/*
2121 		 * Check first byte of the IP address and if it
2122 		 * 0xFF00 then it is a IPV6 mcast packet.
2123 		 */
2124 		if (*dst_addr ==
2125 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2126 			return true;
2127 		else
2128 			return false;
2129 	} else
2130 		return false;
2131 }
2132 
2133 /**
2134  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2135  * @data: Pointer to IPV4 ICMP packet data buffer
2136  *
2137  * This func. checks whether it is a ICMP packet or not.
2138  *
2139  * Return: TRUE if it is a ICMP packet
2140  *         FALSE if not
2141  */
2142 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2143 {
2144 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2145 		uint8_t pkt_type;
2146 
2147 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2148 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2149 
2150 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2151 			return true;
2152 		else
2153 			return false;
2154 	} else
2155 		return false;
2156 }
2157 
2158 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2159 
2160 /**
2161  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2162  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2163  *
2164  * This func. checks whether it is a ICMPV6 packet or not.
2165  *
2166  * Return: TRUE if it is a ICMPV6 packet
2167  *         FALSE if not
2168  */
2169 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2170 {
2171 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2172 		uint8_t pkt_type;
2173 
2174 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2175 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2176 
2177 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2178 			return true;
2179 		else
2180 			return false;
2181 	} else
2182 		return false;
2183 }
2184 
2185 /**
2186  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2187  * @data: Pointer to IPV4 UDP packet data buffer
2188  *
2189  * This func. checks whether it is a IPV4 UDP packet or not.
2190  *
2191  * Return: TRUE if it is a IPV4 UDP packet
2192  *         FALSE if not
2193  */
2194 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2195 {
2196 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2197 		uint8_t pkt_type;
2198 
2199 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2200 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2201 
2202 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2203 			return true;
2204 		else
2205 			return false;
2206 	} else
2207 		return false;
2208 }
2209 
2210 /**
2211  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2212  * @data: Pointer to IPV4 TCP packet data buffer
2213  *
2214  * This func. checks whether it is a IPV4 TCP packet or not.
2215  *
2216  * Return: TRUE if it is a IPV4 TCP packet
2217  *         FALSE if not
2218  */
2219 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2220 {
2221 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2222 		uint8_t pkt_type;
2223 
2224 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2225 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2226 
2227 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2228 			return true;
2229 		else
2230 			return false;
2231 	} else
2232 		return false;
2233 }
2234 
2235 /**
2236  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2237  * @data: Pointer to IPV6 UDP packet data buffer
2238  *
2239  * This func. checks whether it is a IPV6 UDP packet or not.
2240  *
2241  * Return: TRUE if it is a IPV6 UDP packet
2242  *         FALSE if not
2243  */
2244 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2245 {
2246 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2247 		uint8_t pkt_type;
2248 
2249 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2250 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2251 
2252 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2253 			return true;
2254 		else
2255 			return false;
2256 	} else
2257 		return false;
2258 }
2259 
2260 /**
2261  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2262  * @data: Pointer to IPV6 TCP packet data buffer
2263  *
2264  * This func. checks whether it is a IPV6 TCP packet or not.
2265  *
2266  * Return: TRUE if it is a IPV6 TCP packet
2267  *         FALSE if not
2268  */
2269 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2270 {
2271 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2272 		uint8_t pkt_type;
2273 
2274 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2275 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2276 
2277 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2278 			return true;
2279 		else
2280 			return false;
2281 	} else
2282 		return false;
2283 }
2284 
2285 /**
2286  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2287  * @nbuf - sk buff
2288  *
2289  * Return: true if packet is broadcast
2290  *	   false otherwise
2291  */
2292 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2293 {
2294 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2295 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2296 }
2297 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2298 
2299 #ifdef NBUF_MEMORY_DEBUG
2300 
2301 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2302 
2303 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2304 static struct kmem_cache *nbuf_tracking_cache;
2305 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2306 static spinlock_t qdf_net_buf_track_free_list_lock;
2307 static uint32_t qdf_net_buf_track_free_list_count;
2308 static uint32_t qdf_net_buf_track_used_list_count;
2309 static uint32_t qdf_net_buf_track_max_used;
2310 static uint32_t qdf_net_buf_track_max_free;
2311 static uint32_t qdf_net_buf_track_max_allocated;
2312 static uint32_t qdf_net_buf_track_fail_count;
2313 
2314 /**
2315  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2316  *
2317  * tracks the max number of network buffers that the wlan driver was tracking
2318  * at any one time.
2319  *
2320  * Return: none
2321  */
2322 static inline void update_max_used(void)
2323 {
2324 	int sum;
2325 
2326 	if (qdf_net_buf_track_max_used <
2327 	    qdf_net_buf_track_used_list_count)
2328 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2329 	sum = qdf_net_buf_track_free_list_count +
2330 		qdf_net_buf_track_used_list_count;
2331 	if (qdf_net_buf_track_max_allocated < sum)
2332 		qdf_net_buf_track_max_allocated = sum;
2333 }
2334 
2335 /**
2336  * update_max_free() - update qdf_net_buf_track_free_list_count
2337  *
2338  * tracks the max number tracking buffers kept in the freelist.
2339  *
2340  * Return: none
2341  */
2342 static inline void update_max_free(void)
2343 {
2344 	if (qdf_net_buf_track_max_free <
2345 	    qdf_net_buf_track_free_list_count)
2346 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2347 }
2348 
2349 /**
2350  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2351  *
2352  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2353  * This function also ads fexibility to adjust the allocation and freelist
2354  * scheems.
2355  *
2356  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2357  */
2358 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2359 {
2360 	int flags = GFP_KERNEL;
2361 	unsigned long irq_flag;
2362 	QDF_NBUF_TRACK *new_node = NULL;
2363 
2364 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2365 	qdf_net_buf_track_used_list_count++;
2366 	if (qdf_net_buf_track_free_list) {
2367 		new_node = qdf_net_buf_track_free_list;
2368 		qdf_net_buf_track_free_list =
2369 			qdf_net_buf_track_free_list->p_next;
2370 		qdf_net_buf_track_free_list_count--;
2371 	}
2372 	update_max_used();
2373 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2374 
2375 	if (new_node)
2376 		return new_node;
2377 
2378 	if (in_interrupt() || irqs_disabled() || in_atomic())
2379 		flags = GFP_ATOMIC;
2380 
2381 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2382 }
2383 
2384 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2385 #define FREEQ_POOLSIZE 2048
2386 
2387 /**
2388  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2389  *
2390  * Matches calls to qdf_nbuf_track_alloc.
2391  * Either frees the tracking cookie to kernel or an internal
2392  * freelist based on the size of the freelist.
2393  *
2394  * Return: none
2395  */
2396 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2397 {
2398 	unsigned long irq_flag;
2399 
2400 	if (!node)
2401 		return;
2402 
2403 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2404 	 * only shrink the freelist if it is bigger than twice the number of
2405 	 * nbufs in use. If the driver is stalling in a consistent bursty
2406 	 * fasion, this will keep 3/4 of thee allocations from the free list
2407 	 * while also allowing the system to recover memory as less frantic
2408 	 * traffic occurs.
2409 	 */
2410 
2411 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2412 
2413 	qdf_net_buf_track_used_list_count--;
2414 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2415 	   (qdf_net_buf_track_free_list_count >
2416 	    qdf_net_buf_track_used_list_count << 1)) {
2417 		kmem_cache_free(nbuf_tracking_cache, node);
2418 	} else {
2419 		node->p_next = qdf_net_buf_track_free_list;
2420 		qdf_net_buf_track_free_list = node;
2421 		qdf_net_buf_track_free_list_count++;
2422 	}
2423 	update_max_free();
2424 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2425 }
2426 
2427 /**
2428  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2429  *
2430  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2431  * the freelist first makes it performant for the first iperf udp burst
2432  * as well as steady state.
2433  *
2434  * Return: None
2435  */
2436 static void qdf_nbuf_track_prefill(void)
2437 {
2438 	int i;
2439 	QDF_NBUF_TRACK *node, *head;
2440 
2441 	/* prepopulate the freelist */
2442 	head = NULL;
2443 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2444 		node = qdf_nbuf_track_alloc();
2445 		if (!node)
2446 			continue;
2447 		node->p_next = head;
2448 		head = node;
2449 	}
2450 	while (head) {
2451 		node = head->p_next;
2452 		qdf_nbuf_track_free(head);
2453 		head = node;
2454 	}
2455 
2456 	/* prefilled buffers should not count as used */
2457 	qdf_net_buf_track_max_used = 0;
2458 }
2459 
2460 /**
2461  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2462  *
2463  * This initializes the memory manager for the nbuf tracking cookies.  Because
2464  * these cookies are all the same size and only used in this feature, we can
2465  * use a kmem_cache to provide tracking as well as to speed up allocations.
2466  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2467  * features) a freelist is prepopulated here.
2468  *
2469  * Return: None
2470  */
2471 static void qdf_nbuf_track_memory_manager_create(void)
2472 {
2473 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2474 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2475 						sizeof(QDF_NBUF_TRACK),
2476 						0, 0, NULL);
2477 
2478 	qdf_nbuf_track_prefill();
2479 }
2480 
2481 /**
2482  * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
2483  *
2484  * Empty the freelist and print out usage statistics when it is no longer
2485  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2486  * any nbuf tracking cookies were leaked.
2487  *
2488  * Return: None
2489  */
2490 static void qdf_nbuf_track_memory_manager_destroy(void)
2491 {
2492 	QDF_NBUF_TRACK *node, *tmp;
2493 	unsigned long irq_flag;
2494 
2495 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2496 	node = qdf_net_buf_track_free_list;
2497 
2498 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2499 		qdf_print("%s: unexpectedly large max_used count %d",
2500 			  __func__, qdf_net_buf_track_max_used);
2501 
2502 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2503 		qdf_print("%s: %d unused trackers were allocated",
2504 			  __func__,
2505 			  qdf_net_buf_track_max_allocated -
2506 			  qdf_net_buf_track_max_used);
2507 
2508 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2509 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2510 		qdf_print("%s: check freelist shrinking functionality",
2511 			  __func__);
2512 
2513 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2514 		  "%s: %d residual freelist size",
2515 		  __func__, qdf_net_buf_track_free_list_count);
2516 
2517 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2518 		  "%s: %d max freelist size observed",
2519 		  __func__, qdf_net_buf_track_max_free);
2520 
2521 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2522 		  "%s: %d max buffers used observed",
2523 		  __func__, qdf_net_buf_track_max_used);
2524 
2525 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2526 		  "%s: %d max buffers allocated observed",
2527 		  __func__, qdf_net_buf_track_max_allocated);
2528 
2529 	while (node) {
2530 		tmp = node;
2531 		node = node->p_next;
2532 		kmem_cache_free(nbuf_tracking_cache, tmp);
2533 		qdf_net_buf_track_free_list_count--;
2534 	}
2535 
2536 	if (qdf_net_buf_track_free_list_count != 0)
2537 		qdf_info("%d unfreed tracking memory lost in freelist",
2538 			 qdf_net_buf_track_free_list_count);
2539 
2540 	if (qdf_net_buf_track_used_list_count != 0)
2541 		qdf_info("%d unfreed tracking memory still in use",
2542 			 qdf_net_buf_track_used_list_count);
2543 
2544 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2545 	kmem_cache_destroy(nbuf_tracking_cache);
2546 	qdf_net_buf_track_free_list = NULL;
2547 }
2548 
2549 /**
2550  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2551  *
2552  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2553  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2554  * WLAN driver module whose allocated SKB is freed by network stack are
2555  * suppose to call qdf_net_buf_debug_release_skb() such that the SKB is not
2556  * reported as memory leak.
2557  *
2558  * Return: none
2559  */
2560 void qdf_net_buf_debug_init(void)
2561 {
2562 	uint32_t i;
2563 
2564 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2565 
2566 	if (is_initial_mem_debug_disabled)
2567 		return;
2568 
2569 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2570 
2571 	qdf_nbuf_map_tracking_init();
2572 	qdf_nbuf_track_memory_manager_create();
2573 
2574 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2575 		gp_qdf_net_buf_track_tbl[i] = NULL;
2576 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2577 	}
2578 }
2579 qdf_export_symbol(qdf_net_buf_debug_init);
2580 
2581 /**
2582  * qdf_net_buf_debug_init() - exit network buffer debug functionality
2583  *
2584  * Exit network buffer tracking debug functionality and log SKB memory leaks
2585  * As part of exiting the functionality, free the leaked memory and
2586  * cleanup the tracking buffers.
2587  *
2588  * Return: none
2589  */
2590 void qdf_net_buf_debug_exit(void)
2591 {
2592 	uint32_t i;
2593 	uint32_t count = 0;
2594 	unsigned long irq_flag;
2595 	QDF_NBUF_TRACK *p_node;
2596 	QDF_NBUF_TRACK *p_prev;
2597 
2598 	if (is_initial_mem_debug_disabled)
2599 		return;
2600 
2601 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2602 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2603 		p_node = gp_qdf_net_buf_track_tbl[i];
2604 		while (p_node) {
2605 			p_prev = p_node;
2606 			p_node = p_node->p_next;
2607 			count++;
2608 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2609 				 p_prev->func_name, p_prev->line_num,
2610 				 p_prev->size, p_prev->net_buf);
2611 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2612 				 p_prev->map_func_name,
2613 				 p_prev->map_line_num,
2614 				 p_prev->unmap_func_name,
2615 				 p_prev->unmap_line_num,
2616 				 p_prev->is_nbuf_mapped);
2617 			qdf_nbuf_track_free(p_prev);
2618 		}
2619 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2620 	}
2621 
2622 	qdf_nbuf_track_memory_manager_destroy();
2623 	qdf_nbuf_map_tracking_deinit();
2624 
2625 #ifdef CONFIG_HALT_KMEMLEAK
2626 	if (count) {
2627 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2628 		QDF_BUG(0);
2629 	}
2630 #endif
2631 }
2632 qdf_export_symbol(qdf_net_buf_debug_exit);
2633 
2634 /**
2635  * qdf_net_buf_debug_hash() - hash network buffer pointer
2636  *
2637  * Return: hash value
2638  */
2639 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2640 {
2641 	uint32_t i;
2642 
2643 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2644 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2645 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2646 
2647 	return i;
2648 }
2649 
2650 /**
2651  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2652  *
2653  * Return: If skb is found in hash table then return pointer to network buffer
2654  *	else return %NULL
2655  */
2656 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2657 {
2658 	uint32_t i;
2659 	QDF_NBUF_TRACK *p_node;
2660 
2661 	i = qdf_net_buf_debug_hash(net_buf);
2662 	p_node = gp_qdf_net_buf_track_tbl[i];
2663 
2664 	while (p_node) {
2665 		if (p_node->net_buf == net_buf)
2666 			return p_node;
2667 		p_node = p_node->p_next;
2668 	}
2669 
2670 	return NULL;
2671 }
2672 
2673 /**
2674  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2675  *
2676  * Return: none
2677  */
2678 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2679 				const char *func_name, uint32_t line_num)
2680 {
2681 	uint32_t i;
2682 	unsigned long irq_flag;
2683 	QDF_NBUF_TRACK *p_node;
2684 	QDF_NBUF_TRACK *new_node;
2685 
2686 	if (is_initial_mem_debug_disabled)
2687 		return;
2688 
2689 	new_node = qdf_nbuf_track_alloc();
2690 
2691 	i = qdf_net_buf_debug_hash(net_buf);
2692 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2693 
2694 	p_node = qdf_net_buf_debug_look_up(net_buf);
2695 
2696 	if (p_node) {
2697 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2698 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2699 			  net_buf, func_name, line_num);
2700 		qdf_nbuf_track_free(new_node);
2701 	} else {
2702 		p_node = new_node;
2703 		if (p_node) {
2704 			p_node->net_buf = net_buf;
2705 			qdf_str_lcopy(p_node->func_name, func_name,
2706 				      QDF_MEM_FUNC_NAME_SIZE);
2707 			p_node->line_num = line_num;
2708 			p_node->is_nbuf_mapped = false;
2709 			p_node->map_line_num = 0;
2710 			p_node->unmap_line_num = 0;
2711 			p_node->map_func_name[0] = '\0';
2712 			p_node->unmap_func_name[0] = '\0';
2713 			p_node->size = size;
2714 			p_node->time = qdf_get_log_timestamp();
2715 			qdf_mem_skb_inc(size);
2716 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2717 			gp_qdf_net_buf_track_tbl[i] = p_node;
2718 		} else {
2719 			qdf_net_buf_track_fail_count++;
2720 			qdf_print(
2721 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2722 				  func_name, line_num, size);
2723 		}
2724 	}
2725 
2726 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2727 }
2728 qdf_export_symbol(qdf_net_buf_debug_add_node);
2729 
2730 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2731 				   uint32_t line_num)
2732 {
2733 	uint32_t i;
2734 	unsigned long irq_flag;
2735 	QDF_NBUF_TRACK *p_node;
2736 
2737 	if (is_initial_mem_debug_disabled)
2738 		return;
2739 
2740 	i = qdf_net_buf_debug_hash(net_buf);
2741 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2742 
2743 	p_node = qdf_net_buf_debug_look_up(net_buf);
2744 
2745 	if (p_node) {
2746 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2747 			      QDF_MEM_FUNC_NAME_SIZE);
2748 		p_node->line_num = line_num;
2749 	}
2750 
2751 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2752 }
2753 
2754 qdf_export_symbol(qdf_net_buf_debug_update_node);
2755 
2756 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2757 				       const char *func_name,
2758 				       uint32_t line_num)
2759 {
2760 	uint32_t i;
2761 	unsigned long irq_flag;
2762 	QDF_NBUF_TRACK *p_node;
2763 
2764 	if (is_initial_mem_debug_disabled)
2765 		return;
2766 
2767 	i = qdf_net_buf_debug_hash(net_buf);
2768 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2769 
2770 	p_node = qdf_net_buf_debug_look_up(net_buf);
2771 
2772 	if (p_node) {
2773 		qdf_str_lcopy(p_node->map_func_name, func_name,
2774 			      QDF_MEM_FUNC_NAME_SIZE);
2775 		p_node->map_line_num = line_num;
2776 		p_node->is_nbuf_mapped = true;
2777 	}
2778 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2779 }
2780 
2781 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
2782 					 const char *func_name,
2783 					 uint32_t line_num)
2784 {
2785 	uint32_t i;
2786 	unsigned long irq_flag;
2787 	QDF_NBUF_TRACK *p_node;
2788 
2789 	if (is_initial_mem_debug_disabled)
2790 		return;
2791 
2792 	i = qdf_net_buf_debug_hash(net_buf);
2793 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2794 
2795 	p_node = qdf_net_buf_debug_look_up(net_buf);
2796 
2797 	if (p_node) {
2798 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
2799 			      QDF_MEM_FUNC_NAME_SIZE);
2800 		p_node->unmap_line_num = line_num;
2801 		p_node->is_nbuf_mapped = false;
2802 	}
2803 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2804 }
2805 
2806 /**
2807  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
2808  *
2809  * Return: none
2810  */
2811 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
2812 {
2813 	uint32_t i;
2814 	QDF_NBUF_TRACK *p_head;
2815 	QDF_NBUF_TRACK *p_node = NULL;
2816 	unsigned long irq_flag;
2817 	QDF_NBUF_TRACK *p_prev;
2818 
2819 	if (is_initial_mem_debug_disabled)
2820 		return;
2821 
2822 	i = qdf_net_buf_debug_hash(net_buf);
2823 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2824 
2825 	p_head = gp_qdf_net_buf_track_tbl[i];
2826 
2827 	/* Unallocated SKB */
2828 	if (!p_head)
2829 		goto done;
2830 
2831 	p_node = p_head;
2832 	/* Found at head of the table */
2833 	if (p_head->net_buf == net_buf) {
2834 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
2835 		goto done;
2836 	}
2837 
2838 	/* Search in collision list */
2839 	while (p_node) {
2840 		p_prev = p_node;
2841 		p_node = p_node->p_next;
2842 		if ((p_node) && (p_node->net_buf == net_buf)) {
2843 			p_prev->p_next = p_node->p_next;
2844 			break;
2845 		}
2846 	}
2847 
2848 done:
2849 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2850 
2851 	if (p_node) {
2852 		qdf_mem_skb_dec(p_node->size);
2853 		qdf_nbuf_track_free(p_node);
2854 	} else {
2855 		if (qdf_net_buf_track_fail_count) {
2856 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
2857 				  net_buf, qdf_net_buf_track_fail_count);
2858 		} else
2859 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
2860 					   net_buf);
2861 	}
2862 }
2863 qdf_export_symbol(qdf_net_buf_debug_delete_node);
2864 
2865 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
2866 				   const char *func_name, uint32_t line_num)
2867 {
2868 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2869 
2870 	if (is_initial_mem_debug_disabled)
2871 		return;
2872 
2873 	while (ext_list) {
2874 		/*
2875 		 * Take care to add if it is Jumbo packet connected using
2876 		 * frag_list
2877 		 */
2878 		qdf_nbuf_t next;
2879 
2880 		next = qdf_nbuf_queue_next(ext_list);
2881 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
2882 		ext_list = next;
2883 	}
2884 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
2885 }
2886 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
2887 
2888 /**
2889  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
2890  * @net_buf: Network buf holding head segment (single)
2891  *
2892  * WLAN driver module whose allocated SKB is freed by network stack are
2893  * suppose to call this API before returning SKB to network stack such
2894  * that the SKB is not reported as memory leak.
2895  *
2896  * Return: none
2897  */
2898 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
2899 {
2900 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
2901 
2902 	if (is_initial_mem_debug_disabled)
2903 		return;
2904 
2905 	while (ext_list) {
2906 		/*
2907 		 * Take care to free if it is Jumbo packet connected using
2908 		 * frag_list
2909 		 */
2910 		qdf_nbuf_t next;
2911 
2912 		next = qdf_nbuf_queue_next(ext_list);
2913 
2914 		if (qdf_nbuf_get_users(ext_list) > 1) {
2915 			ext_list = next;
2916 			continue;
2917 		}
2918 
2919 		qdf_net_buf_debug_delete_node(ext_list);
2920 		ext_list = next;
2921 	}
2922 
2923 	if (qdf_nbuf_get_users(net_buf) > 1)
2924 		return;
2925 
2926 	qdf_net_buf_debug_delete_node(net_buf);
2927 }
2928 qdf_export_symbol(qdf_net_buf_debug_release_skb);
2929 
2930 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
2931 				int reserve, int align, int prio,
2932 				const char *func, uint32_t line)
2933 {
2934 	qdf_nbuf_t nbuf;
2935 
2936 	if (is_initial_mem_debug_disabled)
2937 		return __qdf_nbuf_alloc(osdev, size,
2938 					reserve, align,
2939 					prio, func, line);
2940 
2941 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
2942 
2943 	/* Store SKB in internal QDF tracking table */
2944 	if (qdf_likely(nbuf)) {
2945 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2946 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2947 	} else {
2948 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2949 	}
2950 
2951 	return nbuf;
2952 }
2953 qdf_export_symbol(qdf_nbuf_alloc_debug);
2954 
2955 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
2956 					    const char *func, uint32_t line)
2957 {
2958 	qdf_nbuf_t nbuf;
2959 
2960 	if (is_initial_mem_debug_disabled)
2961 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
2962 						    line);
2963 
2964 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
2965 
2966 	/* Store SKB in internal QDF tracking table */
2967 	if (qdf_likely(nbuf)) {
2968 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
2969 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
2970 	} else {
2971 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
2972 	}
2973 
2974 	return nbuf;
2975 }
2976 
2977 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
2978 
2979 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
2980 {
2981 	qdf_nbuf_t ext_list;
2982 	qdf_frag_t p_frag;
2983 	uint32_t num_nr_frags;
2984 	uint32_t idx = 0;
2985 
2986 	if (qdf_unlikely(!nbuf))
2987 		return;
2988 
2989 	if (is_initial_mem_debug_disabled)
2990 		goto free_buf;
2991 
2992 	if (qdf_nbuf_get_users(nbuf) > 1)
2993 		goto free_buf;
2994 
2995 	/* Remove SKB from internal QDF tracking table */
2996 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
2997 	qdf_net_buf_debug_delete_node(nbuf);
2998 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
2999 
3000 	/* Take care to delete the debug entries for frags */
3001 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3002 
3003 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3004 
3005 	while (idx < num_nr_frags) {
3006 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3007 		if (qdf_likely(p_frag))
3008 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3009 		idx++;
3010 	}
3011 
3012 	/**
3013 	 * Take care to update the debug entries for frag_list and also
3014 	 * for the frags attached to frag_list
3015 	 */
3016 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3017 	while (ext_list) {
3018 		if (qdf_nbuf_get_users(ext_list) == 1) {
3019 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3020 			idx = 0;
3021 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3022 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3023 			while (idx < num_nr_frags) {
3024 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3025 				if (qdf_likely(p_frag))
3026 					qdf_frag_debug_refcount_dec(p_frag,
3027 								    func, line);
3028 				idx++;
3029 			}
3030 			qdf_net_buf_debug_delete_node(ext_list);
3031 		}
3032 
3033 		ext_list = qdf_nbuf_queue_next(ext_list);
3034 	}
3035 
3036 free_buf:
3037 	__qdf_nbuf_free(nbuf);
3038 }
3039 qdf_export_symbol(qdf_nbuf_free_debug);
3040 
3041 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3042 {
3043 	uint32_t num_nr_frags;
3044 	uint32_t idx = 0;
3045 	qdf_nbuf_t ext_list;
3046 	qdf_frag_t p_frag;
3047 
3048 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3049 
3050 	if (is_initial_mem_debug_disabled)
3051 		return cloned_buf;
3052 
3053 	if (qdf_unlikely(!cloned_buf))
3054 		return NULL;
3055 
3056 	/* Take care to update the debug entries for frags */
3057 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3058 
3059 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3060 
3061 	while (idx < num_nr_frags) {
3062 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3063 		if (qdf_likely(p_frag))
3064 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3065 		idx++;
3066 	}
3067 
3068 	/* Take care to update debug entries for frags attached to frag_list */
3069 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3070 	while (ext_list) {
3071 		idx = 0;
3072 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3073 
3074 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3075 
3076 		while (idx < num_nr_frags) {
3077 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3078 			if (qdf_likely(p_frag))
3079 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3080 			idx++;
3081 		}
3082 		ext_list = qdf_nbuf_queue_next(ext_list);
3083 	}
3084 
3085 	/* Store SKB in internal QDF tracking table */
3086 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3087 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3088 
3089 	return cloned_buf;
3090 }
3091 qdf_export_symbol(qdf_nbuf_clone_debug);
3092 
3093 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3094 {
3095 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3096 
3097 	if (is_initial_mem_debug_disabled)
3098 		return copied_buf;
3099 
3100 	if (qdf_unlikely(!copied_buf))
3101 		return NULL;
3102 
3103 	/* Store SKB in internal QDF tracking table */
3104 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3105 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3106 
3107 	return copied_buf;
3108 }
3109 qdf_export_symbol(qdf_nbuf_copy_debug);
3110 
3111 qdf_nbuf_t
3112 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3113 			   const char *func, uint32_t line)
3114 {
3115 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3116 
3117 	if (qdf_unlikely(!copied_buf))
3118 		return NULL;
3119 
3120 	if (is_initial_mem_debug_disabled)
3121 		return copied_buf;
3122 
3123 	/* Store SKB in internal QDF tracking table */
3124 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3125 	qdf_nbuf_history_add(copied_buf, func, line,
3126 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3127 
3128 	return copied_buf;
3129 }
3130 
3131 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3132 
3133 qdf_nbuf_t
3134 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3135 		       uint32_t line_num)
3136 {
3137 	qdf_nbuf_t unshared_buf;
3138 	qdf_frag_t p_frag;
3139 	uint32_t num_nr_frags;
3140 	uint32_t idx = 0;
3141 
3142 	if (is_initial_mem_debug_disabled)
3143 		return __qdf_nbuf_unshare(buf);
3144 
3145 	/* Not a shared buffer, nothing to do */
3146 	if (!qdf_nbuf_is_cloned(buf))
3147 		return buf;
3148 
3149 	/* Take care to delete the debug entries for frags */
3150 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3151 
3152 	while (idx < num_nr_frags) {
3153 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3154 		if (qdf_likely(p_frag))
3155 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3156 						    line_num);
3157 		idx++;
3158 	}
3159 
3160 	qdf_net_buf_debug_delete_node(buf);
3161 
3162 	unshared_buf = __qdf_nbuf_unshare(buf);
3163 
3164 	if (qdf_likely(unshared_buf)) {
3165 		qdf_net_buf_debug_add_node(unshared_buf, 0,
3166 					   func_name, line_num);
3167 
3168 		/* Take care to add the debug entries for frags */
3169 		num_nr_frags = qdf_nbuf_get_nr_frags(unshared_buf);
3170 
3171 		idx = 0;
3172 		while (idx < num_nr_frags) {
3173 			p_frag = qdf_nbuf_get_frag_addr(unshared_buf, idx);
3174 			if (qdf_likely(p_frag))
3175 				qdf_frag_debug_refcount_inc(p_frag, func_name,
3176 							    line_num);
3177 			idx++;
3178 		}
3179 	}
3180 
3181 	return unshared_buf;
3182 }
3183 
3184 qdf_export_symbol(qdf_nbuf_unshare_debug);
3185 
3186 #endif /* NBUF_MEMORY_DEBUG */
3187 
3188 #if defined(FEATURE_TSO)
3189 
3190 /**
3191  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3192  *
3193  * @ethproto: ethernet type of the msdu
3194  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3195  * @l2_len: L2 length for the msdu
3196  * @eit_hdr: pointer to EIT header
3197  * @eit_hdr_len: EIT header length for the msdu
3198  * @eit_hdr_dma_map_addr: dma addr for EIT header
3199  * @tcphdr: pointer to tcp header
3200  * @ipv4_csum_en: ipv4 checksum enable
3201  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3202  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3203  * @ip_id: IP id
3204  * @tcp_seq_num: TCP sequence number
3205  *
3206  * This structure holds the TSO common info that is common
3207  * across all the TCP segments of the jumbo packet.
3208  */
3209 struct qdf_tso_cmn_seg_info_t {
3210 	uint16_t ethproto;
3211 	uint16_t ip_tcp_hdr_len;
3212 	uint16_t l2_len;
3213 	uint8_t *eit_hdr;
3214 	uint32_t eit_hdr_len;
3215 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3216 	struct tcphdr *tcphdr;
3217 	uint16_t ipv4_csum_en;
3218 	uint16_t tcp_ipv4_csum_en;
3219 	uint16_t tcp_ipv6_csum_en;
3220 	uint16_t ip_id;
3221 	uint32_t tcp_seq_num;
3222 };
3223 
3224 /**
3225  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
3226  *
3227  * @skb: network buffer
3228  *
3229  * Return: byte offset length of 8 bytes aligned.
3230  */
3231 #ifdef FIX_TXDMA_LIMITATION
3232 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3233 {
3234 	uint32_t eit_hdr_len;
3235 	uint8_t *eit_hdr;
3236 	uint8_t byte_8_align_offset;
3237 
3238 	eit_hdr = skb->data;
3239 	eit_hdr_len = (skb_transport_header(skb)
3240 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3241 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3242 	if (qdf_unlikely(byte_8_align_offset)) {
3243 		TSO_DEBUG("%pK,Len %d %d",
3244 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3245 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3246 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3247 				  __LINE__, skb->head, skb->data,
3248 				 byte_8_align_offset);
3249 			return 0;
3250 		}
3251 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3252 		qdf_mem_move(skb->data,
3253 			     skb->data + byte_8_align_offset,
3254 			     eit_hdr_len);
3255 		skb->len -= byte_8_align_offset;
3256 		skb->mac_header -= byte_8_align_offset;
3257 		skb->network_header -= byte_8_align_offset;
3258 		skb->transport_header -= byte_8_align_offset;
3259 	}
3260 	return byte_8_align_offset;
3261 }
3262 #else
3263 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3264 {
3265 	return 0;
3266 }
3267 #endif
3268 
3269 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
3270 void qdf_record_nbuf_nbytes(
3271 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
3272 {
3273 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
3274 }
3275 
3276 qdf_export_symbol(qdf_record_nbuf_nbytes);
3277 
3278 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
3279 
3280 /**
3281  * qdf_nbuf_tso_map_frag() - Map TSO segment
3282  * @osdev: qdf device handle
3283  * @tso_frag_vaddr: addr of tso fragment
3284  * @nbytes: number of bytes
3285  * @dir: direction
3286  *
3287  * Map TSO segment and for MCL record the amount of memory mapped
3288  *
3289  * Return: DMA address of mapped TSO fragment in success and
3290  * NULL in case of DMA mapping failure
3291  */
3292 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
3293 	qdf_device_t osdev, void *tso_frag_vaddr,
3294 	uint32_t nbytes, qdf_dma_dir_t dir)
3295 {
3296 	qdf_dma_addr_t tso_frag_paddr = 0;
3297 
3298 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
3299 					nbytes, __qdf_dma_dir_to_os(dir));
3300 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
3301 		qdf_err("DMA mapping error!");
3302 		qdf_assert_always(0);
3303 		return 0;
3304 	}
3305 	qdf_record_nbuf_nbytes(nbytes, dir, true);
3306 	return tso_frag_paddr;
3307 }
3308 
3309 /**
3310  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
3311  * @osdev: qdf device handle
3312  * @tso_frag_paddr: DMA addr of tso fragment
3313  * @dir: direction
3314  * @nbytes: number of bytes
3315  *
3316  * Unmap TSO segment and for MCL record the amount of memory mapped
3317  *
3318  * Return: None
3319  */
3320 static inline void qdf_nbuf_tso_unmap_frag(
3321 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
3322 	uint32_t nbytes, qdf_dma_dir_t dir)
3323 {
3324 	qdf_record_nbuf_nbytes(nbytes, dir, false);
3325 	dma_unmap_single(osdev->dev, tso_frag_paddr,
3326 			 nbytes, __qdf_dma_dir_to_os(dir));
3327 }
3328 
3329 /**
3330  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3331  * information
3332  * @osdev: qdf device handle
3333  * @skb: skb buffer
3334  * @tso_info: Parameters common to all segements
3335  *
3336  * Get the TSO information that is common across all the TCP
3337  * segments of the jumbo packet
3338  *
3339  * Return: 0 - success 1 - failure
3340  */
3341 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3342 			struct sk_buff *skb,
3343 			struct qdf_tso_cmn_seg_info_t *tso_info)
3344 {
3345 	/* Get ethernet type and ethernet header length */
3346 	tso_info->ethproto = vlan_get_protocol(skb);
3347 
3348 	/* Determine whether this is an IPv4 or IPv6 packet */
3349 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3350 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3351 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3352 
3353 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3354 		tso_info->ipv4_csum_en = 1;
3355 		tso_info->tcp_ipv4_csum_en = 1;
3356 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3357 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3358 				ipv4_hdr->protocol);
3359 			return 1;
3360 		}
3361 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3362 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3363 		tso_info->tcp_ipv6_csum_en = 1;
3364 	} else {
3365 		qdf_err("TSO: ethertype 0x%x is not supported!",
3366 			tso_info->ethproto);
3367 		return 1;
3368 	}
3369 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3370 	tso_info->tcphdr = tcp_hdr(skb);
3371 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3372 	/* get pointer to the ethernet + IP + TCP header and their length */
3373 	tso_info->eit_hdr = skb->data;
3374 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3375 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3376 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
3377 						osdev, tso_info->eit_hdr,
3378 						tso_info->eit_hdr_len,
3379 						QDF_DMA_TO_DEVICE);
3380 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
3381 		return 1;
3382 
3383 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3384 		/* inlcude IPv4 header length for IPV4 (total length) */
3385 		tso_info->ip_tcp_hdr_len =
3386 			tso_info->eit_hdr_len - tso_info->l2_len;
3387 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3388 		/* exclude IPv6 header length for IPv6 (payload length) */
3389 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3390 	}
3391 	/*
3392 	 * The length of the payload (application layer data) is added to
3393 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3394 	 * descriptor.
3395 	 */
3396 
3397 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3398 		tso_info->tcp_seq_num,
3399 		tso_info->eit_hdr_len,
3400 		tso_info->l2_len,
3401 		skb->len);
3402 	return 0;
3403 }
3404 
3405 
3406 /**
3407  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3408  *
3409  * @curr_seg: Segment whose contents are initialized
3410  * @tso_cmn_info: Parameters common to all segements
3411  *
3412  * Return: None
3413  */
3414 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3415 				struct qdf_tso_seg_elem_t *curr_seg,
3416 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3417 {
3418 	/* Initialize the flags to 0 */
3419 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3420 
3421 	/*
3422 	 * The following fields remain the same across all segments of
3423 	 * a jumbo packet
3424 	 */
3425 	curr_seg->seg.tso_flags.tso_enable = 1;
3426 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3427 		tso_cmn_info->ipv4_csum_en;
3428 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3429 		tso_cmn_info->tcp_ipv6_csum_en;
3430 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3431 		tso_cmn_info->tcp_ipv4_csum_en;
3432 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3433 
3434 	/* The following fields change for the segments */
3435 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3436 	tso_cmn_info->ip_id++;
3437 
3438 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3439 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3440 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3441 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3442 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3443 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3444 
3445 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3446 
3447 	/*
3448 	 * First fragment for each segment always contains the ethernet,
3449 	 * IP and TCP header
3450 	 */
3451 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3452 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3453 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3454 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3455 
3456 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3457 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3458 		   tso_cmn_info->eit_hdr_len,
3459 		   curr_seg->seg.tso_flags.tcp_seq_num,
3460 		   curr_seg->seg.total_len);
3461 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3462 }
3463 
3464 /**
3465  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3466  * into segments
3467  * @nbuf: network buffer to be segmented
3468  * @tso_info: This is the output. The information about the
3469  *           TSO segments will be populated within this.
3470  *
3471  * This function fragments a TCP jumbo packet into smaller
3472  * segments to be transmitted by the driver. It chains the TSO
3473  * segments created into a list.
3474  *
3475  * Return: number of TSO segments
3476  */
3477 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3478 		struct qdf_tso_info_t *tso_info)
3479 {
3480 	/* common across all segments */
3481 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3482 	/* segment specific */
3483 	void *tso_frag_vaddr;
3484 	qdf_dma_addr_t tso_frag_paddr = 0;
3485 	uint32_t num_seg = 0;
3486 	struct qdf_tso_seg_elem_t *curr_seg;
3487 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3488 	skb_frag_t *frag = NULL;
3489 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3490 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3491 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3492 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3493 	int j = 0; /* skb fragment index */
3494 	uint8_t byte_8_align_offset;
3495 
3496 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3497 	total_num_seg = tso_info->tso_num_seg_list;
3498 	curr_seg = tso_info->tso_seg_list;
3499 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3500 
3501 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3502 
3503 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3504 						skb, &tso_cmn_info))) {
3505 		qdf_warn("TSO: error getting common segment info");
3506 		return 0;
3507 	}
3508 
3509 	/* length of the first chunk of data in the skb */
3510 	skb_frag_len = skb_headlen(skb);
3511 
3512 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3513 	/* update the remaining skb fragment length and TSO segment length */
3514 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3515 	skb_proc -= tso_cmn_info.eit_hdr_len;
3516 
3517 	/* get the address to the next tso fragment */
3518 	tso_frag_vaddr = skb->data +
3519 			 tso_cmn_info.eit_hdr_len +
3520 			 byte_8_align_offset;
3521 	/* get the length of the next tso fragment */
3522 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3523 
3524 	if (tso_frag_len != 0) {
3525 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
3526 					osdev, tso_frag_vaddr, tso_frag_len,
3527 					QDF_DMA_TO_DEVICE);
3528 		if (qdf_unlikely(!tso_frag_paddr))
3529 			return 0;
3530 	}
3531 
3532 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3533 		__LINE__, skb_frag_len, tso_frag_len);
3534 	num_seg = tso_info->num_segs;
3535 	tso_info->num_segs = 0;
3536 	tso_info->is_tso = 1;
3537 
3538 	while (num_seg && curr_seg) {
3539 		int i = 1; /* tso fragment index */
3540 		uint8_t more_tso_frags = 1;
3541 
3542 		curr_seg->seg.num_frags = 0;
3543 		tso_info->num_segs++;
3544 		total_num_seg->num_seg.tso_cmn_num_seg++;
3545 
3546 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3547 						 &tso_cmn_info);
3548 
3549 		/* If TCP PSH flag is set, set it in the last or only segment */
3550 		if (num_seg == 1)
3551 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3552 
3553 		if (unlikely(skb_proc == 0))
3554 			return tso_info->num_segs;
3555 
3556 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3557 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3558 		/* frag len is added to ip_len in while loop below*/
3559 
3560 		curr_seg->seg.num_frags++;
3561 
3562 		while (more_tso_frags) {
3563 			if (tso_frag_len != 0) {
3564 				curr_seg->seg.tso_frags[i].vaddr =
3565 					tso_frag_vaddr;
3566 				curr_seg->seg.tso_frags[i].length =
3567 					tso_frag_len;
3568 				curr_seg->seg.total_len += tso_frag_len;
3569 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3570 				curr_seg->seg.num_frags++;
3571 				skb_proc = skb_proc - tso_frag_len;
3572 
3573 				/* increment the TCP sequence number */
3574 
3575 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3576 				curr_seg->seg.tso_frags[i].paddr =
3577 					tso_frag_paddr;
3578 
3579 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
3580 			}
3581 
3582 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3583 					__func__, __LINE__,
3584 					i,
3585 					tso_frag_len,
3586 					curr_seg->seg.total_len,
3587 					curr_seg->seg.tso_frags[i].vaddr);
3588 
3589 			/* if there is no more data left in the skb */
3590 			if (!skb_proc)
3591 				return tso_info->num_segs;
3592 
3593 			/* get the next payload fragment information */
3594 			/* check if there are more fragments in this segment */
3595 			if (tso_frag_len < tso_seg_size) {
3596 				more_tso_frags = 1;
3597 				if (tso_frag_len != 0) {
3598 					tso_seg_size = tso_seg_size -
3599 						tso_frag_len;
3600 					i++;
3601 					if (curr_seg->seg.num_frags ==
3602 								FRAG_NUM_MAX) {
3603 						more_tso_frags = 0;
3604 						/*
3605 						 * reset i and the tso
3606 						 * payload size
3607 						 */
3608 						i = 1;
3609 						tso_seg_size =
3610 							skb_shinfo(skb)->
3611 								gso_size;
3612 					}
3613 				}
3614 			} else {
3615 				more_tso_frags = 0;
3616 				/* reset i and the tso payload size */
3617 				i = 1;
3618 				tso_seg_size = skb_shinfo(skb)->gso_size;
3619 			}
3620 
3621 			/* if the next fragment is contiguous */
3622 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3623 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3624 				skb_frag_len = skb_frag_len - tso_frag_len;
3625 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3626 
3627 			} else { /* the next fragment is not contiguous */
3628 				if (skb_shinfo(skb)->nr_frags == 0) {
3629 					qdf_info("TSO: nr_frags == 0!");
3630 					qdf_assert(0);
3631 					return 0;
3632 				}
3633 				if (j >= skb_shinfo(skb)->nr_frags) {
3634 					qdf_info("TSO: nr_frags %d j %d",
3635 						 skb_shinfo(skb)->nr_frags, j);
3636 					qdf_assert(0);
3637 					return 0;
3638 				}
3639 				frag = &skb_shinfo(skb)->frags[j];
3640 				skb_frag_len = skb_frag_size(frag);
3641 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3642 				tso_frag_vaddr = skb_frag_address_safe(frag);
3643 				j++;
3644 			}
3645 
3646 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3647 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3648 				tso_seg_size);
3649 
3650 			if (!(tso_frag_vaddr)) {
3651 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3652 						__func__);
3653 				return 0;
3654 			}
3655 
3656 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
3657 						osdev, tso_frag_vaddr,
3658 						tso_frag_len,
3659 						QDF_DMA_TO_DEVICE);
3660 			if (qdf_unlikely(!tso_frag_paddr))
3661 				return 0;
3662 		}
3663 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3664 				curr_seg->seg.tso_flags.tcp_seq_num);
3665 		num_seg--;
3666 		/* if TCP FIN flag was set, set it in the last segment */
3667 		if (!num_seg)
3668 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3669 
3670 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3671 		curr_seg = curr_seg->next;
3672 	}
3673 	return tso_info->num_segs;
3674 }
3675 qdf_export_symbol(__qdf_nbuf_get_tso_info);
3676 
3677 /**
3678  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3679  *
3680  * @osdev: qdf device handle
3681  * @tso_seg: TSO segment element to be unmapped
3682  * @is_last_seg: whether this is last tso seg or not
3683  *
3684  * Return: none
3685  */
3686 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3687 			  struct qdf_tso_seg_elem_t *tso_seg,
3688 			  bool is_last_seg)
3689 {
3690 	uint32_t num_frags = 0;
3691 
3692 	if (tso_seg->seg.num_frags > 0)
3693 		num_frags = tso_seg->seg.num_frags - 1;
3694 
3695 	/*Num of frags in a tso seg cannot be less than 2 */
3696 	if (num_frags < 1) {
3697 		/*
3698 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
3699 		 * this may happen when qdf_nbuf_get_tso_info failed,
3700 		 * do dma unmap for the 0th frag in this seg.
3701 		 */
3702 		if (is_last_seg && tso_seg->seg.num_frags == 1)
3703 			goto last_seg_free_first_frag;
3704 
3705 		qdf_assert(0);
3706 		qdf_err("ERROR: num of frags in a tso segment is %d",
3707 			(num_frags + 1));
3708 		return;
3709 	}
3710 
3711 	while (num_frags) {
3712 		/*Do dma unmap the tso seg except the 0th frag */
3713 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
3714 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
3715 				num_frags);
3716 			qdf_assert(0);
3717 			return;
3718 		}
3719 		qdf_nbuf_tso_unmap_frag(
3720 			osdev,
3721 			tso_seg->seg.tso_frags[num_frags].paddr,
3722 			tso_seg->seg.tso_frags[num_frags].length,
3723 			QDF_DMA_TO_DEVICE);
3724 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
3725 		num_frags--;
3726 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
3727 	}
3728 
3729 last_seg_free_first_frag:
3730 	if (is_last_seg) {
3731 		/*Do dma unmap for the tso seg 0th frag */
3732 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
3733 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
3734 			qdf_assert(0);
3735 			return;
3736 		}
3737 		qdf_nbuf_tso_unmap_frag(osdev,
3738 					tso_seg->seg.tso_frags[0].paddr,
3739 					tso_seg->seg.tso_frags[0].length,
3740 					QDF_DMA_TO_DEVICE);
3741 		tso_seg->seg.tso_frags[0].paddr = 0;
3742 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
3743 	}
3744 }
3745 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
3746 
3747 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
3748 {
3749 	size_t packet_len;
3750 
3751 	packet_len = skb->len -
3752 		((skb_transport_header(skb) - skb_mac_header(skb)) +
3753 		 tcp_hdrlen(skb));
3754 
3755 	return packet_len;
3756 }
3757 
3758 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
3759 
3760 /**
3761  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
3762  * into segments
3763  * @nbuf:   network buffer to be segmented
3764  * @tso_info:  This is the output. The information about the
3765  *      TSO segments will be populated within this.
3766  *
3767  * This function fragments a TCP jumbo packet into smaller
3768  * segments to be transmitted by the driver. It chains the TSO
3769  * segments created into a list.
3770  *
3771  * Return: 0 - success, 1 - failure
3772  */
3773 #ifndef BUILD_X86
3774 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3775 {
3776 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3777 	uint32_t remainder, num_segs = 0;
3778 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
3779 	uint8_t frags_per_tso = 0;
3780 	uint32_t skb_frag_len = 0;
3781 	uint32_t eit_hdr_len = (skb_transport_header(skb)
3782 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3783 	skb_frag_t *frag = NULL;
3784 	int j = 0;
3785 	uint32_t temp_num_seg = 0;
3786 
3787 	/* length of the first chunk of data in the skb minus eit header*/
3788 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
3789 
3790 	/* Calculate num of segs for skb's first chunk of data*/
3791 	remainder = skb_frag_len % tso_seg_size;
3792 	num_segs = skb_frag_len / tso_seg_size;
3793 	/**
3794 	 * Remainder non-zero and nr_frags zero implies end of skb data.
3795 	 * In that case, one more tso seg is required to accommodate
3796 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
3797 	 * then remaining data will be accomodated while doing the calculation
3798 	 * for nr_frags data. Hence, frags_per_tso++.
3799 	 */
3800 	if (remainder) {
3801 		if (!skb_nr_frags)
3802 			num_segs++;
3803 		else
3804 			frags_per_tso++;
3805 	}
3806 
3807 	while (skb_nr_frags) {
3808 		if (j >= skb_shinfo(skb)->nr_frags) {
3809 			qdf_info("TSO: nr_frags %d j %d",
3810 				 skb_shinfo(skb)->nr_frags, j);
3811 			qdf_assert(0);
3812 			return 0;
3813 		}
3814 		/**
3815 		 * Calculate the number of tso seg for nr_frags data:
3816 		 * Get the length of each frag in skb_frag_len, add to
3817 		 * remainder.Get the number of segments by dividing it to
3818 		 * tso_seg_size and calculate the new remainder.
3819 		 * Decrement the nr_frags value and keep
3820 		 * looping all the skb_fragments.
3821 		 */
3822 		frag = &skb_shinfo(skb)->frags[j];
3823 		skb_frag_len = skb_frag_size(frag);
3824 		temp_num_seg = num_segs;
3825 		remainder += skb_frag_len;
3826 		num_segs += remainder / tso_seg_size;
3827 		remainder = remainder % tso_seg_size;
3828 		skb_nr_frags--;
3829 		if (remainder) {
3830 			if (num_segs > temp_num_seg)
3831 				frags_per_tso = 0;
3832 			/**
3833 			 * increment the tso per frags whenever remainder is
3834 			 * positive. If frags_per_tso reaches the (max-1),
3835 			 * [First frags always have EIT header, therefore max-1]
3836 			 * increment the num_segs as no more data can be
3837 			 * accomodated in the curr tso seg. Reset the remainder
3838 			 * and frags per tso and keep looping.
3839 			 */
3840 			frags_per_tso++;
3841 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
3842 				num_segs++;
3843 				frags_per_tso = 0;
3844 				remainder = 0;
3845 			}
3846 			/**
3847 			 * If this is the last skb frag and still remainder is
3848 			 * non-zero(frags_per_tso is not reached to the max-1)
3849 			 * then increment the num_segs to take care of the
3850 			 * remaining length.
3851 			 */
3852 			if (!skb_nr_frags && remainder) {
3853 				num_segs++;
3854 				frags_per_tso = 0;
3855 			}
3856 		} else {
3857 			 /* Whenever remainder is 0, reset the frags_per_tso. */
3858 			frags_per_tso = 0;
3859 		}
3860 		j++;
3861 	}
3862 
3863 	return num_segs;
3864 }
3865 #elif !defined(QCA_WIFI_QCN9000)
3866 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3867 {
3868 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3869 	skb_frag_t *frag = NULL;
3870 
3871 	/*
3872 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
3873 	 * region which cannot be accessed by Target
3874 	 */
3875 	if (virt_to_phys(skb->data) < 0x50000040) {
3876 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
3877 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
3878 				virt_to_phys(skb->data));
3879 		goto fail;
3880 
3881 	}
3882 
3883 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3884 		frag = &skb_shinfo(skb)->frags[i];
3885 
3886 		if (!frag)
3887 			goto fail;
3888 
3889 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
3890 			goto fail;
3891 	}
3892 
3893 
3894 	gso_size = skb_shinfo(skb)->gso_size;
3895 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3896 			+ tcp_hdrlen(skb));
3897 	while (tmp_len) {
3898 		num_segs++;
3899 		if (tmp_len > gso_size)
3900 			tmp_len -= gso_size;
3901 		else
3902 			break;
3903 	}
3904 
3905 	return num_segs;
3906 
3907 	/*
3908 	 * Do not free this frame, just do socket level accounting
3909 	 * so that this is not reused.
3910 	 */
3911 fail:
3912 	if (skb->sk)
3913 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3914 
3915 	return 0;
3916 }
3917 #else
3918 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
3919 {
3920 	uint32_t i, gso_size, tmp_len, num_segs = 0;
3921 	skb_frag_t *frag = NULL;
3922 
3923 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3924 		frag = &skb_shinfo(skb)->frags[i];
3925 
3926 		if (!frag)
3927 			goto fail;
3928 	}
3929 
3930 	gso_size = skb_shinfo(skb)->gso_size;
3931 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
3932 			+ tcp_hdrlen(skb));
3933 	while (tmp_len) {
3934 		num_segs++;
3935 		if (tmp_len > gso_size)
3936 			tmp_len -= gso_size;
3937 		else
3938 			break;
3939 	}
3940 
3941 	return num_segs;
3942 
3943 	/*
3944 	 * Do not free this frame, just do socket level accounting
3945 	 * so that this is not reused.
3946 	 */
3947 fail:
3948 	if (skb->sk)
3949 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
3950 
3951 	return 0;
3952 }
3953 #endif
3954 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
3955 
3956 #endif /* FEATURE_TSO */
3957 
3958 /**
3959  * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
3960  *
3961  * Returns the high and low 32-bits of the DMA addr in the provided ptrs
3962  *
3963  * Return: N/A
3964  */
3965 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
3966 			  uint32_t *lo, uint32_t *hi)
3967 {
3968 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
3969 		*lo = lower_32_bits(dmaaddr);
3970 		*hi = upper_32_bits(dmaaddr);
3971 	} else {
3972 		*lo = dmaaddr;
3973 		*hi = 0;
3974 	}
3975 }
3976 
3977 qdf_export_symbol(__qdf_dmaaddr_to_32s);
3978 
3979 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
3980 {
3981 	qdf_nbuf_users_inc(&skb->users);
3982 	return skb;
3983 }
3984 qdf_export_symbol(__qdf_nbuf_inc_users);
3985 
3986 int __qdf_nbuf_get_users(struct sk_buff *skb)
3987 {
3988 	return qdf_nbuf_users_read(&skb->users);
3989 }
3990 qdf_export_symbol(__qdf_nbuf_get_users);
3991 
3992 /**
3993  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
3994  * @skb: sk_buff handle
3995  *
3996  * Return: none
3997  */
3998 
3999 void __qdf_nbuf_ref(struct sk_buff *skb)
4000 {
4001 	skb_get(skb);
4002 }
4003 qdf_export_symbol(__qdf_nbuf_ref);
4004 
4005 /**
4006  * __qdf_nbuf_shared() - Check whether the buffer is shared
4007  *  @skb: sk_buff buffer
4008  *
4009  *  Return: true if more than one person has a reference to this buffer.
4010  */
4011 int __qdf_nbuf_shared(struct sk_buff *skb)
4012 {
4013 	return skb_shared(skb);
4014 }
4015 qdf_export_symbol(__qdf_nbuf_shared);
4016 
4017 /**
4018  * __qdf_nbuf_dmamap_create() - create a DMA map.
4019  * @osdev: qdf device handle
4020  * @dmap: dma map handle
4021  *
4022  * This can later be used to map networking buffers. They :
4023  * - need space in adf_drv's software descriptor
4024  * - are typically created during adf_drv_create
4025  * - need to be created before any API(qdf_nbuf_map) that uses them
4026  *
4027  * Return: QDF STATUS
4028  */
4029 QDF_STATUS
4030 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4031 {
4032 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4033 	/*
4034 	 * driver can tell its SG capablity, it must be handled.
4035 	 * Bounce buffers if they are there
4036 	 */
4037 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4038 	if (!(*dmap))
4039 		error = QDF_STATUS_E_NOMEM;
4040 
4041 	return error;
4042 }
4043 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4044 /**
4045  * __qdf_nbuf_dmamap_destroy() - delete a dma map
4046  * @osdev: qdf device handle
4047  * @dmap: dma map handle
4048  *
4049  * Return: none
4050  */
4051 void
4052 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4053 {
4054 	kfree(dmap);
4055 }
4056 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
4057 
4058 /**
4059  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
4060  * @osdev: os device
4061  * @skb: skb handle
4062  * @dir: dma direction
4063  * @nbytes: number of bytes to be mapped
4064  *
4065  * Return: QDF_STATUS
4066  */
4067 #ifdef QDF_OS_DEBUG
4068 QDF_STATUS
4069 __qdf_nbuf_map_nbytes(
4070 	qdf_device_t osdev,
4071 	struct sk_buff *skb,
4072 	qdf_dma_dir_t dir,
4073 	int nbytes)
4074 {
4075 	struct skb_shared_info  *sh = skb_shinfo(skb);
4076 
4077 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4078 
4079 	/*
4080 	 * Assume there's only a single fragment.
4081 	 * To support multiple fragments, it would be necessary to change
4082 	 * adf_nbuf_t to be a separate object that stores meta-info
4083 	 * (including the bus address for each fragment) and a pointer
4084 	 * to the underlying sk_buff.
4085 	 */
4086 	qdf_assert(sh->nr_frags == 0);
4087 
4088 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4089 }
4090 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4091 #else
4092 QDF_STATUS
4093 __qdf_nbuf_map_nbytes(
4094 	qdf_device_t osdev,
4095 	struct sk_buff *skb,
4096 	qdf_dma_dir_t dir,
4097 	int nbytes)
4098 {
4099 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4100 }
4101 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4102 #endif
4103 /**
4104  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4105  * @osdev: OS device
4106  * @skb: skb handle
4107  * @dir: direction
4108  * @nbytes: number of bytes
4109  *
4110  * Return: none
4111  */
4112 void
4113 __qdf_nbuf_unmap_nbytes(
4114 	qdf_device_t osdev,
4115 	struct sk_buff *skb,
4116 	qdf_dma_dir_t dir,
4117 	int nbytes)
4118 {
4119 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4120 
4121 	/*
4122 	 * Assume there's a single fragment.
4123 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4124 	 */
4125 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4126 }
4127 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4128 
4129 /**
4130  * __qdf_nbuf_dma_map_info() - return the dma map info
4131  * @bmap: dma map
4132  * @sg: dma map info
4133  *
4134  * Return: none
4135  */
4136 void
4137 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4138 {
4139 	qdf_assert(bmap->mapped);
4140 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4141 
4142 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4143 			sizeof(struct __qdf_segment));
4144 	sg->nsegs = bmap->nsegs;
4145 }
4146 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4147 /**
4148  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4149  *			specified by the index
4150  * @skb: sk buff
4151  * @sg: scatter/gather list of all the frags
4152  *
4153  * Return: none
4154  */
4155 #if defined(__QDF_SUPPORT_FRAG_MEM)
4156 void
4157 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4158 {
4159 	qdf_assert(skb);
4160 	sg->sg_segs[0].vaddr = skb->data;
4161 	sg->sg_segs[0].len   = skb->len;
4162 	sg->nsegs            = 1;
4163 
4164 	for (int i = 1; i <= sh->nr_frags; i++) {
4165 		skb_frag_t    *f        = &sh->frags[i - 1];
4166 
4167 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
4168 			f->page_offset);
4169 		sg->sg_segs[i].len      = f->size;
4170 
4171 		qdf_assert(i < QDF_MAX_SGLIST);
4172 	}
4173 	sg->nsegs += i;
4174 
4175 }
4176 qdf_export_symbol(__qdf_nbuf_frag_info);
4177 #else
4178 #ifdef QDF_OS_DEBUG
4179 void
4180 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4181 {
4182 
4183 	struct skb_shared_info  *sh = skb_shinfo(skb);
4184 
4185 	qdf_assert(skb);
4186 	sg->sg_segs[0].vaddr = skb->data;
4187 	sg->sg_segs[0].len   = skb->len;
4188 	sg->nsegs            = 1;
4189 
4190 	qdf_assert(sh->nr_frags == 0);
4191 }
4192 qdf_export_symbol(__qdf_nbuf_frag_info);
4193 #else
4194 void
4195 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4196 {
4197 	sg->sg_segs[0].vaddr = skb->data;
4198 	sg->sg_segs[0].len   = skb->len;
4199 	sg->nsegs            = 1;
4200 }
4201 qdf_export_symbol(__qdf_nbuf_frag_info);
4202 #endif
4203 #endif
4204 /**
4205  * __qdf_nbuf_get_frag_size() - get frag size
4206  * @nbuf: sk buffer
4207  * @cur_frag: current frag
4208  *
4209  * Return: frag size
4210  */
4211 uint32_t
4212 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4213 {
4214 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4215 	const skb_frag_t *frag = sh->frags + cur_frag;
4216 
4217 	return skb_frag_size(frag);
4218 }
4219 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4220 
4221 /**
4222  * __qdf_nbuf_frag_map() - dma map frag
4223  * @osdev: os device
4224  * @nbuf: sk buff
4225  * @offset: offset
4226  * @dir: direction
4227  * @cur_frag: current fragment
4228  *
4229  * Return: QDF status
4230  */
4231 #ifdef A_SIMOS_DEVHOST
4232 QDF_STATUS __qdf_nbuf_frag_map(
4233 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4234 	int offset, qdf_dma_dir_t dir, int cur_frag)
4235 {
4236 	int32_t paddr, frag_len;
4237 
4238 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4239 	return QDF_STATUS_SUCCESS;
4240 }
4241 qdf_export_symbol(__qdf_nbuf_frag_map);
4242 #else
4243 QDF_STATUS __qdf_nbuf_frag_map(
4244 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4245 	int offset, qdf_dma_dir_t dir, int cur_frag)
4246 {
4247 	dma_addr_t paddr, frag_len;
4248 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4249 	const skb_frag_t *frag = sh->frags + cur_frag;
4250 
4251 	frag_len = skb_frag_size(frag);
4252 
4253 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4254 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4255 					__qdf_dma_dir_to_os(dir));
4256 	return dma_mapping_error(osdev->dev, paddr) ?
4257 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4258 }
4259 qdf_export_symbol(__qdf_nbuf_frag_map);
4260 #endif
4261 /**
4262  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4263  * @dmap: dma map
4264  * @cb: callback
4265  * @arg: argument
4266  *
4267  * Return: none
4268  */
4269 void
4270 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4271 {
4272 	return;
4273 }
4274 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4275 
4276 
4277 /**
4278  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4279  * @osdev: os device
4280  * @buf: sk buff
4281  * @dir: direction
4282  *
4283  * Return: none
4284  */
4285 #if defined(A_SIMOS_DEVHOST)
4286 static void __qdf_nbuf_sync_single_for_cpu(
4287 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4288 {
4289 	return;
4290 }
4291 #else
4292 static void __qdf_nbuf_sync_single_for_cpu(
4293 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4294 {
4295 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4296 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4297 		return;
4298 	}
4299 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4300 		skb_end_offset(buf) - skb_headroom(buf),
4301 		__qdf_dma_dir_to_os(dir));
4302 }
4303 #endif
4304 /**
4305  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4306  * @osdev: os device
4307  * @skb: sk buff
4308  * @dir: direction
4309  *
4310  * Return: none
4311  */
4312 void
4313 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4314 	struct sk_buff *skb, qdf_dma_dir_t dir)
4315 {
4316 	qdf_assert(
4317 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4318 
4319 	/*
4320 	 * Assume there's a single fragment.
4321 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4322 	 */
4323 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4324 }
4325 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4326 
4327 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4328 /**
4329  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4330  * @rx_status: Pointer to rx_status.
4331  * @rtap_buf: Buf to which VHT info has to be updated.
4332  * @rtap_len: Current length of radiotap buffer
4333  *
4334  * Return: Length of radiotap after VHT flags updated.
4335  */
4336 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4337 					struct mon_rx_status *rx_status,
4338 					int8_t *rtap_buf,
4339 					uint32_t rtap_len)
4340 {
4341 	uint16_t vht_flags = 0;
4342 
4343 	rtap_len = qdf_align(rtap_len, 2);
4344 
4345 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4346 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4347 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4348 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4349 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4350 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4351 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4352 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4353 	rtap_len += 2;
4354 
4355 	rtap_buf[rtap_len] |=
4356 		(rx_status->is_stbc ?
4357 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4358 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4359 		(rx_status->ldpc ?
4360 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4361 		(rx_status->beamformed ?
4362 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4363 	rtap_len += 1;
4364 	switch (rx_status->vht_flag_values2) {
4365 	case IEEE80211_RADIOTAP_VHT_BW_20:
4366 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4367 		break;
4368 	case IEEE80211_RADIOTAP_VHT_BW_40:
4369 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4370 		break;
4371 	case IEEE80211_RADIOTAP_VHT_BW_80:
4372 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4373 		break;
4374 	case IEEE80211_RADIOTAP_VHT_BW_160:
4375 		rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4376 		break;
4377 	}
4378 	rtap_len += 1;
4379 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4380 	rtap_len += 1;
4381 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4382 	rtap_len += 1;
4383 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4384 	rtap_len += 1;
4385 	rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4386 	rtap_len += 1;
4387 	rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4388 	rtap_len += 1;
4389 	rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4390 	rtap_len += 1;
4391 	put_unaligned_le16(rx_status->vht_flag_values6,
4392 			   &rtap_buf[rtap_len]);
4393 	rtap_len += 2;
4394 
4395 	return rtap_len;
4396 }
4397 
4398 /**
4399  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4400  * @rx_status: Pointer to rx_status.
4401  * @rtap_buf: buffer to which radiotap has to be updated
4402  * @rtap_len: radiotap length
4403  *
4404  * API update high-efficiency (11ax) fields in the radiotap header
4405  *
4406  * Return: length of rtap_len updated.
4407  */
4408 static unsigned int
4409 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4410 				     int8_t *rtap_buf, uint32_t rtap_len)
4411 {
4412 	/*
4413 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4414 	 * Enable all "known" HE radiotap flags for now
4415 	 */
4416 	rtap_len = qdf_align(rtap_len, 2);
4417 
4418 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4419 	rtap_len += 2;
4420 
4421 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4422 	rtap_len += 2;
4423 
4424 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4425 	rtap_len += 2;
4426 
4427 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4428 	rtap_len += 2;
4429 
4430 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4431 	rtap_len += 2;
4432 
4433 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4434 	rtap_len += 2;
4435 	qdf_rl_debug("he data %x %x %x %x %x %x",
4436 		     rx_status->he_data1,
4437 		     rx_status->he_data2, rx_status->he_data3,
4438 		     rx_status->he_data4, rx_status->he_data5,
4439 		     rx_status->he_data6);
4440 	return rtap_len;
4441 }
4442 
4443 
4444 /**
4445  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4446  * @rx_status: Pointer to rx_status.
4447  * @rtap_buf: buffer to which radiotap has to be updated
4448  * @rtap_len: radiotap length
4449  *
4450  * API update HE-MU fields in the radiotap header
4451  *
4452  * Return: length of rtap_len updated.
4453  */
4454 static unsigned int
4455 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4456 				     int8_t *rtap_buf, uint32_t rtap_len)
4457 {
4458 	rtap_len = qdf_align(rtap_len, 2);
4459 
4460 	/*
4461 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4462 	 * Enable all "known" he-mu radiotap flags for now
4463 	 */
4464 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4465 	rtap_len += 2;
4466 
4467 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4468 	rtap_len += 2;
4469 
4470 	rtap_buf[rtap_len] = rx_status->he_RU[0];
4471 	rtap_len += 1;
4472 
4473 	rtap_buf[rtap_len] = rx_status->he_RU[1];
4474 	rtap_len += 1;
4475 
4476 	rtap_buf[rtap_len] = rx_status->he_RU[2];
4477 	rtap_len += 1;
4478 
4479 	rtap_buf[rtap_len] = rx_status->he_RU[3];
4480 	rtap_len += 1;
4481 	qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4482 		  rx_status->he_flags1,
4483 		  rx_status->he_flags2, rx_status->he_RU[0],
4484 		  rx_status->he_RU[1], rx_status->he_RU[2],
4485 		  rx_status->he_RU[3]);
4486 
4487 	return rtap_len;
4488 }
4489 
4490 /**
4491  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4492  * @rx_status: Pointer to rx_status.
4493  * @rtap_buf: buffer to which radiotap has to be updated
4494  * @rtap_len: radiotap length
4495  *
4496  * API update he-mu-other fields in the radiotap header
4497  *
4498  * Return: length of rtap_len updated.
4499  */
4500 static unsigned int
4501 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4502 				     int8_t *rtap_buf, uint32_t rtap_len)
4503 {
4504 	rtap_len = qdf_align(rtap_len, 2);
4505 
4506 	/*
4507 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4508 	 * Enable all "known" he-mu-other radiotap flags for now
4509 	 */
4510 	put_unaligned_le16(rx_status->he_per_user_1, &rtap_buf[rtap_len]);
4511 	rtap_len += 2;
4512 
4513 	put_unaligned_le16(rx_status->he_per_user_2, &rtap_buf[rtap_len]);
4514 	rtap_len += 2;
4515 
4516 	rtap_buf[rtap_len] = rx_status->he_per_user_position;
4517 	rtap_len += 1;
4518 
4519 	rtap_buf[rtap_len] = rx_status->he_per_user_known;
4520 	rtap_len += 1;
4521 	qdf_debug("he_per_user %x %x pos %x knwn %x",
4522 		  rx_status->he_per_user_1,
4523 		  rx_status->he_per_user_2, rx_status->he_per_user_position,
4524 		  rx_status->he_per_user_known);
4525 	return rtap_len;
4526 }
4527 
4528 #define IEEE80211_RADIOTAP_TX_STATUS 0
4529 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
4530 #define IEEE80211_RADIOTAP_EXTENSION2 2
4531 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
4532 
4533 /**
4534  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
4535  * @rx_status: Pointer to rx_status.
4536  * @rtap_buf: Buf to which AMPDU info has to be updated.
4537  * @rtap_len: Current length of radiotap buffer
4538  *
4539  * Return: Length of radiotap after AMPDU flags updated.
4540  */
4541 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4542 					struct mon_rx_status *rx_status,
4543 					uint8_t *rtap_buf,
4544 					uint32_t rtap_len)
4545 {
4546 	/*
4547 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
4548 	 * First 32 bits of AMPDU represents the reference number
4549 	 */
4550 
4551 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
4552 	uint16_t ampdu_flags = 0;
4553 	uint16_t ampdu_reserved_flags = 0;
4554 
4555 	rtap_len = qdf_align(rtap_len, 4);
4556 
4557 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
4558 	rtap_len += 4;
4559 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
4560 	rtap_len += 2;
4561 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
4562 	rtap_len += 2;
4563 
4564 	return rtap_len;
4565 }
4566 
4567 /**
4568  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
4569  * @rx_status: Pointer to rx_status.
4570  * @nbuf:      nbuf pointer to which radiotap has to be updated
4571  * @headroom_sz: Available headroom size.
4572  *
4573  * Return: length of rtap_len updated.
4574  */
4575 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4576 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4577 {
4578 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
4579 	struct ieee80211_radiotap_header *rthdr =
4580 		(struct ieee80211_radiotap_header *)rtap_buf;
4581 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
4582 	uint32_t rtap_len = rtap_hdr_len;
4583 	uint8_t length = rtap_len;
4584 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
4585 	struct qdf_radiotap_ext2 *rtap_ext2;
4586 	uint32_t *rtap_ext = NULL;
4587 
4588 	/* Adding Extended Header space */
4589 	if (rx_status->add_rtap_ext) {
4590 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
4591 		rtap_len = rtap_hdr_len;
4592 	}
4593 	length = rtap_len;
4594 
4595 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
4596 	rthdr->it_present = (1 << IEEE80211_RADIOTAP_TSFT);
4597 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
4598 	rtap_len += 8;
4599 
4600 	/* IEEE80211_RADIOTAP_FLAGS u8 */
4601 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_FLAGS);
4602 
4603 	if (rx_status->rs_fcs_err)
4604 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4605 
4606 	rtap_buf[rtap_len] = rx_status->rtap_flags;
4607 	rtap_len += 1;
4608 
4609 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
4610 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
4611 	    !rx_status->he_flags) {
4612 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_RATE);
4613 		rtap_buf[rtap_len] = rx_status->rate;
4614 	} else
4615 		rtap_buf[rtap_len] = 0;
4616 	rtap_len += 1;
4617 
4618 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
4619 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
4620 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
4621 	rtap_len += 2;
4622 	/* Channel flags. */
4623 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
4624 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
4625 	else
4626 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
4627 	if (rx_status->cck_flag)
4628 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
4629 	if (rx_status->ofdm_flag)
4630 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
4631 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
4632 	rtap_len += 2;
4633 
4634 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
4635 	 *					(dBm)
4636 	 */
4637 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
4638 	/*
4639 	 * rssi_comb is int dB, need to convert it to dBm.
4640 	 * normalize value to noise floor of -96 dBm
4641 	 */
4642 	rtap_buf[rtap_len] = rx_status->rssi_comb + rx_status->chan_noise_floor;
4643 	rtap_len += 1;
4644 
4645 	/* RX signal noise floor */
4646 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
4647 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
4648 	rtap_len += 1;
4649 
4650 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
4651 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_ANTENNA);
4652 	rtap_buf[rtap_len] = rx_status->nr_ant;
4653 	rtap_len += 1;
4654 
4655 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
4656 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
4657 		return 0;
4658 	}
4659 
4660 	if (rx_status->ht_flags) {
4661 		length = rtap_len;
4662 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
4663 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_MCS);
4664 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
4665 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
4666 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
4667 		rtap_len += 1;
4668 
4669 		if (rx_status->sgi)
4670 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
4671 		if (rx_status->bw)
4672 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
4673 		else
4674 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
4675 		rtap_len += 1;
4676 
4677 		rtap_buf[rtap_len] = rx_status->ht_mcs;
4678 		rtap_len += 1;
4679 
4680 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
4681 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
4682 			return 0;
4683 		}
4684 	}
4685 
4686 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
4687 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
4688 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
4689 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
4690 								rtap_buf,
4691 								rtap_len);
4692 	}
4693 
4694 	if (rx_status->vht_flags) {
4695 		length = rtap_len;
4696 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4697 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VHT);
4698 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
4699 								rtap_buf,
4700 								rtap_len);
4701 
4702 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
4703 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
4704 			return 0;
4705 		}
4706 	}
4707 
4708 	if (rx_status->he_flags) {
4709 		length = rtap_len;
4710 		/* IEEE80211_RADIOTAP_HE */
4711 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE);
4712 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
4713 								rtap_buf,
4714 								rtap_len);
4715 
4716 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
4717 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
4718 			return 0;
4719 		}
4720 	}
4721 
4722 	if (rx_status->he_mu_flags) {
4723 		length = rtap_len;
4724 		/* IEEE80211_RADIOTAP_HE-MU */
4725 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU);
4726 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
4727 								rtap_buf,
4728 								rtap_len);
4729 
4730 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
4731 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
4732 			return 0;
4733 		}
4734 	}
4735 
4736 	if (rx_status->he_mu_other_flags) {
4737 		length = rtap_len;
4738 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
4739 		rthdr->it_present |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
4740 		rtap_len =
4741 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
4742 								rtap_buf,
4743 								rtap_len);
4744 
4745 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
4746 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
4747 			return 0;
4748 		}
4749 	}
4750 
4751 	rtap_len = qdf_align(rtap_len, 2);
4752 	/*
4753 	 * Radiotap Vendor Namespace
4754 	 */
4755 	rthdr->it_present |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
4756 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
4757 					(rtap_buf + rtap_len);
4758 	/*
4759 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
4760 	 */
4761 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
4762 	/*
4763 	 * Name space selector = 0
4764 	 * We only will have one namespace for now
4765 	 */
4766 	radiotap_vendor_ns_ath->hdr.selector = 0;
4767 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
4768 					sizeof(*radiotap_vendor_ns_ath) -
4769 					sizeof(radiotap_vendor_ns_ath->hdr));
4770 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
4771 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
4772 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
4773 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
4774 				cpu_to_le32(rx_status->ppdu_timestamp);
4775 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
4776 
4777 	/* Add Extension to Radiotap Header & corresponding data */
4778 	if (rx_status->add_rtap_ext) {
4779 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXT);
4780 		rtap_ext = (uint32_t *)&rthdr->it_present;
4781 		rtap_ext++;
4782 		*rtap_ext = cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_STATUS);
4783 		*rtap_ext |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RETRY_COUNT);
4784 
4785 		rtap_buf[rtap_len] = rx_status->tx_status;
4786 		rtap_len += 1;
4787 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
4788 		rtap_len += 1;
4789 	}
4790 
4791 	/* Add Extension2 to Radiotap Header */
4792 	if (rx_status->add_rtap_ext2) {
4793 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXT);
4794 		rtap_ext = (uint32_t *)&rthdr->it_present;
4795 		rtap_ext++;
4796 		*rtap_ext |= cpu_to_le32(1 << IEEE80211_RADIOTAP_EXTENSION2);
4797 
4798 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
4799 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
4800 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
4801 		rtap_ext2->tid = rx_status->tid;
4802 		rtap_ext2->start_seq = rx_status->start_seq;
4803 		qdf_mem_copy(rtap_ext2->ba_bitmap,
4804 			     rx_status->ba_bitmap, 8 * (sizeof(uint32_t)));
4805 
4806 		rtap_len += sizeof(*rtap_ext2);
4807 	}
4808 
4809 	rthdr->it_len = cpu_to_le16(rtap_len);
4810 	rthdr->it_present = cpu_to_le32(rthdr->it_present);
4811 
4812 	if (headroom_sz < rtap_len) {
4813 		qdf_debug("DEBUG: Not enough space to update radiotap");
4814 		return 0;
4815 	}
4816 	qdf_nbuf_push_head(nbuf, rtap_len);
4817 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
4818 	return rtap_len;
4819 }
4820 #else
4821 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4822 					struct mon_rx_status *rx_status,
4823 					int8_t *rtap_buf,
4824 					uint32_t rtap_len)
4825 {
4826 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4827 	return 0;
4828 }
4829 
4830 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4831 				      int8_t *rtap_buf, uint32_t rtap_len)
4832 {
4833 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4834 	return 0;
4835 }
4836 
4837 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
4838 					struct mon_rx_status *rx_status,
4839 					uint8_t *rtap_buf,
4840 					uint32_t rtap_len)
4841 {
4842 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4843 	return 0;
4844 }
4845 
4846 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
4847 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
4848 {
4849 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
4850 	return 0;
4851 }
4852 #endif
4853 qdf_export_symbol(qdf_nbuf_update_radiotap);
4854 
4855 /**
4856  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
4857  * @cb_func_ptr: function pointer to the nbuf free callback
4858  *
4859  * This function registers a callback function for nbuf free.
4860  *
4861  * Return: none
4862  */
4863 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
4864 {
4865 	nbuf_free_cb = cb_func_ptr;
4866 }
4867 
4868 /**
4869  * qdf_nbuf_classify_pkt() - classify packet
4870  * @skb - sk buff
4871  *
4872  * Return: none
4873  */
4874 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
4875 {
4876 	struct ethhdr *eh = (struct ethhdr *)skb->data;
4877 
4878 	/* check destination mac address is broadcast/multicast */
4879 	if (is_broadcast_ether_addr((uint8_t *)eh))
4880 		QDF_NBUF_CB_SET_BCAST(skb);
4881 	else if (is_multicast_ether_addr((uint8_t *)eh))
4882 		QDF_NBUF_CB_SET_MCAST(skb);
4883 
4884 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
4885 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4886 			QDF_NBUF_CB_PACKET_TYPE_ARP;
4887 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
4888 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4889 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
4890 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
4891 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4892 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
4893 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
4894 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
4895 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
4896 }
4897 qdf_export_symbol(qdf_nbuf_classify_pkt);
4898 
4899 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
4900 {
4901 	qdf_nbuf_users_set(&nbuf->users, 1);
4902 	nbuf->data = nbuf->head + NET_SKB_PAD;
4903 	skb_reset_tail_pointer(nbuf);
4904 }
4905 qdf_export_symbol(__qdf_nbuf_init);
4906 
4907 #ifdef WLAN_FEATURE_FASTPATH
4908 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
4909 {
4910 	qdf_nbuf_users_set(&nbuf->users, 1);
4911 	nbuf->data = nbuf->head + NET_SKB_PAD;
4912 	skb_reset_tail_pointer(nbuf);
4913 }
4914 qdf_export_symbol(qdf_nbuf_init_fast);
4915 #endif /* WLAN_FEATURE_FASTPATH */
4916 
4917 
4918 #ifdef QDF_NBUF_GLOBAL_COUNT
4919 /**
4920  * __qdf_nbuf_mod_init() - Intialization routine for qdf_nuf
4921  *
4922  * Return void
4923  */
4924 void __qdf_nbuf_mod_init(void)
4925 {
4926 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
4927 	qdf_atomic_init(&nbuf_count);
4928 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
4929 }
4930 
4931 /**
4932  * __qdf_nbuf_mod_exit() - Unintialization routine for qdf_nuf
4933  *
4934  * Return void
4935  */
4936 void __qdf_nbuf_mod_exit(void)
4937 {
4938 }
4939 #endif
4940 
4941 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
4942 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4943 					    int offset)
4944 {
4945 	unsigned int frag_offset;
4946 	skb_frag_t *frag;
4947 
4948 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4949 		return QDF_STATUS_E_FAILURE;
4950 
4951 	frag = &skb_shinfo(nbuf)->frags[idx];
4952 	frag_offset = skb_frag_off(frag);
4953 
4954 	frag_offset += offset;
4955 	skb_frag_off_set(frag, frag_offset);
4956 
4957 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4958 
4959 	return QDF_STATUS_SUCCESS;
4960 }
4961 
4962 #else
4963 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
4964 					    int offset)
4965 {
4966 	uint16_t frag_offset;
4967 	skb_frag_t *frag;
4968 
4969 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
4970 		return QDF_STATUS_E_FAILURE;
4971 
4972 	frag = &skb_shinfo(nbuf)->frags[idx];
4973 	frag_offset = frag->page_offset;
4974 
4975 	frag_offset += offset;
4976 	frag->page_offset = frag_offset;
4977 
4978 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
4979 
4980 	return QDF_STATUS_SUCCESS;
4981 }
4982 #endif
4983 
4984 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
4985 
4986 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
4987 			    int offset, int frag_len,
4988 			    unsigned int truesize, bool take_frag_ref)
4989 {
4990 	struct page *page;
4991 	int frag_offset;
4992 	uint8_t nr_frag;
4993 
4994 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
4995 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
4996 
4997 	page = virt_to_head_page(buf);
4998 	frag_offset = buf - page_address(page);
4999 
5000 	skb_add_rx_frag(nbuf, nr_frag, page,
5001 			(frag_offset + offset),
5002 			frag_len, truesize);
5003 
5004 	if (unlikely(take_frag_ref)) {
5005 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5006 		skb_frag_ref(nbuf, nr_frag);
5007 	}
5008 }
5009 
5010 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
5011 
5012 #ifdef NBUF_FRAG_MEMORY_DEBUG
5013 
5014 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5015 						int offset, const char *func,
5016 						uint32_t line)
5017 {
5018 	QDF_STATUS result;
5019 	qdf_frag_t p_fragp, n_fragp;
5020 
5021 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5022 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5023 
5024 	if (qdf_likely(is_initial_mem_debug_disabled))
5025 		return result;
5026 
5027 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5028 
5029 	/*
5030 	 * Update frag address in frag debug tracker
5031 	 * when frag offset is successfully changed in skb
5032 	 */
5033 	if (result == QDF_STATUS_SUCCESS)
5034 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
5035 
5036 	return result;
5037 }
5038 
5039 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
5040 
5041 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
5042 				int offset, int frag_len,
5043 				unsigned int truesize, bool take_frag_ref,
5044 				const char *func, uint32_t line)
5045 {
5046 	qdf_frag_t fragp;
5047 	uint32_t num_nr_frags;
5048 
5049 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
5050 			       frag_len, truesize, take_frag_ref);
5051 
5052 	if (qdf_likely(is_initial_mem_debug_disabled))
5053 		return;
5054 
5055 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
5056 
5057 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5058 
5059 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
5060 
5061 	/* Update frag address in frag debug tracking table */
5062 	if (fragp != buf)
5063 		qdf_frag_debug_update_addr(buf, fragp, func, line);
5064 
5065 	/* Update frag refcount in frag debug tracking table */
5066 	qdf_frag_debug_refcount_inc(fragp, func, line);
5067 }
5068 
5069 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
5070 
5071 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
5072 				    uint32_t line)
5073 {
5074 	uint32_t num_nr_frags;
5075 	uint32_t idx = 0;
5076 	qdf_nbuf_t ext_list;
5077 	qdf_frag_t p_frag;
5078 
5079 	if (qdf_likely(is_initial_mem_debug_disabled))
5080 		return;
5081 
5082 	if (qdf_unlikely(!buf))
5083 		return;
5084 
5085 	/* Take care to update the refcount in the debug entries for frags */
5086 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5087 
5088 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5089 
5090 	while (idx < num_nr_frags) {
5091 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5092 		if (qdf_likely(p_frag))
5093 			qdf_frag_debug_refcount_inc(p_frag, func, line);
5094 		idx++;
5095 	}
5096 
5097 	/**
5098 	 * Take care to update the refcount in the debug entries for the
5099 	 * frags attached to frag_list
5100 	 */
5101 	ext_list = qdf_nbuf_get_ext_list(buf);
5102 	while (ext_list) {
5103 		idx = 0;
5104 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5105 
5106 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5107 
5108 		while (idx < num_nr_frags) {
5109 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5110 			if (qdf_likely(p_frag))
5111 				qdf_frag_debug_refcount_inc(p_frag, func, line);
5112 			idx++;
5113 		}
5114 		ext_list = qdf_nbuf_queue_next(ext_list);
5115 	}
5116 }
5117 
5118 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
5119 
5120 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
5121 				    uint32_t line)
5122 {
5123 	uint32_t num_nr_frags;
5124 	qdf_nbuf_t ext_list;
5125 	uint32_t idx = 0;
5126 	qdf_frag_t p_frag;
5127 
5128 	if (qdf_likely(is_initial_mem_debug_disabled))
5129 		return;
5130 
5131 	if (qdf_unlikely(!buf))
5132 		return;
5133 
5134 	/**
5135 	 * Decrement refcount for frag debug nodes only when last user
5136 	 * of nbuf calls this API so as to avoid decrementing refcount
5137 	 * on every call expect the last one in case where nbuf has multiple
5138 	 * users
5139 	 */
5140 	if (qdf_nbuf_get_users(buf) > 1)
5141 		return;
5142 
5143 	/* Take care to update the refcount in the debug entries for frags */
5144 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5145 
5146 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5147 
5148 	while (idx < num_nr_frags) {
5149 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5150 		if (qdf_likely(p_frag))
5151 			qdf_frag_debug_refcount_dec(p_frag, func, line);
5152 		idx++;
5153 	}
5154 
5155 	/* Take care to update debug entries for frags attached to frag_list */
5156 	ext_list = qdf_nbuf_get_ext_list(buf);
5157 	while (ext_list) {
5158 		if (qdf_nbuf_get_users(ext_list) == 1) {
5159 			idx = 0;
5160 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5161 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5162 			while (idx < num_nr_frags) {
5163 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5164 				if (qdf_likely(p_frag))
5165 					qdf_frag_debug_refcount_dec(p_frag,
5166 								    func, line);
5167 				idx++;
5168 			}
5169 		}
5170 		ext_list = qdf_nbuf_queue_next(ext_list);
5171 	}
5172 }
5173 
5174 qdf_export_symbol(qdf_net_buf_debug_release_frag);
5175 #endif /* NBUF_FRAG_MEMORY_DEBUG */
5176 
5177 #ifdef MEMORY_DEBUG
5178 void qdf_nbuf_acquire_track_lock(uint32_t index,
5179 				 unsigned long irq_flag)
5180 {
5181 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
5182 			  irq_flag);
5183 }
5184 
5185 void qdf_nbuf_release_track_lock(uint32_t index,
5186 				 unsigned long irq_flag)
5187 {
5188 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
5189 			       irq_flag);
5190 }
5191 
5192 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
5193 {
5194 	return gp_qdf_net_buf_track_tbl[index];
5195 }
5196 #endif /* MEMORY_DEBUG */
5197 
5198 #ifdef ENHANCED_OS_ABSTRACTION
5199 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
5200 {
5201 	__qdf_nbuf_set_timestamp(buf);
5202 }
5203 
5204 qdf_export_symbol(qdf_nbuf_set_timestamp);
5205 
5206 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
5207 {
5208 	return __qdf_nbuf_get_timestamp(buf);
5209 }
5210 
5211 qdf_export_symbol(qdf_nbuf_get_timestamp);
5212 
5213 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
5214 {
5215 	return __qdf_nbuf_get_timedelta_us(buf);
5216 }
5217 
5218 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
5219 
5220 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
5221 {
5222 	return __qdf_nbuf_get_timedelta_ms(buf);
5223 }
5224 
5225 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
5226 
5227 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
5228 {
5229 	return __qdf_nbuf_net_timedelta(t);
5230 }
5231 
5232 qdf_export_symbol(qdf_nbuf_net_timedelta);
5233 #endif
5234