xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework (QDF) network buffer management APIs
23  */
24 
25 #include <linux/hashtable.h>
26 #include <linux/kernel.h>
27 #include <linux/version.h>
28 #include <linux/skbuff.h>
29 #include <linux/module.h>
30 #include <linux/proc_fs.h>
31 #include <linux/inetdevice.h>
32 #include <qdf_atomic.h>
33 #include <qdf_debugfs.h>
34 #include <qdf_lock.h>
35 #include <qdf_mem.h>
36 #include <qdf_module.h>
37 #include <qdf_nbuf.h>
38 #include <qdf_status.h>
39 #include "qdf_str.h"
40 #include <qdf_trace.h>
41 #include "qdf_tracker.h"
42 #include <qdf_types.h>
43 #include <net/ieee80211_radiotap.h>
44 #include <pld_common.h>
45 
46 #if defined(FEATURE_TSO)
47 #include <net/ipv6.h>
48 #include <linux/ipv6.h>
49 #include <linux/tcp.h>
50 #include <linux/if_vlan.h>
51 #include <linux/ip.h>
52 #endif /* FEATURE_TSO */
53 
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
55 
56 #define qdf_nbuf_users_inc atomic_inc
57 #define qdf_nbuf_users_dec atomic_dec
58 #define qdf_nbuf_users_set atomic_set
59 #define qdf_nbuf_users_read atomic_read
60 #else
61 #define qdf_nbuf_users_inc refcount_inc
62 #define qdf_nbuf_users_dec refcount_dec
63 #define qdf_nbuf_users_set refcount_set
64 #define qdf_nbuf_users_read refcount_read
65 #endif /* KERNEL_VERSION(4, 13, 0) */
66 
67 #define IEEE80211_RADIOTAP_VHT_BW_20	0
68 #define IEEE80211_RADIOTAP_VHT_BW_40	1
69 #define IEEE80211_RADIOTAP_VHT_BW_80	2
70 #define IEEE80211_RADIOTAP_VHT_BW_160	3
71 
72 #define RADIOTAP_VHT_BW_20	0
73 #define RADIOTAP_VHT_BW_40	1
74 #define RADIOTAP_VHT_BW_80	4
75 #define RADIOTAP_VHT_BW_160	11
76 
77 /* tx status */
78 #define RADIOTAP_TX_STATUS_FAIL		1
79 #define RADIOTAP_TX_STATUS_NOACK	2
80 
81 /* channel number to freq conversion */
82 #define CHANNEL_NUM_14 14
83 #define CHANNEL_NUM_15 15
84 #define CHANNEL_NUM_27 27
85 #define CHANNEL_NUM_35 35
86 #define CHANNEL_NUM_182 182
87 #define CHANNEL_NUM_197 197
88 #define CHANNEL_FREQ_2484 2484
89 #define CHANNEL_FREQ_2407 2407
90 #define CHANNEL_FREQ_2512 2512
91 #define CHANNEL_FREQ_5000 5000
92 #define CHANNEL_FREQ_4000 4000
93 #define CHANNEL_FREQ_5150 5150
94 #define FREQ_MULTIPLIER_CONST_5MHZ 5
95 #define FREQ_MULTIPLIER_CONST_20MHZ 20
96 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
97 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
98 #define RADIOTAP_CCK_CHANNEL 0x0020
99 #define RADIOTAP_OFDM_CHANNEL 0x0040
100 
101 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
102 #include <qdf_mc_timer.h>
103 
104 struct qdf_track_timer {
105 	qdf_mc_timer_t track_timer;
106 	qdf_atomic_t alloc_fail_cnt;
107 };
108 
109 static struct qdf_track_timer alloc_track_timer;
110 
111 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
112 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
113 #endif
114 
115 #ifdef NBUF_MEMORY_DEBUG
116 /* SMMU crash indication */
117 static qdf_atomic_t smmu_crashed;
118 /* Number of nbufs not added to history */
119 unsigned long g_histroy_add_drop;
120 #endif
121 
122 /* Packet Counter */
123 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
124 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
125 #ifdef QDF_NBUF_GLOBAL_COUNT
126 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
127 static qdf_atomic_t nbuf_count;
128 #endif
129 
130 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
131 static bool is_initial_mem_debug_disabled;
132 #endif
133 
134 /**
135  * __qdf_nbuf_get_ip_offset() - get the IPv4/IPv6 header offset
136  * @data: Pointer to network data buffer
137  *
138  * Get the IP header offset, accounting for any 802.1Q or 802.1AD
139  * (double) VLAN tag present in the L2 header.
140  *
141  * Return: IP header offset
142  */
143 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
144 {
145 	uint16_t ether_type;
146 
147 	ether_type = *(uint16_t *)(data +
148 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
149 
150 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
151 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
152 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
153 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
154 
155 	return QDF_NBUF_TRAC_IP_OFFSET;
156 }
157 
158 /**
159  * __qdf_nbuf_get_ether_type() - get the ether type
160  * @data: Pointer to network data buffer
161  *
162  * Get the ether type even when an 802.1Q or 802.1AD tag is present
163  * in the L2 header. Note that the value is returned in network byte
164  * order, e.g. for IPv4 (ether type 0x0800) the returned value is 0x0008.
165  *
166  * Return: ether type
167  */
168 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
169 {
170 	uint16_t ether_type;
171 
172 	ether_type = *(uint16_t *)(data +
173 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
174 
175 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
176 		ether_type = *(uint16_t *)(data +
177 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
178 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
179 		ether_type = *(uint16_t *)(data +
180 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
181 
182 	return ether_type;
183 }
184 
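/*
 * Illustrative sketch (not part of the original file): how the two
 * helpers above are typically combined to find the IP header of a
 * possibly VLAN-tagged frame. The function name and caller are
 * hypothetical.
 */
static inline bool example_frame_is_ipv4(uint8_t *frame_data)
{
	uint8_t *iph;

	/* Ether type is returned in network byte order, so compare
	 * against a byte-swapped constant (0x0800 stored as 0x0008).
	 */
	if (__qdf_nbuf_get_ether_type(frame_data) !=
	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
		return false;

	/* Skip the L2 header, including any 802.1Q/802.1AD tags */
	iph = frame_data + __qdf_nbuf_get_ip_offset(frame_data);

	/* Upper nibble of the first IP header byte is the version */
	return (iph[0] >> 4) == 4;
}
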
185 /**
186  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
187  *
188  * Return: none
189  */
190 void qdf_nbuf_tx_desc_count_display(void)
191 {
192 	qdf_debug("Current Snapshot of the Driver:");
193 	qdf_debug("Data Packets:");
194 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
195 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
196 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
198 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
205 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
207 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
211 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
212 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
213 	qdf_debug("Mgmt Packets:");
214 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
215 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
216 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
227 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
228 }
229 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
230 
231 /**
232  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
233  * @packet_type: packet type, either mgmt or data
234  * @current_state: layer at which the packet is currently present
235  *
236  * Return: none
237  */
238 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
239 			uint8_t current_state)
240 {
241 	switch (packet_type) {
242 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
243 		nbuf_tx_mgmt[current_state]++;
244 		break;
245 	case QDF_NBUF_TX_PKT_DATA_TRACK:
246 		nbuf_tx_data[current_state]++;
247 		break;
248 	default:
249 		break;
250 	}
251 }
252 
253 /**
254  * qdf_nbuf_tx_desc_count_clear() - Clears packet counters for both data and mgmt
255  *
256  * Return: none
257  */
258 void qdf_nbuf_tx_desc_count_clear(void)
259 {
260 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
261 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
262 }
263 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
264 
265 /**
266  * qdf_nbuf_set_state() - Updates the packet state
267  * @nbuf: network buffer
268  * @current_state: layer at which the packet currently is
269  *
270  * This function updates the packet state to the layer at which the packet
271  * currently is
272  *
273  * Return: none
274  */
275 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
276 {
277 	/*
278 	 * Only Mgmt, Data Packets are tracked. WMI messages
279 	 * such as scan commands are not tracked
280 	 */
281 	uint8_t packet_type;
282 
283 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
284 
285 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
286 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
287 		return;
288 	}
289 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
290 	qdf_nbuf_tx_desc_count_update(packet_type,
291 					current_state);
292 }
293 qdf_export_symbol(qdf_nbuf_set_state);
294 
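/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a TX path tags each data frame as it crosses a layer boundary
 * so the per-layer counters above can show where packets are stuck.
 * Assumes the buffer was already marked as a data packet via
 * QDF_NBUF_CB_TX_PACKET_TRACK(); untracked packets are ignored.
 */
static void example_track_tx_data_nbuf(qdf_nbuf_t nbuf)
{
	/* HDD currently owns the buffer */
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);

	/* ... lower layers would advance the state, e.g.
	 * qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX); ...
	 */

	/* Dump the per-layer deltas when debugging a stalled queue */
	qdf_nbuf_tx_desc_count_display();
}
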
295 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
296 /**
297  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
298  *
299  * This function starts the alloc fail replenish timer.
300  *
301  * Return: void
302  */
303 static inline void __qdf_nbuf_start_replenish_timer(void)
304 {
305 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
306 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
307 	    QDF_TIMER_STATE_RUNNING)
308 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
309 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
310 }
311 
312 /**
313  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
314  *
315  * This function stops the alloc fail replenish timer.
316  *
317  * Return: void
318  */
319 static inline void __qdf_nbuf_stop_replenish_timer(void)
320 {
321 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
322 		return;
323 
324 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
325 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
326 	    QDF_TIMER_STATE_RUNNING)
327 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
328 }
329 
330 /**
331  * qdf_replenish_expire_handler() - Replenish expire handler
332  * @arg: opaque timer callback argument (unused)
333  * This function triggers when the alloc fail replenish timer expires.
334  *
335  * Return: void
336  */
337 static void qdf_replenish_expire_handler(void *arg)
338 {
339 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
340 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
341 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
342 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
343 
344 		/* Error handling here */
345 	}
346 }
347 
348 /**
349  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
350  *
351  * This function initializes the nbuf alloc fail replenish timer.
352  *
353  * Return: void
354  */
355 void __qdf_nbuf_init_replenish_timer(void)
356 {
357 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
358 			  qdf_replenish_expire_handler, NULL);
359 }
360 
361 /**
362  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
363  *
364  * This function deinitializes the nbuf alloc fail replenish timer.
365  *
366  * Return: void
367  */
368 void __qdf_nbuf_deinit_replenish_timer(void)
369 {
370 	__qdf_nbuf_stop_replenish_timer();
371 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
372 }
373 
374 void qdf_nbuf_stop_replenish_timer(void)
375 {
376 	__qdf_nbuf_stop_replenish_timer();
377 }
378 #else
379 
380 static inline void __qdf_nbuf_start_replenish_timer(void) {}
381 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
382 void qdf_nbuf_stop_replenish_timer(void)
383 {
384 }
385 #endif
386 
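/*
 * Illustrative sketch (hypothetical call sites): the replenish timer is
 * initialized once at driver attach and destroyed at detach; the alloc
 * path above starts/stops it automatically on allocation failures.
 * Assumes the init/deinit helpers are also stubbed out when
 * FEATURE_NBUFF_REPLENISH_TIMER is disabled.
 */
static void example_replenish_timer_lifecycle(void)
{
	/* At driver attach */
	__qdf_nbuf_init_replenish_timer();

	/* ... normal operation: __qdf_nbuf_alloc() starts the timer on
	 * allocation failure and stops it once allocations succeed ...
	 */

	/* At driver detach */
	__qdf_nbuf_deinit_replenish_timer();
}
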
387 /* globals do not need to be initialized to NULL/0 */
388 qdf_nbuf_trace_update_t qdf_trace_update_cb;
389 qdf_nbuf_free_t nbuf_free_cb;
390 
391 #ifdef QDF_NBUF_GLOBAL_COUNT
392 
393 /**
394  * __qdf_nbuf_count_get() - get nbuf global count
395  *
396  * Return: nbuf global count
397  */
398 int __qdf_nbuf_count_get(void)
399 {
400 	return qdf_atomic_read(&nbuf_count);
401 }
402 qdf_export_symbol(__qdf_nbuf_count_get);
403 
404 /**
405  * __qdf_nbuf_count_inc() - increment nbuf global count
406  *
407  * @nbuf: network buffer
408  *
409  * Return: void
410  */
411 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
412 {
413 	int num_nbuf = 1;
414 	qdf_nbuf_t ext_list;
415 
416 	if (qdf_likely(is_initial_mem_debug_disabled))
417 		return;
418 
419 	ext_list = qdf_nbuf_get_ext_list(nbuf);
420 
421 	/* Take care to account for frag_list */
422 	while (ext_list) {
423 		++num_nbuf;
424 		ext_list = qdf_nbuf_queue_next(ext_list);
425 	}
426 
427 	qdf_atomic_add(num_nbuf, &nbuf_count);
428 }
429 qdf_export_symbol(__qdf_nbuf_count_inc);
430 
431 /**
432  * __qdf_nbuf_count_dec() - decrement nbuf global count
433  *
434  * @nbuf: network buffer
435  *
436  * Return: void
437  */
438 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
439 {
440 	qdf_nbuf_t ext_list;
441 	int num_nbuf;
442 
443 	if (qdf_likely(is_initial_mem_debug_disabled))
444 		return;
445 
446 	if (qdf_nbuf_get_users(nbuf) > 1)
447 		return;
448 
449 	num_nbuf = 1;
450 
451 	/* Take care to account for frag_list */
452 	ext_list = qdf_nbuf_get_ext_list(nbuf);
453 	while (ext_list) {
454 		if (qdf_nbuf_get_users(ext_list) == 1)
455 			++num_nbuf;
456 		ext_list = qdf_nbuf_queue_next(ext_list);
457 	}
458 
459 	qdf_atomic_sub(num_nbuf, &nbuf_count);
460 }
461 qdf_export_symbol(__qdf_nbuf_count_dec);
462 #endif
463 
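/*
 * Illustrative sketch (hypothetical helper): the global counter can be
 * sampled around a test scenario to catch gross nbuf leaks even when
 * the full NBUF_MEMORY_DEBUG tracking is not compiled in.
 */
static void example_check_nbuf_leak(void (*run_scenario)(void))
{
	int before = __qdf_nbuf_count_get();

	run_scenario();

	if (__qdf_nbuf_count_get() != before)
		qdf_debug("possible nbuf leak: %d -> %d",
			  before, __qdf_nbuf_count_get());
}
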
464 #ifdef NBUF_FRAG_MEMORY_DEBUG
465 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
466 {
467 	qdf_nbuf_t ext_list;
468 	uint32_t num_nr_frags;
469 	uint32_t total_num_nr_frags;
470 
471 	if (qdf_likely(is_initial_mem_debug_disabled))
472 		return;
473 
474 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
475 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
476 
477 	total_num_nr_frags = num_nr_frags;
478 
479 	/* Take into account the frags attached to frag_list */
480 	ext_list = qdf_nbuf_get_ext_list(nbuf);
481 	while (ext_list) {
482 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
483 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
484 		total_num_nr_frags += num_nr_frags;
485 		ext_list = qdf_nbuf_queue_next(ext_list);
486 	}
487 
488 	qdf_frag_count_inc(total_num_nr_frags);
489 }
490 
491 qdf_export_symbol(qdf_nbuf_frag_count_inc);
492 
493 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
494 {
495 	qdf_nbuf_t ext_list;
496 	uint32_t num_nr_frags;
497 	uint32_t total_num_nr_frags;
498 
499 	if (qdf_likely(is_initial_mem_debug_disabled))
500 		return;
501 
502 	if (qdf_nbuf_get_users(nbuf) > 1)
503 		return;
504 
505 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
506 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
507 
508 	total_num_nr_frags = num_nr_frags;
509 
510 	/* Take into account the frags attached to frag_list */
511 	ext_list = qdf_nbuf_get_ext_list(nbuf);
512 	while (ext_list) {
513 		if (qdf_nbuf_get_users(ext_list) == 1) {
514 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
515 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
516 			total_num_nr_frags += num_nr_frags;
517 		}
518 		ext_list = qdf_nbuf_queue_next(ext_list);
519 	}
520 
521 	qdf_frag_count_dec(total_num_nr_frags);
522 }
523 
524 qdf_export_symbol(qdf_nbuf_frag_count_dec);
525 
526 #endif
527 
528 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
529 	!defined(QCA_WIFI_QCN9000)
530 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
531 				 int align, int prio, const char *func,
532 				 uint32_t line)
533 {
534 	struct sk_buff *skb;
535 	unsigned long offset;
536 	uint32_t lowmem_alloc_tries = 0;
537 
538 	if (align)
539 		size += (align - 1);
540 
541 realloc:
542 	skb = dev_alloc_skb(size);
543 
544 	if (skb)
545 		goto skb_alloc;
546 
547 	skb = pld_nbuf_pre_alloc(size);
548 
549 	if (!skb) {
550 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
551 				size, func, line);
552 		return NULL;
553 	}
554 
555 skb_alloc:
556 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040.
557 	 * Though we try to reserve low memory upfront to prevent this,
558 	 * we sometimes see SKBs allocated from low memory.
559 	 */
560 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
561 		lowmem_alloc_tries++;
562 		if (lowmem_alloc_tries > 100) {
563 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
564 				     size, func, line);
565 			return NULL;
566 		} else {
567 			/* Not freeing to make sure it
568 			 * will not get allocated again
569 			 */
570 			goto realloc;
571 		}
572 	}
573 	memset(skb->cb, 0x0, sizeof(skb->cb));
574 
575 	/*
576 	 * The default is for netbuf fragments to be interpreted
577 	 * as wordstreams rather than bytestreams.
578 	 */
579 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
580 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
581 
582 	/*
583 	 * XXX:how about we reserve first then align
584 	 * Align & make sure that the tail & data are adjusted properly
585 	 */
586 
587 	if (align) {
588 		offset = ((unsigned long)skb->data) % align;
589 		if (offset)
590 			skb_reserve(skb, align - offset);
591 	}
592 
593 	/*
594 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
595 	 * pointer
596 	 */
597 	skb_reserve(skb, reserve);
598 	qdf_nbuf_count_inc(skb);
599 
600 	return skb;
601 }
602 #else
603 
604 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
605 				 int align, int prio, const char *func,
606 				 uint32_t line)
607 {
608 	struct sk_buff *skb;
609 	unsigned long offset;
610 	int flags = GFP_KERNEL;
611 
612 	if (align)
613 		size += (align - 1);
614 
615 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
616 		flags = GFP_ATOMIC;
617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
618 		/*
619 		 * Observed that kcompactd burns CPU trying to make order-3 pages.
620 		 * __netdev_alloc_skb() has a 4k page fallback in case the
621 		 * high-order allocation fails, so we don't need to push that
622 		 * hard here. Let kcompactd rest in peace.
623 		 */
624 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
625 #endif
626 	}
627 
628 	skb = __netdev_alloc_skb(NULL, size, flags);
629 
630 	if (skb)
631 		goto skb_alloc;
632 
633 	skb = pld_nbuf_pre_alloc(size);
634 
635 	if (!skb) {
636 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
637 				size, func, line);
638 		__qdf_nbuf_start_replenish_timer();
639 		return NULL;
640 	} else {
641 		__qdf_nbuf_stop_replenish_timer();
642 	}
643 
644 skb_alloc:
645 	memset(skb->cb, 0x0, sizeof(skb->cb));
646 
647 	/*
648 	 * The default is for netbuf fragments to be interpreted
649 	 * as wordstreams rather than bytestreams.
650 	 */
651 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
652 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
653 
654 	/*
655 	 * XXX:how about we reserve first then align
656 	 * Align & make sure that the tail & data are adjusted properly
657 	 */
658 
659 	if (align) {
660 		offset = ((unsigned long)skb->data) % align;
661 		if (offset)
662 			skb_reserve(skb, align - offset);
663 	}
664 
665 	/*
666 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
667 	 * pointer
668 	 */
669 	skb_reserve(skb, reserve);
670 	qdf_nbuf_count_inc(skb);
671 
672 	return skb;
673 }
674 #endif
675 qdf_export_symbol(__qdf_nbuf_alloc);
676 
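/*
 * Illustrative sketch (hypothetical caller): a typical allocation
 * reserves headroom for descriptors and requests aligned data; the
 * align/reserve handling above adjusts skb->data accordingly.
 */
static qdf_nbuf_t example_alloc_rx_buffer(qdf_device_t osdev)
{
	/* 2048-byte buffer, 64 bytes of headroom, 4-byte aligned data,
	 * default priority; __func__/__LINE__ feed the failure logs.
	 */
	return __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0, __func__, __LINE__);
}
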
677 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
678 					  const char *func, uint32_t line)
679 {
680 	qdf_nbuf_t nbuf;
681 	unsigned long offset;
682 
683 	if (align)
684 		size += (align - 1);
685 
686 	nbuf = alloc_skb(size, GFP_ATOMIC);
687 	if (!nbuf)
688 		goto ret_nbuf;
689 
690 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
691 
692 	skb_reserve(nbuf, reserve);
693 
694 	if (align) {
695 		offset = ((unsigned long)nbuf->data) % align;
696 		if (offset)
697 			skb_reserve(nbuf, align - offset);
698 	}
699 
700 	qdf_nbuf_count_inc(nbuf);
701 
702 ret_nbuf:
703 	return nbuf;
704 }
705 
706 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
707 
708 /**
709  * __qdf_nbuf_free() - free the nbuf; this variant is interrupt safe
710  * @skb: Pointer to network buffer
711  *
712  * Return: none
713  */
714 
715 void __qdf_nbuf_free(struct sk_buff *skb)
716 {
717 	if (pld_nbuf_pre_alloc_free(skb))
718 		return;
719 
720 	qdf_nbuf_frag_count_dec(skb);
721 
722 	qdf_nbuf_count_dec(skb);
723 	if (nbuf_free_cb)
724 		nbuf_free_cb(skb);
725 	else
726 		dev_kfree_skb_any(skb);
727 }
728 
729 qdf_export_symbol(__qdf_nbuf_free);
730 
731 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
732 {
733 	qdf_nbuf_t skb_new = NULL;
734 
735 	skb_new = skb_clone(skb, GFP_ATOMIC);
736 	if (skb_new) {
737 		qdf_nbuf_frag_count_inc(skb_new);
738 		qdf_nbuf_count_inc(skb_new);
739 	}
740 	return skb_new;
741 }
742 
743 qdf_export_symbol(__qdf_nbuf_clone);
744 
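/*
 * Illustrative sketch (hypothetical caller): a clone shares the data
 * buffer with the original, so the clone and the original are freed
 * independently; the debug counters above are adjusted on both paths.
 */
static void example_clone_and_free(qdf_nbuf_t orig)
{
	qdf_nbuf_t copy = __qdf_nbuf_clone(orig);

	if (!copy)
		return;

	/* ... hand "copy" to a second consumer ... */

	/* Each reference is released separately */
	__qdf_nbuf_free(copy);
	__qdf_nbuf_free(orig);
}
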
745 #ifdef NBUF_MEMORY_DEBUG
746 struct qdf_nbuf_event {
747 	qdf_nbuf_t nbuf;
748 	char func[QDF_MEM_FUNC_NAME_SIZE];
749 	uint32_t line;
750 	enum qdf_nbuf_event_type type;
751 	uint64_t timestamp;
752 	qdf_dma_addr_t iova;
753 };
754 
755 #ifndef QDF_NBUF_HISTORY_SIZE
756 #define QDF_NBUF_HISTORY_SIZE 4096
757 #endif
758 static qdf_atomic_t qdf_nbuf_history_index;
759 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
760 
761 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
762 {
763 	int32_t next = qdf_atomic_inc_return(index);
764 
765 	if (next == size)
766 		qdf_atomic_sub(size, index);
767 
768 	return next % size;
769 }
770 
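/*
 * Worked example (informal): with size == QDF_NBUF_HISTORY_SIZE == 4096
 * and the index starting at 0, successive calls return 1, 2, ..., 4095,
 * then 0 (the counter is pulled back by one full period when it reaches
 * the size), then 1 again, so history slots are reused oldest-first.
 */
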
771 void
772 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
773 		     enum qdf_nbuf_event_type type)
774 {
775 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
776 						   QDF_NBUF_HISTORY_SIZE);
777 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
778 
779 	if (qdf_atomic_read(&smmu_crashed)) {
780 		g_histroy_add_drop++;
781 		return;
782 	}
783 
784 	event->nbuf = nbuf;
785 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
786 	event->line = line;
787 	event->type = type;
788 	event->timestamp = qdf_get_log_timestamp();
789 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP)
790 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
791 	else
792 		event->iova = 0;
793 }
794 
795 void qdf_set_smmu_fault_state(bool smmu_fault_state)
796 {
797 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
798 	if (!smmu_fault_state)
799 		g_histroy_add_drop = 0;
800 }
801 qdf_export_symbol(qdf_set_smmu_fault_state);
802 #endif /* NBUF_MEMORY_DEBUG */
803 
804 #ifdef NBUF_MAP_UNMAP_DEBUG
805 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
806 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
807 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
808 
809 static void qdf_nbuf_map_tracking_init(void)
810 {
811 	qdf_tracker_init(&qdf_nbuf_map_tracker);
812 }
813 
814 static void qdf_nbuf_map_tracking_deinit(void)
815 {
816 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
817 }
818 
819 static QDF_STATUS
820 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
821 {
822 	if (is_initial_mem_debug_disabled)
823 		return QDF_STATUS_SUCCESS;
824 
825 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
826 }
827 
828 static void
829 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
830 {
831 	if (is_initial_mem_debug_disabled)
832 		return;
833 
834 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
835 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
836 }
837 
838 void qdf_nbuf_map_check_for_leaks(void)
839 {
840 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
841 }
842 
843 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
844 			      qdf_nbuf_t buf,
845 			      qdf_dma_dir_t dir,
846 			      const char *func,
847 			      uint32_t line)
848 {
849 	QDF_STATUS status;
850 
851 	status = qdf_nbuf_track_map(buf, func, line);
852 	if (QDF_IS_STATUS_ERROR(status))
853 		return status;
854 
855 	status = __qdf_nbuf_map(osdev, buf, dir);
856 	if (QDF_IS_STATUS_ERROR(status)) {
857 		qdf_nbuf_untrack_map(buf, func, line);
858 	} else {
859 		if (!is_initial_mem_debug_disabled)
860 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
861 		qdf_net_buf_debug_update_map_node(buf, func, line);
862 	}
863 
864 	return status;
865 }
866 
867 qdf_export_symbol(qdf_nbuf_map_debug);
868 
869 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
870 			  qdf_nbuf_t buf,
871 			  qdf_dma_dir_t dir,
872 			  const char *func,
873 			  uint32_t line)
874 {
875 	qdf_nbuf_untrack_map(buf, func, line);
876 	__qdf_nbuf_unmap_single(osdev, buf, dir);
877 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
878 }
879 
880 qdf_export_symbol(qdf_nbuf_unmap_debug);
881 
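/*
 * Illustrative sketch (hypothetical TX path): every successful
 * qdf_nbuf_map_debug() must be balanced by qdf_nbuf_unmap_debug()
 * before the buffer is released, otherwise the tracker above reports
 * the buffer as a map-no-unmap leak.
 */
static QDF_STATUS example_dma_map_for_tx(qdf_device_t osdev, qdf_nbuf_t nbuf)
{
	QDF_STATUS status;

	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
				    __func__, __LINE__);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... program the hardware descriptor with the DMA address ... */

	/* Balance the map before the buffer is released */
	qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
			     __func__, __LINE__);
	return QDF_STATUS_SUCCESS;
}
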
882 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
883 				     qdf_nbuf_t buf,
884 				     qdf_dma_dir_t dir,
885 				     const char *func,
886 				     uint32_t line)
887 {
888 	QDF_STATUS status;
889 
890 	status = qdf_nbuf_track_map(buf, func, line);
891 	if (QDF_IS_STATUS_ERROR(status))
892 		return status;
893 
894 	status = __qdf_nbuf_map_single(osdev, buf, dir);
895 	if (QDF_IS_STATUS_ERROR(status)) {
896 		qdf_nbuf_untrack_map(buf, func, line);
897 	} else {
898 		if (!is_initial_mem_debug_disabled)
899 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
900 		qdf_net_buf_debug_update_map_node(buf, func, line);
901 	}
902 
903 	return status;
904 }
905 
906 qdf_export_symbol(qdf_nbuf_map_single_debug);
907 
908 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
909 				 qdf_nbuf_t buf,
910 				 qdf_dma_dir_t dir,
911 				 const char *func,
912 				 uint32_t line)
913 {
914 	qdf_nbuf_untrack_map(buf, func, line);
915 	__qdf_nbuf_unmap_single(osdev, buf, dir);
916 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
917 }
918 
919 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
920 
921 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
922 				     qdf_nbuf_t buf,
923 				     qdf_dma_dir_t dir,
924 				     int nbytes,
925 				     const char *func,
926 				     uint32_t line)
927 {
928 	QDF_STATUS status;
929 
930 	status = qdf_nbuf_track_map(buf, func, line);
931 	if (QDF_IS_STATUS_ERROR(status))
932 		return status;
933 
934 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
935 	if (QDF_IS_STATUS_ERROR(status)) {
936 		qdf_nbuf_untrack_map(buf, func, line);
937 	} else {
938 		if (!is_initial_mem_debug_disabled)
939 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
940 		qdf_net_buf_debug_update_map_node(buf, func, line);
941 	}
942 
943 	return status;
944 }
945 
946 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
947 
948 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
949 				 qdf_nbuf_t buf,
950 				 qdf_dma_dir_t dir,
951 				 int nbytes,
952 				 const char *func,
953 				 uint32_t line)
954 {
955 	qdf_nbuf_untrack_map(buf, func, line);
956 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
957 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
958 }
959 
960 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
961 
962 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
963 					    qdf_nbuf_t buf,
964 					    qdf_dma_dir_t dir,
965 					    int nbytes,
966 					    const char *func,
967 					    uint32_t line)
968 {
969 	QDF_STATUS status;
970 
971 	status = qdf_nbuf_track_map(buf, func, line);
972 	if (QDF_IS_STATUS_ERROR(status))
973 		return status;
974 
975 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
976 	if (QDF_IS_STATUS_ERROR(status)) {
977 		qdf_nbuf_untrack_map(buf, func, line);
978 	} else {
979 		if (!is_initial_mem_debug_disabled)
980 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
981 		qdf_net_buf_debug_update_map_node(buf, func, line);
982 	}
983 
984 	return status;
985 }
986 
987 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
988 
989 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
990 					qdf_nbuf_t buf,
991 					qdf_dma_dir_t dir,
992 					int nbytes,
993 					const char *func,
994 					uint32_t line)
995 {
996 	qdf_nbuf_untrack_map(buf, func, line);
997 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
998 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
999 }
1000 
1001 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1002 
1003 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1004 					      qdf_nbuf_t buf,
1005 					      qdf_dma_addr_t phy_addr,
1006 					      qdf_dma_dir_t dir, int nbytes,
1007 					      const char *func, uint32_t line)
1008 {
1009 	qdf_nbuf_untrack_map(buf, func, line);
1010 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1011 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1012 }
1013 
1014 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1015 
1016 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1017 					     const char *func,
1018 					     uint32_t line)
1019 {
1020 	char map_func[QDF_TRACKER_FUNC_SIZE];
1021 	uint32_t map_line;
1022 
1023 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1024 				&map_func, &map_line))
1025 		return;
1026 
1027 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1028 			   func, line, map_func, map_line);
1029 }
1030 #else
1031 static inline void qdf_nbuf_map_tracking_init(void)
1032 {
1033 }
1034 
1035 static inline void qdf_nbuf_map_tracking_deinit(void)
1036 {
1037 }
1038 
1039 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1040 						    const char *func,
1041 						    uint32_t line)
1042 {
1043 }
1044 #endif /* NBUF_MAP_UNMAP_DEBUG */
1045 
1046 /**
1047  * __qdf_nbuf_map() - map a buffer to local bus address space
1048  * @osdev: OS device
1050  * @skb: Pointer to network buffer
1051  * @dir: Direction
1052  *
1053  * Return: QDF_STATUS
1054  */
1055 #ifdef QDF_OS_DEBUG
1056 QDF_STATUS
1057 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1058 {
1059 	struct skb_shared_info *sh = skb_shinfo(skb);
1060 
1061 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1062 			|| (dir == QDF_DMA_FROM_DEVICE));
1063 
1064 	/*
1065 	 * Assume there's only a single fragment.
1066 	 * To support multiple fragments, it would be necessary to change
1067 	 * qdf_nbuf_t to be a separate object that stores meta-info
1068 	 * (including the bus address for each fragment) and a pointer
1069 	 * to the underlying sk_buff.
1070 	 */
1071 	qdf_assert(sh->nr_frags == 0);
1072 
1073 	return __qdf_nbuf_map_single(osdev, skb, dir);
1074 }
1075 qdf_export_symbol(__qdf_nbuf_map);
1076 
1077 #else
1078 QDF_STATUS
1079 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1080 {
1081 	return __qdf_nbuf_map_single(osdev, skb, dir);
1082 }
1083 qdf_export_symbol(__qdf_nbuf_map);
1084 #endif
1085 /**
1086  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1087  * @osdev: OS device
1088  * @skb: Pointer to network buffer
1089  * @dir: dma direction
1090  *
1091  * Return: none
1092  */
1093 void
1094 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1095 			qdf_dma_dir_t dir)
1096 {
1097 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1098 		   || (dir == QDF_DMA_FROM_DEVICE));
1099 
1100 	/*
1101 	 * Assume there's a single fragment.
1102 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1103 	 */
1104 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1105 }
1106 qdf_export_symbol(__qdf_nbuf_unmap);
1107 
1108 /**
1109  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1110  * @osdev: OS device
1111  * @buf: Pointer to network buffer
1112  * @dir: Direction
1113  *
1114  * Return: QDF_STATUS
1115  */
1116 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1117 QDF_STATUS
1118 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1119 {
1120 	qdf_dma_addr_t paddr;
1121 
1122 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1123 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1124 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1125 	return QDF_STATUS_SUCCESS;
1126 }
1127 qdf_export_symbol(__qdf_nbuf_map_single);
1128 #else
1129 QDF_STATUS
1130 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1131 {
1132 	qdf_dma_addr_t paddr;
1133 
1134 	/* assume that the OS only provides a single fragment */
1135 	QDF_NBUF_CB_PADDR(buf) = paddr =
1136 		dma_map_single(osdev->dev, buf->data,
1137 				skb_end_pointer(buf) - buf->data,
1138 				__qdf_dma_dir_to_os(dir));
1139 	__qdf_record_nbuf_nbytes(
1140 		__qdf_nbuf_get_end_offset(buf), dir, true);
1141 	return dma_mapping_error(osdev->dev, paddr)
1142 		? QDF_STATUS_E_FAILURE
1143 		: QDF_STATUS_SUCCESS;
1144 }
1145 qdf_export_symbol(__qdf_nbuf_map_single);
1146 #endif
1147 /**
1148  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1149  * @osdev: OS device
1150  * @buf: Pointer to network buffer
1151  * @dir: Direction
1152  *
1153  * Return: none
1154  */
1155 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1156 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1157 				qdf_dma_dir_t dir)
1158 {
1159 }
1160 #else
1161 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1162 					qdf_dma_dir_t dir)
1163 {
1164 	if (QDF_NBUF_CB_PADDR(buf)) {
1165 		__qdf_record_nbuf_nbytes(
1166 			__qdf_nbuf_get_end_offset(buf), dir, false);
1167 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1168 			skb_end_pointer(buf) - buf->data,
1169 			__qdf_dma_dir_to_os(dir));
1170 	}
1171 }
1172 #endif
1173 qdf_export_symbol(__qdf_nbuf_unmap_single);
1174 
1175 /**
1176  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1177  * @skb: Pointer to network buffer
1178  * @cksum: Pointer to checksum value
1179  *
1180  * Return: QDF_STATUS
1181  */
1182 QDF_STATUS
1183 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1184 {
1185 	switch (cksum->l4_result) {
1186 	case QDF_NBUF_RX_CKSUM_NONE:
1187 		skb->ip_summed = CHECKSUM_NONE;
1188 		break;
1189 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1190 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1191 		break;
1192 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1193 		skb->ip_summed = CHECKSUM_PARTIAL;
1194 		skb->csum = cksum->val;
1195 		break;
1196 	default:
1197 		pr_err("Unknown checksum type\n");
1198 		qdf_assert(0);
1199 		return QDF_STATUS_E_NOSUPPORT;
1200 	}
1201 	return QDF_STATUS_SUCCESS;
1202 }
1203 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1204 
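/*
 * Illustrative sketch (hypothetical RX completion handler): the
 * hardware checksum result is translated into qdf_nbuf_rx_cksum_t
 * before the frame is handed to the network stack.
 */
static void example_set_rx_cksum(struct sk_buff *skb, bool hw_verified)
{
	qdf_nbuf_rx_cksum_t cksum = {0};

	/* Hardware already validated the TCP/UDP checksum */
	cksum.l4_result = hw_verified ?
		QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY :
		QDF_NBUF_RX_CKSUM_NONE;

	__qdf_nbuf_set_rx_cksum(skb, &cksum);
}
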
1205 /**
1206  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1207  * @skb: Pointer to network buffer
1208  *
1209  * Return: TX checksum value
1210  */
1211 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1212 {
1213 	switch (skb->ip_summed) {
1214 	case CHECKSUM_NONE:
1215 		return QDF_NBUF_TX_CKSUM_NONE;
1216 	case CHECKSUM_PARTIAL:
1217 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1218 	case CHECKSUM_COMPLETE:
1219 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1220 	default:
1221 		return QDF_NBUF_TX_CKSUM_NONE;
1222 	}
1223 }
1224 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1225 
1226 /**
1227  * __qdf_nbuf_get_tid() - get tid
1228  * @skb: Pointer to network buffer
1229  *
1230  * Return: tid
1231  */
1232 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1233 {
1234 	return skb->priority;
1235 }
1236 qdf_export_symbol(__qdf_nbuf_get_tid);
1237 
1238 /**
1239  * __qdf_nbuf_set_tid() - set tid
1240  * @skb: Pointer to network buffer
1241  * @tid: TID value to set
1242  * Return: none
1243  */
1244 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1245 {
1246 	skb->priority = tid;
1247 }
1248 qdf_export_symbol(__qdf_nbuf_set_tid);
1249 
1250 /**
1251  * __qdf_nbuf_get_exemption_type() - get exemption type
1252  * @skb: Pointer to network buffer
1253  *
1254  * Return: exemption type
1255  */
1256 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1257 {
1258 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1259 }
1260 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1261 
1262 /**
1263  * __qdf_nbuf_reg_trace_cb() - register trace callback
1264  * @cb_func_ptr: Pointer to trace callback function
1265  *
1266  * Return: none
1267  */
1268 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1269 {
1270 	qdf_trace_update_cb = cb_func_ptr;
1271 }
1272 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1273 
1274 /**
1275  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1276  *              of DHCP packet.
1277  * @data: Pointer to DHCP packet data buffer
1278  *
1279  * This func. returns the subtype of DHCP packet.
1280  *
1281  * Return: subtype of the DHCP packet.
1282  */
1283 enum qdf_proto_subtype
1284 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1285 {
1286 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1287 
1288 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1289 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1290 					QDF_DHCP_OPTION53_LENGTH)) {
1291 
1292 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1293 		case QDF_DHCP_DISCOVER:
1294 			subtype = QDF_PROTO_DHCP_DISCOVER;
1295 			break;
1296 		case QDF_DHCP_REQUEST:
1297 			subtype = QDF_PROTO_DHCP_REQUEST;
1298 			break;
1299 		case QDF_DHCP_OFFER:
1300 			subtype = QDF_PROTO_DHCP_OFFER;
1301 			break;
1302 		case QDF_DHCP_ACK:
1303 			subtype = QDF_PROTO_DHCP_ACK;
1304 			break;
1305 		case QDF_DHCP_NAK:
1306 			subtype = QDF_PROTO_DHCP_NACK;
1307 			break;
1308 		case QDF_DHCP_RELEASE:
1309 			subtype = QDF_PROTO_DHCP_RELEASE;
1310 			break;
1311 		case QDF_DHCP_INFORM:
1312 			subtype = QDF_PROTO_DHCP_INFORM;
1313 			break;
1314 		case QDF_DHCP_DECLINE:
1315 			subtype = QDF_PROTO_DHCP_DECLINE;
1316 			break;
1317 		default:
1318 			break;
1319 		}
1320 	}
1321 
1322 	return subtype;
1323 }
1324 
1325 /**
1326  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype
1327  *            of EAPOL packet.
1328  * @data: Pointer to EAPOL packet data buffer
1329  *
1330  * This func. returns the subtype of EAPOL packet.
1331  *
1332  * Return: subtype of the EAPOL packet.
1333  */
1334 enum qdf_proto_subtype
1335 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1336 {
1337 	uint16_t eapol_key_info;
1338 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1339 	uint16_t mask;
1340 
1341 	eapol_key_info = (uint16_t)(*(uint16_t *)
1342 			(data + EAPOL_KEY_INFO_OFFSET));
1343 
1344 	mask = eapol_key_info & EAPOL_MASK;
1345 	switch (mask) {
1346 	case EAPOL_M1_BIT_MASK:
1347 		subtype = QDF_PROTO_EAPOL_M1;
1348 		break;
1349 	case EAPOL_M2_BIT_MASK:
1350 		subtype = QDF_PROTO_EAPOL_M2;
1351 		break;
1352 	case EAPOL_M3_BIT_MASK:
1353 		subtype = QDF_PROTO_EAPOL_M3;
1354 		break;
1355 	case EAPOL_M4_BIT_MASK:
1356 		subtype = QDF_PROTO_EAPOL_M4;
1357 		break;
1358 	default:
1359 		break;
1360 	}
1361 
1362 	return subtype;
1363 }
1364 
1365 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1366 
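/*
 * Illustrative sketch (hypothetical logging helper): the key-info mask
 * above distinguishes the four EAPOL-Key handshake frames; this is
 * typically used only for connection tracing.
 */
static void example_log_eapol_frame(uint8_t *data)
{
	enum qdf_proto_subtype subtype;

	if (__qdf_nbuf_get_ether_type(data) !=
	    QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
		return;

	subtype = __qdf_nbuf_data_get_eapol_subtype(data);
	if (subtype != QDF_PROTO_INVALID)
		qdf_debug("EAPOL handshake frame, subtype %d", subtype);
}
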
1367 /**
1368  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1369  *            of ARP packet.
1370  * @data: Pointer to ARP packet data buffer
1371  *
1372  * This func. returns the subtype of ARP packet.
1373  *
1374  * Return: subtype of the ARP packet.
1375  */
1376 enum qdf_proto_subtype
1377 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1378 {
1379 	uint16_t subtype;
1380 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1381 
1382 	subtype = (uint16_t)(*(uint16_t *)
1383 			(data + ARP_SUB_TYPE_OFFSET));
1384 
1385 	switch (QDF_SWAP_U16(subtype)) {
1386 	case ARP_REQUEST:
1387 		proto_subtype = QDF_PROTO_ARP_REQ;
1388 		break;
1389 	case ARP_RESPONSE:
1390 		proto_subtype = QDF_PROTO_ARP_RES;
1391 		break;
1392 	default:
1393 		break;
1394 	}
1395 
1396 	return proto_subtype;
1397 }
1398 
1399 /**
1400  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1401  *            of IPV4 ICMP packet.
1402  * @data: Pointer to IPV4 ICMP packet data buffer
1403  *
1404  * This func. returns the subtype of ICMP packet.
1405  *
1406  * Return: subtype of the ICMP packet.
1407  */
1408 enum qdf_proto_subtype
1409 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1410 {
1411 	uint8_t subtype;
1412 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1413 
1414 	subtype = (uint8_t)(*(uint8_t *)
1415 			(data + ICMP_SUBTYPE_OFFSET));
1416 
1417 	switch (subtype) {
1418 	case ICMP_REQUEST:
1419 		proto_subtype = QDF_PROTO_ICMP_REQ;
1420 		break;
1421 	case ICMP_RESPONSE:
1422 		proto_subtype = QDF_PROTO_ICMP_RES;
1423 		break;
1424 	default:
1425 		break;
1426 	}
1427 
1428 	return proto_subtype;
1429 }
1430 
1431 /**
1432  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1433  *            of IPV6 ICMPV6 packet.
1434  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1435  *
1436  * This func. returns the subtype of ICMPV6 packet.
1437  *
1438  * Return: subtype of the ICMPV6 packet.
1439  */
1440 enum qdf_proto_subtype
1441 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1442 {
1443 	uint8_t subtype;
1444 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1445 
1446 	subtype = (uint8_t)(*(uint8_t *)
1447 			(data + ICMPV6_SUBTYPE_OFFSET));
1448 
1449 	switch (subtype) {
1450 	case ICMPV6_REQUEST:
1451 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1452 		break;
1453 	case ICMPV6_RESPONSE:
1454 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1455 		break;
1456 	case ICMPV6_RS:
1457 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1458 		break;
1459 	case ICMPV6_RA:
1460 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1461 		break;
1462 	case ICMPV6_NS:
1463 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1464 		break;
1465 	case ICMPV6_NA:
1466 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1467 		break;
1468 	default:
1469 		break;
1470 	}
1471 
1472 	return proto_subtype;
1473 }
1474 
1475 /**
1476  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1477  *            of IPV4 packet.
1478  * @data: Pointer to IPV4 packet data buffer
1479  *
1480  * This func. returns the proto type of IPV4 packet.
1481  *
1482  * Return: proto type of IPV4 packet.
1483  */
1484 uint8_t
1485 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1486 {
1487 	uint8_t proto_type;
1488 
1489 	proto_type = (uint8_t)(*(uint8_t *)(data +
1490 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1491 	return proto_type;
1492 }
1493 
1494 /**
1495  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1496  *            of IPV6 packet.
1497  * @data: Pointer to IPV6 packet data buffer
1498  *
1499  * This func. returns the proto type of IPV6 packet.
1500  *
1501  * Return: proto type of IPV6 packet.
1502  */
1503 uint8_t
1504 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1505 {
1506 	uint8_t proto_type;
1507 
1508 	proto_type = (uint8_t)(*(uint8_t *)(data +
1509 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1510 	return proto_type;
1511 }
1512 
1513 /**
1514  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1515  * @data: Pointer to network data
1516  *
1517  * This api is for Tx packets.
1518  *
1519  * Return: true if packet is ipv4 packet
1520  *	   false otherwise
1521  */
1522 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1523 {
1524 	uint16_t ether_type;
1525 
1526 	ether_type = (uint16_t)(*(uint16_t *)(data +
1527 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1528 
1529 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1530 		return true;
1531 	else
1532 		return false;
1533 }
1534 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1535 
1536 /**
1537  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
1538  * @data: Pointer to network data buffer
1539  *
1540  * This api is for ipv4 packet.
1541  *
1542  * Return: true if packet is DHCP packet
1543  *	   false otherwise
1544  */
1545 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1546 {
1547 	uint16_t sport;
1548 	uint16_t dport;
1549 	uint8_t ipv4_offset;
1550 	uint8_t ipv4_hdr_len;
1551 	struct iphdr *iphdr;
1552 
1553 	if (__qdf_nbuf_get_ether_type(data) !=
1554 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1555 		return false;
1556 
1557 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1558 	iphdr = (struct iphdr *)(data + ipv4_offset);
1559 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1560 
1561 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1562 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1563 			      sizeof(uint16_t));
1564 
1565 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1566 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1567 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1568 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1569 		return true;
1570 	else
1571 		return false;
1572 }
1573 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1574 
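/*
 * Illustrative sketch (hypothetical RX classifier): the DHCP check
 * above is usually paired with the subtype parser near the top of this
 * file to trace DISCOVER/OFFER/REQUEST/ACK exchanges.
 */
static void example_log_dhcp_frame(uint8_t *data)
{
	enum qdf_proto_subtype subtype;

	if (!__qdf_nbuf_data_is_ipv4_dhcp_pkt(data))
		return;

	subtype = __qdf_nbuf_data_get_dhcp_subtype(data);
	if (subtype != QDF_PROTO_INVALID)
		qdf_debug("DHCP frame, subtype %d", subtype);
}
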
1575 /**
1576  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is a eapol packet
1577  * @data: Pointer to network data buffer
1578  *
1579  * This api is for ipv4 packet.
1580  *
1581  * Return: true if packet is EAPOL packet
1582  *	   false otherwise.
1583  */
1584 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1585 {
1586 	uint16_t ether_type;
1587 
1588 	ether_type = __qdf_nbuf_get_ether_type(data);
1589 
1590 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1591 		return true;
1592 	else
1593 		return false;
1594 }
1595 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1596 
1597 /**
1598  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
1599  * @skb: Pointer to network buffer
1600  *
1601  * This api is for ipv4 packet.
1602  *
1603  * Return: true if packet is WAPI packet
1604  *	   false otherwise.
1605  */
1606 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1607 {
1608 	uint16_t ether_type;
1609 
1610 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1611 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1612 
1613 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1614 		return true;
1615 	else
1616 		return false;
1617 }
1618 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1619 
1620 /**
1621  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is a igmp packet
1622  * @data: Pointer to network data buffer
1623  *
1624  * This api is for ipv4 packet.
1625  *
1626  * Return: true if packet is igmp packet
1627  *	   false otherwise.
1628  */
1629 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1630 {
1631 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1632 		uint8_t pkt_type;
1633 
1634 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1635 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1636 
1637 		if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
1638 			return true;
1639 	}
1640 	return false;
1641 }
1642 
1643 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
1644 
1645 /**
1646  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is a igmp packet
1647  * @data: Pointer to network data buffer
1648  *
1649  * This api is for ipv6 packet.
1650  *
1651  * Return: true if packet is igmp packet
1652  *	   false otherwise.
1653  */
1654 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
1655 {
1656 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
1657 		uint8_t pkt_type;
1658 		uint8_t next_hdr;
1659 
1660 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1661 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1662 		next_hdr = (uint8_t)(*(uint8_t *)(data +
1663 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
1664 
1665 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
1666 			return true;
1667 		if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
1668 		    (next_hdr == QDF_NBUF_TRAC_HOPOPTS_TYPE))
1669 			return true;
1670 	}
1671 	return false;
1672 }
1673 
1674 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
1675 
1676 /**
1677  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
1678  * @skb: Pointer to network buffer
1679  *
1680  * This api is for ipv4 packet.
1681  *
1682  * Return: true if packet is tdls packet
1683  *	   false otherwise.
1684  */
1685 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
1686 {
1687 	uint16_t ether_type;
1688 
1689 	ether_type = *(uint16_t *)(skb->data +
1690 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1691 
1692 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
1693 		return true;
1694 	else
1695 		return false;
1696 }
1697 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
1698 
1699 /**
1700  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is a arp packet
1701  * @data: Pointer to network data buffer
1702  *
1703  * This api is for ipv4 packet.
1704  *
1705  * Return: true if packet is ARP packet
1706  *	   false otherwise.
1707  */
1708 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
1709 {
1710 	uint16_t ether_type;
1711 
1712 	ether_type = __qdf_nbuf_get_ether_type(data);
1713 
1714 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
1715 		return true;
1716 	else
1717 		return false;
1718 }
1719 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
1720 
1721 /**
1722  * __qdf_nbuf_data_is_arp_req() - check if skb data is a arp request
1723  * @data: Pointer to network data buffer
1724  *
1725  * This api is for ipv4 packet.
1726  *
1727  * Return: true if packet is ARP request
1728  *	   false otherwise.
1729  */
1730 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
1731 {
1732 	uint16_t op_code;
1733 
1734 	op_code = (uint16_t)(*(uint16_t *)(data +
1735 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1736 
1737 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
1738 		return true;
1739 	return false;
1740 }
1741 
1742 /**
1743  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is a arp response
1744  * @data: Pointer to network data buffer
1745  *
1746  * This api is for ipv4 packet.
1747  *
1748  * Return: true if packet is ARP response
1749  *	   false otherwise.
1750  */
1751 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
1752 {
1753 	uint16_t op_code;
1754 
1755 	op_code = (uint16_t)(*(uint16_t *)(data +
1756 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
1757 
1758 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
1759 		return true;
1760 	return false;
1761 }
1762 
1763 /**
1764  * __qdf_nbuf_get_arp_src_ip() - get ARP source IP
1765  * @data: Pointer to network data buffer
1766  *
1767  * This api is for ipv4 packet.
1768  *
1769  * Return: ARP packet source IP value.
1770  */
1771 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
1772 {
1773 	uint32_t src_ip;
1774 
1775 	src_ip = (uint32_t)(*(uint32_t *)(data +
1776 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
1777 
1778 	return src_ip;
1779 }
1780 
1781 /**
1782  * __qdf_nbuf_get_arp_tgt_ip() - get ARP target IP
1783  * @data: Pointer to network data buffer
1784  *
1785  * This api is for ipv4 packet.
1786  *
1787  * Return: ARP packet target IP value.
1788  */
1789 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
1790 {
1791 	uint32_t tgt_ip;
1792 
1793 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
1794 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
1795 
1796 	return tgt_ip;
1797 }
1798 
1799 /**
1800  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
1801  * @data: Pointer to network data buffer
1802  * @len: length to copy
1803  *
1804  * This api is for dns domain name
1805  *
1806  * Return: dns domain name.
1807  */
1808 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
1809 {
1810 	uint8_t *domain_name;
1811 
1812 	domain_name = (uint8_t *)
1813 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
1814 	return domain_name;
1815 }
1816 
1817 
1818 /**
1819  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
1820  * @data: Pointer to network data buffer
1821  *
1822  * This api is for dns query packet.
1823  *
1824  * Return: true if packet is dns query packet.
1825  *	   false otherwise.
1826  */
1827 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
1828 {
1829 	uint16_t op_code;
1830 	uint16_t tgt_port;
1831 
1832 	tgt_port = (uint16_t)(*(uint16_t *)(data +
1833 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
1834 	/* Standard DNS query always happens on Dest Port 53. */
1835 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1836 		op_code = (uint16_t)(*(uint16_t *)(data +
1837 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1838 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1839 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
1840 			return true;
1841 	}
1842 	return false;
1843 }
1844 
1845 /**
1846  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
1847  * @data: Pointer to network data buffer
1848  *
1849  * This api is for dns query response.
1850  *
1851  * Return: true if packet is dns response packet.
1852  *	   false otherwise.
1853  */
1854 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
1855 {
1856 	uint16_t op_code;
1857 	uint16_t src_port;
1858 
1859 	src_port = (uint16_t)(*(uint16_t *)(data +
1860 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
1861 	/* Standard DNS response always comes on Src Port 53. */
1862 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
1863 		op_code = (uint16_t)(*(uint16_t *)(data +
1864 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
1865 
1866 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
1867 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
1868 			return true;
1869 	}
1870 	return false;
1871 }
1872 
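/*
 * Illustrative sketch (hypothetical helper): a latency tracker might
 * time-stamp outgoing standard queries and match them with responses
 * using the two checks above.
 */
static bool example_is_dns_traffic(uint8_t *data)
{
	/* True for a standard query (dst port 53) or a standard
	 * response (src port 53); other DNS opcodes are ignored.
	 */
	return __qdf_nbuf_data_is_dns_query(data) ||
	       __qdf_nbuf_data_is_dns_response(data);
}
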
1873 /**
1874  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
1875  * @data: Pointer to network data buffer
1876  *
1877  * This api is to check if the packet is tcp fin.
1878  *
1879  * Return: true if packet is tcp fin packet.
1880  *         false otherwise.
1881  */
1882 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
1883 {
1884 	uint8_t op_code;
1885 
1886 	op_code = (uint8_t)(*(uint8_t *)(data +
1887 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1888 
1889 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
1890 		return true;
1891 
1892 	return false;
1893 }
1894 
1895 /**
1896  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
1897  * @data: Pointer to network data buffer
1898  *
1899  * This api is to check if the tcp packet is fin ack.
1900  *
1901  * Return: true if packet is tcp fin ack packet.
1902  *         false otherwise.
1903  */
1904 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
1905 {
1906 	uint8_t op_code;
1907 
1908 	op_code = (uint8_t)(*(uint8_t *)(data +
1909 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1910 
1911 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
1912 		return true;
1913 
1914 	return false;
1915 }
1916 
1917 /**
1918  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
1919  * @data: Pointer to network data buffer
1920  *
1921  * This api is for tcp syn packet.
1922  *
1923  * Return: true if packet is tcp syn packet.
1924  *	   false otherwise.
1925  */
1926 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
1927 {
1928 	uint8_t op_code;
1929 
1930 	op_code = (uint8_t)(*(uint8_t *)(data +
1931 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1932 
1933 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
1934 		return true;
1935 	return false;
1936 }
1937 
1938 /**
1939  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
1940  * @data: Pointer to network data buffer
1941  *
1942  * This api is for tcp syn ack packet.
1943  *
1944  * Return: true if packet is tcp syn ack packet.
1945  *	   false otherwise.
1946  */
1947 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
1948 {
1949 	uint8_t op_code;
1950 
1951 	op_code = (uint8_t)(*(uint8_t *)(data +
1952 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1953 
1954 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
1955 		return true;
1956 	return false;
1957 }
1958 
1959 /**
1960  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
1961  * @data: Pointer to network data buffer
1962  *
1963  * This api is to check if the tcp packet is rst.
1964  *
1965  * Return: true if packet is tcp rst packet.
1966  *         false otherwise.
1967  */
1968 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
1969 {
1970 	uint8_t op_code;
1971 
1972 	op_code = (uint8_t)(*(uint8_t *)(data +
1973 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1974 
1975 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
1976 		return true;
1977 
1978 	return false;
1979 }
1980 
1981 /**
1982  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
1983  * @data: Pointer to network data buffer
1984  *
1985  * This api is for tcp ack packet.
1986  *
1987  * Return: true if packet is tcp ack packet.
1988  *	   false otherwise.
1989  */
1990 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
1991 {
1992 	uint8_t op_code;
1993 
1994 	op_code = (uint8_t)(*(uint8_t *)(data +
1995 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
1996 
1997 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
1998 		return true;
1999 	return false;
2000 }
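
/*
 * Illustrative sketch (not part of the driver): the helpers above all key
 * off the same TCP flags byte at QDF_NBUF_PKT_TCP_OPCODE_OFFSET, so a
 * caller typically probes them in sequence to label a frame, e.g.:
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_tcp_syn(data))
 *		subtype = SYN;		(enum values used for illustration)
 *	else if (__qdf_nbuf_data_is_tcp_syn_ack(data))
 *		subtype = SYN_ACK;
 *	else if (__qdf_nbuf_data_is_tcp_fin(data))
 *		subtype = FIN;
 *
 * Note the checks compare the whole flags byte, so FIN and FIN+ACK are
 * matched by distinct QDF_NBUF_PKT_TCPOP_* values.
 */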
2001 
2002 /**
2003  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
2004  * @data: Pointer to network data buffer
2005  *
2006  * This api is for tcp packet.
2007  *
2008  * Return: tcp source port value.
2009  */
2010 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2011 {
2012 	uint16_t src_port;
2013 
2014 	src_port = (uint16_t)(*(uint16_t *)(data +
2015 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2016 
2017 	return src_port;
2018 }
2019 
2020 /**
2021  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
2022  * @data: Pointer to network data buffer
2023  *
2024  * This api is for tcp packet.
2025  *
2026  * Return: tcp destination port value.
2027  */
2028 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2029 {
2030 	uint16_t tgt_port;
2031 
2032 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2033 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2034 
2035 	return tgt_port;
2036 }
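
/*
 * Illustrative note (not part of the driver): the two helpers above return
 * the port field as it sits on the wire (network byte order).  A caller
 * that wants to compare against a host-order port number would typically
 * swap one side, e.g.:
 *
 *	uint16_t dport = __qdf_nbuf_data_get_tcp_dst_port(data);
 *
 *	if (dport == QDF_SWAP_U16(80))
 *		... treat as HTTP traffic (port chosen only for illustration)
 */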
2037 
2038 /**
2039  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
2040  * @data: Pointer to network data buffer
2041  *
2042  * This api is to check if the packet is an icmpv4 request.

2043  *
2044  * Return: true if packet is icmpv4 request
2045  *	   false otherwise.
2046  */
2047 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2048 {
2049 	uint8_t op_code;
2050 
2051 	op_code = (uint8_t)(*(uint8_t *)(data +
2052 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2053 
2054 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2055 		return true;
2056 	return false;
2057 }
2058 
2059 /**
2060  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
2061  * @data: Pointer to network data buffer
2062  *
2063  * This api is to check if the packet is an icmpv4 response.
2064  *
2065  * Return: true if packet is icmpv4 response
2066  *	   false otherwise.
2067  */
2068 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2069 {
2070 	uint8_t op_code;
2071 
2072 	op_code = (uint8_t)(*(uint8_t *)(data +
2073 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2074 
2075 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2076 		return true;
2077 	return false;
2078 }
2079 
2080 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2081 {
2082 	uint8_t op_code;
2083 
2084 	op_code = (uint8_t)(*(uint8_t *)(data +
2085 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2086 
2087 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2088 		return true;
2089 	return false;
2090 }
2091 
2092 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2093 
2094 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2095 {
2096 	uint8_t subtype;
2097 
2098 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2099 
2100 	if (subtype == ICMPV6_REDIRECT)
2101 		return true;
2102 	return false;
2103 }
2104 
2105 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2106 
2107 /**
2108  * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
2109  * @data: Pointer to network data buffer
2110  *
2111  * This api is for ipv4 packet.
2112  *
2113  * Return: icmpv4 packet source IP value.
2114  */
2115 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2116 {
2117 	uint32_t src_ip;
2118 
2119 	src_ip = (uint32_t)(*(uint32_t *)(data +
2120 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2121 
2122 	return src_ip;
2123 }
2124 
2125 /**
2126  * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
2127  * @data: Pointer to network data buffer
2128  *
2129  * This api is for ipv4 packet.
2130  *
2131  * Return: icmpv4 packet target IP value.
2132  */
2133 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2134 {
2135 	uint32_t tgt_ip;
2136 
2137 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2138 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2139 
2140 	return tgt_ip;
2141 }
2142 
2143 
2144 /**
2145  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
2146  * @data: Pointer to IPV6 packet data buffer
2147  *
2148  * This func. checks whether it is a IPV6 packet or not.
2149  * This func. checks whether it is an IPV6 packet or not.
2150  *
2151  * Return: TRUE if it is an IPV6 packet
2152  */
2153 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2154 {
2155 	uint16_t ether_type;
2156 
2157 	ether_type = (uint16_t)(*(uint16_t *)(data +
2158 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2159 
2160 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2161 		return true;
2162 	else
2163 		return false;
2164 }
2165 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2166 
2167 /**
2168  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2169  * @data: Pointer to network data buffer
2170  *
2171  * This api is for ipv6 packet.
2172  *
2173  * Return: true if packet is DHCP packet
2174  *	   false otherwise
2175  */
2176 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2177 {
2178 	uint16_t sport;
2179 	uint16_t dport;
2180 	uint8_t ipv6_offset;
2181 
2182 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2183 		return false;
2184 
2185 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2186 	sport = *(uint16_t *)(data + ipv6_offset +
2187 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2188 	dport = *(uint16_t *)(data + ipv6_offset +
2189 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2190 			      sizeof(uint16_t));
2191 
2192 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2193 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2194 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2195 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2196 		return true;
2197 	else
2198 		return false;
2199 }
2200 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
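
/*
 * Illustrative note (not part of the driver): DHCPv6 uses UDP port 546 on
 * the client side and 547 on the server side, so the check above accepts
 * either direction.  For a client-originated SOLICIT, for instance, the
 * UDP header right after the fixed 40-byte IPv6 header carries
 * sport = 546 (0x0222) and dport = 547 (0x0223) in network byte order,
 * which matches the second half of the condition (assuming the
 * QDF_NBUF_TRAC_DHCP6_* macros carry those standard port numbers).
 */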
2201 
2202 /**
2203  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is an mdns packet
2204  * @data: Pointer to network data buffer
2205  *
2206  * This api is for ipv6 packet.
2207  *
2208  * Return: true if packet is MDNS packet
2209  *	   false otherwise
2210  */
2211 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2212 {
2213 	uint16_t sport;
2214 	uint16_t dport;
2215 
2216 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2217 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2218 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2219 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2220 					sizeof(uint16_t));
2221 
2222 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2223 	    dport == sport)
2224 		return true;
2225 	else
2226 		return false;
2227 }
2228 
2229 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2230 
2231 /**
2232  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2233  * @data: Pointer to IPV4 packet data buffer
2234  *
2235  * This func. checks whether it is an IPV4 multicast packet or not.
2236  *
2237  * Return: TRUE if it is an IPV4 multicast packet
2238  *         FALSE if not
2239  */
2240 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2241 {
2242 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2243 		uint32_t *dst_addr =
2244 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2245 
2246 		/*
2247 		 * Check the top nibble of the IPv4 destination address;
2248 		 * a value of 0xE (i.e. 224.0.0.0/4) indicates a multicast IP.
2249 		 */
2250 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2251 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2252 			return true;
2253 		else
2254 			return false;
2255 	} else
2256 		return false;
2257 }
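
/*
 * Worked example (illustrative, not part of the driver): for the SSDP
 * group 239.255.255.250 the destination address starts with octet 0xEF;
 * masking off everything but the top nibble leaves 0xE, the IPv4
 * multicast class 224.0.0.0/4, so the function returns true.  A unicast
 * destination such as 192.168.1.10 starts with 0xC0 and fails the same
 * comparison.
 */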
2258 
2259 /**
2260  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2261  * @data: Pointer to IPV6 packet data buffer
2262  *
2263  * This func. checks whether it is an IPV6 multicast packet or not.
2264  *
2265  * Return: TRUE if it is an IPV6 multicast packet
2266  *         FALSE if not
2267  */
2268 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2269 {
2270 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2271 		uint16_t *dst_addr;
2272 
2273 		dst_addr = (uint16_t *)
2274 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2275 
2276 		/*
2277 		 * Check the first 16 bits of the IPv6 destination address;
2278 		 * a value of 0xFF00 indicates an IPV6 mcast packet.
2279 		 */
2280 		if (*dst_addr ==
2281 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2282 			return true;
2283 		else
2284 			return false;
2285 	} else
2286 		return false;
2287 }
2288 
2289 /**
2290  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2291  * @data: Pointer to IPV4 ICMP packet data buffer
2292  *
2293  * This func. checks whether it is an ICMP packet or not.
2294  *
2295  * Return: TRUE if it is an ICMP packet
2296  *         FALSE if not
2297  */
2298 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2299 {
2300 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2301 		uint8_t pkt_type;
2302 
2303 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2304 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2305 
2306 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2307 			return true;
2308 		else
2309 			return false;
2310 	} else
2311 		return false;
2312 }
2313 
2314 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2315 
2316 /**
2317  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2318  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2319  *
2320  * This func. checks whether it is an ICMPV6 packet or not.
2321  *
2322  * Return: TRUE if it is an ICMPV6 packet
2323  *         FALSE if not
2324  */
2325 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2326 {
2327 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2328 		uint8_t pkt_type;
2329 
2330 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2331 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2332 
2333 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2334 			return true;
2335 		else
2336 			return false;
2337 	} else
2338 		return false;
2339 }
2340 
2341 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2342 
2343 /**
2344  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2345  * @data: Pointer to IPV4 UDP packet data buffer
2346  *
2347  * This func. checks whether it is an IPV4 UDP packet or not.
2348  *
2349  * Return: TRUE if it is an IPV4 UDP packet
2350  *         FALSE if not
2351  */
2352 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2353 {
2354 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2355 		uint8_t pkt_type;
2356 
2357 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2358 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2359 
2360 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2361 			return true;
2362 		else
2363 			return false;
2364 	} else
2365 		return false;
2366 }
2367 
2368 /**
2369  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2370  * @data: Pointer to IPV4 TCP packet data buffer
2371  *
2372  * This func. checks whether it is an IPV4 TCP packet or not.
2373  *
2374  * Return: TRUE if it is an IPV4 TCP packet
2375  *         FALSE if not
2376  */
2377 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2378 {
2379 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2380 		uint8_t pkt_type;
2381 
2382 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2383 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2384 
2385 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2386 			return true;
2387 		else
2388 			return false;
2389 	} else
2390 		return false;
2391 }
2392 
2393 /**
2394  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2395  * @data: Pointer to IPV6 UDP packet data buffer
2396  *
2397  * This func. checks whether it is an IPV6 UDP packet or not.
2398  *
2399  * Return: TRUE if it is an IPV6 UDP packet
2400  *         FALSE if not
2401  */
2402 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2403 {
2404 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2405 		uint8_t pkt_type;
2406 
2407 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2408 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2409 
2410 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2411 			return true;
2412 		else
2413 			return false;
2414 	} else
2415 		return false;
2416 }
2417 
2418 /**
2419  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
2420  * @data: Pointer to IPV6 TCP packet data buffer
2421  *
2422  * This func. checks whether it is an IPV6 TCP packet or not.
2423  *
2424  * Return: TRUE if it is an IPV6 TCP packet
2425  *         FALSE if not
2426  */
2427 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2428 {
2429 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2430 		uint8_t pkt_type;
2431 
2432 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2433 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2434 
2435 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2436 			return true;
2437 		else
2438 			return false;
2439 	} else
2440 		return false;
2441 }
2442 
2443 /**
2444  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
2445  * @nbuf: sk buff
2446  *
2447  * Return: true if packet is broadcast
2448  *	   false otherwise
2449  */
2450 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2451 {
2452 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2453 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2454 }
2455 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2456 
2457 /**
2458  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
2459  * @nbuf: sk buff
2460  *
2461  * Return: true if packet is multicast replay
2462  *	   false otherwise
2463  */
2464 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2465 {
2466 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2467 
2468 	if (unlikely(nbuf->pkt_type == PACKET_MULTICAST)) {
2469 		if (unlikely(ether_addr_equal(eh->h_source,
2470 					      nbuf->dev->dev_addr)))
2471 			return true;
2472 	}
2473 	return false;
2474 }
2475 
2476 /**
2477  * __qdf_nbuf_is_arp_local() - check if the ARP request targets a local address
2478  * @skb: pointer to sk_buff
2479  *
2480  * Return: true if the ARP request is for a local IP address, false otherwise.
2481  */
2482 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
2483 {
2484 	struct arphdr *arp;
2485 	struct in_ifaddr **ifap = NULL;
2486 	struct in_ifaddr *ifa = NULL;
2487 	struct in_device *in_dev;
2488 	unsigned char *arp_ptr;
2489 	__be32 tip;
2490 
2491 	arp = (struct arphdr *)skb->data;
2492 	if (arp->ar_op == htons(ARPOP_REQUEST)) {
2493 		/* if fail to acquire rtnl lock, assume it's local arp */
2494 		if (!rtnl_trylock())
2495 			return true;
2496 
2497 		in_dev = __in_dev_get_rtnl(skb->dev);
2498 		if (in_dev) {
2499 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
2500 				ifap = &ifa->ifa_next) {
2501 				if (!strcmp(skb->dev->name, ifa->ifa_label))
2502 					break;
2503 			}
2504 		}
2505 
2506 		if (ifa && ifa->ifa_local) {
2507 			arp_ptr = (unsigned char *)(arp + 1);
2508 			arp_ptr += (skb->dev->addr_len + 4 +
2509 					skb->dev->addr_len);
2510 			memcpy(&tip, arp_ptr, 4);
2511 			qdf_debug("ARP packet: local IP: %x dest IP: %x",
2512 				  ifa->ifa_local, tip);
2513 			if (ifa->ifa_local == tip) {
2514 				rtnl_unlock();
2515 				return true;
2516 			}
2517 		}
2518 		rtnl_unlock();
2519 	}
2520 
2521 	return false;
2522 }
2523 
2524 #ifdef NBUF_MEMORY_DEBUG
2525 
2526 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2527 
2528 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2529 static struct kmem_cache *nbuf_tracking_cache;
2530 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2531 static spinlock_t qdf_net_buf_track_free_list_lock;
2532 static uint32_t qdf_net_buf_track_free_list_count;
2533 static uint32_t qdf_net_buf_track_used_list_count;
2534 static uint32_t qdf_net_buf_track_max_used;
2535 static uint32_t qdf_net_buf_track_max_free;
2536 static uint32_t qdf_net_buf_track_max_allocated;
2537 static uint32_t qdf_net_buf_track_fail_count;
2538 
2539 /**
2540  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2541  *
2542  * tracks the max number of network buffers that the wlan driver was tracking
2543  * at any one time.
2544  *
2545  * Return: none
2546  */
2547 static inline void update_max_used(void)
2548 {
2549 	int sum;
2550 
2551 	if (qdf_net_buf_track_max_used <
2552 	    qdf_net_buf_track_used_list_count)
2553 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2554 	sum = qdf_net_buf_track_free_list_count +
2555 		qdf_net_buf_track_used_list_count;
2556 	if (qdf_net_buf_track_max_allocated < sum)
2557 		qdf_net_buf_track_max_allocated = sum;
2558 }
2559 
2560 /**
2561  * update_max_free() - update qdf_net_buf_track_free_list_count
2562  *
2563  * tracks the max number of tracking buffers kept in the freelist.
2564  *
2565  * Return: none
2566  */
2567 static inline void update_max_free(void)
2568 {
2569 	if (qdf_net_buf_track_max_free <
2570 	    qdf_net_buf_track_free_list_count)
2571 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2572 }
2573 
2574 /**
2575  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2576  *
2577  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2578  * This function also ads fexibility to adjust the allocation and freelist
2579  * This function also adds flexibility to adjust the allocation and freelist
2580  * schemes.
2581  *
2582  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2583 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2584 {
2585 	int flags = GFP_KERNEL;
2586 	unsigned long irq_flag;
2587 	QDF_NBUF_TRACK *new_node = NULL;
2588 
2589 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2590 	qdf_net_buf_track_used_list_count++;
2591 	if (qdf_net_buf_track_free_list) {
2592 		new_node = qdf_net_buf_track_free_list;
2593 		qdf_net_buf_track_free_list =
2594 			qdf_net_buf_track_free_list->p_next;
2595 		qdf_net_buf_track_free_list_count--;
2596 	}
2597 	update_max_used();
2598 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2599 
2600 	if (new_node)
2601 		return new_node;
2602 
2603 	if (in_interrupt() || irqs_disabled() || in_atomic())
2604 		flags = GFP_ATOMIC;
2605 
2606 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2607 }
2608 
2609 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2610 #define FREEQ_POOLSIZE 2048
2611 
2612 /**
2613  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2614  * @node: the tracking cookie to release
2615  *
2616  * Matches calls to qdf_nbuf_track_alloc. Either returns the tracking
2617  * cookie to the kernel or to an internal freelist, based on the freelist size.
2618  *
2619  * Return: none
2620  */
2621 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2622 {
2623 	unsigned long irq_flag;
2624 
2625 	if (!node)
2626 		return;
2627 
2628 	/* Try to shrink the freelist if free_list_count is greater than
2629 	 * FREEQ_POOLSIZE; only shrink it if it is also bigger than twice the
2630 	 * number of nbufs in use. If the driver stalls in a consistently
2631 	 * bursty fashion, this keeps 3/4 of the allocations on the free list
2632 	 * while still allowing the system to recover memory as less frantic
2633 	 * traffic occurs.
2634 	 */
2635 
2636 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2637 
2638 	qdf_net_buf_track_used_list_count--;
2639 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2640 	   (qdf_net_buf_track_free_list_count >
2641 	    qdf_net_buf_track_used_list_count << 1)) {
2642 		kmem_cache_free(nbuf_tracking_cache, node);
2643 	} else {
2644 		node->p_next = qdf_net_buf_track_free_list;
2645 		qdf_net_buf_track_free_list = node;
2646 		qdf_net_buf_track_free_list_count++;
2647 	}
2648 	update_max_free();
2649 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2650 }
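
/*
 * Worked example (illustrative, not part of the driver): with
 * FREEQ_POOLSIZE at 2048, suppose 5000 cookies sit on the freelist.  If
 * only 2000 nbufs are in use, 5000 > 2048 and 5000 > 2 * 2000, so the
 * node just freed is returned to the kmem cache and the freelist shrinks.
 * If 3000 nbufs are in use instead, 5000 < 2 * 3000, so the node is kept
 * on the freelist to absorb the next traffic burst.
 */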
2651 
2652 /**
2653  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2654  *
2655  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2656  * the freelist first makes it performant for the first iperf udp burst
2657  * as well as steady state.
2658  *
2659  * Return: None
2660  */
2661 static void qdf_nbuf_track_prefill(void)
2662 {
2663 	int i;
2664 	QDF_NBUF_TRACK *node, *head;
2665 
2666 	/* prepopulate the freelist */
2667 	head = NULL;
2668 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2669 		node = qdf_nbuf_track_alloc();
2670 		if (!node)
2671 			continue;
2672 		node->p_next = head;
2673 		head = node;
2674 	}
2675 	while (head) {
2676 		node = head->p_next;
2677 		qdf_nbuf_track_free(head);
2678 		head = node;
2679 	}
2680 
2681 	/* prefilled buffers should not count as used */
2682 	qdf_net_buf_track_max_used = 0;
2683 }
2684 
2685 /**
2686  * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
2687  *
2688  * This initializes the memory manager for the nbuf tracking cookies.  Because
2689  * these cookies are all the same size and only used in this feature, we can
2690  * use a kmem_cache to provide tracking as well as to speed up allocations.
2691  * To avoid the overhead of allocating and freeing the buffers (including SLUB
2692  * features) a freelist is prepopulated here.
2693  *
2694  * Return: None
2695  */
2696 static void qdf_nbuf_track_memory_manager_create(void)
2697 {
2698 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
2699 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
2700 						sizeof(QDF_NBUF_TRACK),
2701 						0, 0, NULL);
2702 
2703 	qdf_nbuf_track_prefill();
2704 }
2705 
2706 /**
2707  * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking cookie manager
2708  *
2709  * Empty the freelist and print out usage statistics when it is no longer
2710  * needed. Also the kmem_cache should be destroyed here so that it can warn if
2711  * any nbuf tracking cookies were leaked.
2712  *
2713  * Return: None
2714  */
2715 static void qdf_nbuf_track_memory_manager_destroy(void)
2716 {
2717 	QDF_NBUF_TRACK *node, *tmp;
2718 	unsigned long irq_flag;
2719 
2720 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2721 	node = qdf_net_buf_track_free_list;
2722 
2723 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
2724 		qdf_print("%s: unexpectedly large max_used count %d",
2725 			  __func__, qdf_net_buf_track_max_used);
2726 
2727 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
2728 		qdf_print("%s: %d unused trackers were allocated",
2729 			  __func__,
2730 			  qdf_net_buf_track_max_allocated -
2731 			  qdf_net_buf_track_max_used);
2732 
2733 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2734 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
2735 		qdf_print("%s: check freelist shrinking functionality",
2736 			  __func__);
2737 
2738 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2739 		  "%s: %d residual freelist size",
2740 		  __func__, qdf_net_buf_track_free_list_count);
2741 
2742 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2743 		  "%s: %d max freelist size observed",
2744 		  __func__, qdf_net_buf_track_max_free);
2745 
2746 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2747 		  "%s: %d max buffers used observed",
2748 		  __func__, qdf_net_buf_track_max_used);
2749 
2750 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
2751 		  "%s: %d max buffers allocated observed",
2752 		  __func__, qdf_net_buf_track_max_allocated);
2753 
2754 	while (node) {
2755 		tmp = node;
2756 		node = node->p_next;
2757 		kmem_cache_free(nbuf_tracking_cache, tmp);
2758 		qdf_net_buf_track_free_list_count--;
2759 	}
2760 
2761 	if (qdf_net_buf_track_free_list_count != 0)
2762 		qdf_info("%d unfreed tracking memory lost in freelist",
2763 			 qdf_net_buf_track_free_list_count);
2764 
2765 	if (qdf_net_buf_track_used_list_count != 0)
2766 		qdf_info("%d unfreed tracking memory still in use",
2767 			 qdf_net_buf_track_used_list_count);
2768 
2769 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2770 	kmem_cache_destroy(nbuf_tracking_cache);
2771 	qdf_net_buf_track_free_list = NULL;
2772 }
2773 
2774 /**
2775  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
2776  *
2777  * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
2778  * in a hash table and when driver is unloaded it reports about leaked SKBs.
2779  * WLAN driver modules whose allocated SKBs are freed by the network stack
2780  * are supposed to call qdf_net_buf_debug_release_skb() so that the SKB is
2781  * not reported as a memory leak.
2782  *
2783  * Return: none
2784  */
2785 void qdf_net_buf_debug_init(void)
2786 {
2787 	uint32_t i;
2788 
2789 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
2790 
2791 	if (is_initial_mem_debug_disabled)
2792 		return;
2793 
2794 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
2795 
2796 	qdf_nbuf_map_tracking_init();
2797 	qdf_nbuf_track_memory_manager_create();
2798 
2799 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2800 		gp_qdf_net_buf_track_tbl[i] = NULL;
2801 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
2802 	}
2803 }
2804 qdf_export_symbol(qdf_net_buf_debug_init);
2805 
2806 /**
2807  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
2808  *
2809  * Exit network buffer tracking debug functionality and log SKB memory leaks.
2810  * As part of exiting the functionality, free the leaked memory and
2811  * clean up the tracking buffers.
2812  *
2813  * Return: none
2814  */
2815 void qdf_net_buf_debug_exit(void)
2816 {
2817 	uint32_t i;
2818 	uint32_t count = 0;
2819 	unsigned long irq_flag;
2820 	QDF_NBUF_TRACK *p_node;
2821 	QDF_NBUF_TRACK *p_prev;
2822 
2823 	if (is_initial_mem_debug_disabled)
2824 		return;
2825 
2826 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
2827 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2828 		p_node = gp_qdf_net_buf_track_tbl[i];
2829 		while (p_node) {
2830 			p_prev = p_node;
2831 			p_node = p_node->p_next;
2832 			count++;
2833 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
2834 				 p_prev->func_name, p_prev->line_num,
2835 				 p_prev->size, p_prev->net_buf);
2836 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
2837 				 p_prev->map_func_name,
2838 				 p_prev->map_line_num,
2839 				 p_prev->unmap_func_name,
2840 				 p_prev->unmap_line_num,
2841 				 p_prev->is_nbuf_mapped);
2842 			qdf_nbuf_track_free(p_prev);
2843 		}
2844 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2845 	}
2846 
2847 	qdf_nbuf_track_memory_manager_destroy();
2848 	qdf_nbuf_map_tracking_deinit();
2849 
2850 #ifdef CONFIG_HALT_KMEMLEAK
2851 	if (count) {
2852 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
2853 		QDF_BUG(0);
2854 	}
2855 #endif
2856 }
2857 qdf_export_symbol(qdf_net_buf_debug_exit);
2858 
2859 /**
2860  * qdf_net_buf_debug_hash() - hash network buffer pointer
2861  *
2862  * Return: hash value
2863  */
2864 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
2865 {
2866 	uint32_t i;
2867 
2868 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
2869 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
2870 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
2871 
2872 	return i;
2873 }
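
/*
 * Worked example (illustrative, not part of the driver): for an skb at
 * 0xffff888012345680, (ptr >> 4) and (ptr >> 14) drop the low alignment
 * bits and fold in higher-order bits; their sum masked with
 * (QDF_NET_BUF_TRACK_MAX_SIZE - 1) selects one bucket of the power-of-two
 * sized table.  Shifting by 4 first avoids every bucket index sharing the
 * zero low bits that slab-aligned pointers have in common.
 */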
2874 
2875 /**
2876  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
2877  *
2878  * Return: pointer to the tracking node if the skb is found in the hash table,
2879  *	else %NULL
2880  */
2881 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
2882 {
2883 	uint32_t i;
2884 	QDF_NBUF_TRACK *p_node;
2885 
2886 	i = qdf_net_buf_debug_hash(net_buf);
2887 	p_node = gp_qdf_net_buf_track_tbl[i];
2888 
2889 	while (p_node) {
2890 		if (p_node->net_buf == net_buf)
2891 			return p_node;
2892 		p_node = p_node->p_next;
2893 	}
2894 
2895 	return NULL;
2896 }
2897 
2898 /**
2899  * qdf_net_buf_debug_add_node() - store skb in debug hash table
2900  *
2901  * Return: none
2902  */
2903 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
2904 				const char *func_name, uint32_t line_num)
2905 {
2906 	uint32_t i;
2907 	unsigned long irq_flag;
2908 	QDF_NBUF_TRACK *p_node;
2909 	QDF_NBUF_TRACK *new_node;
2910 
2911 	if (is_initial_mem_debug_disabled)
2912 		return;
2913 
2914 	new_node = qdf_nbuf_track_alloc();
2915 
2916 	i = qdf_net_buf_debug_hash(net_buf);
2917 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2918 
2919 	p_node = qdf_net_buf_debug_look_up(net_buf);
2920 
2921 	if (p_node) {
2922 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
2923 			  p_node->net_buf, p_node->func_name, p_node->line_num,
2924 			  net_buf, func_name, line_num);
2925 		qdf_nbuf_track_free(new_node);
2926 	} else {
2927 		p_node = new_node;
2928 		if (p_node) {
2929 			p_node->net_buf = net_buf;
2930 			qdf_str_lcopy(p_node->func_name, func_name,
2931 				      QDF_MEM_FUNC_NAME_SIZE);
2932 			p_node->line_num = line_num;
2933 			p_node->is_nbuf_mapped = false;
2934 			p_node->map_line_num = 0;
2935 			p_node->unmap_line_num = 0;
2936 			p_node->map_func_name[0] = '\0';
2937 			p_node->unmap_func_name[0] = '\0';
2938 			p_node->size = size;
2939 			p_node->time = qdf_get_log_timestamp();
2940 			qdf_mem_skb_inc(size);
2941 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
2942 			gp_qdf_net_buf_track_tbl[i] = p_node;
2943 		} else {
2944 			qdf_net_buf_track_fail_count++;
2945 			qdf_print(
2946 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
2947 				  func_name, line_num, size);
2948 		}
2949 	}
2950 
2951 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2952 }
2953 qdf_export_symbol(qdf_net_buf_debug_add_node);
2954 
2955 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
2956 				   uint32_t line_num)
2957 {
2958 	uint32_t i;
2959 	unsigned long irq_flag;
2960 	QDF_NBUF_TRACK *p_node;
2961 
2962 	if (is_initial_mem_debug_disabled)
2963 		return;
2964 
2965 	i = qdf_net_buf_debug_hash(net_buf);
2966 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2967 
2968 	p_node = qdf_net_buf_debug_look_up(net_buf);
2969 
2970 	if (p_node) {
2971 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
2972 			      QDF_MEM_FUNC_NAME_SIZE);
2973 		p_node->line_num = line_num;
2974 	}
2975 
2976 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
2977 }
2978 
2979 qdf_export_symbol(qdf_net_buf_debug_update_node);
2980 
2981 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
2982 				       const char *func_name,
2983 				       uint32_t line_num)
2984 {
2985 	uint32_t i;
2986 	unsigned long irq_flag;
2987 	QDF_NBUF_TRACK *p_node;
2988 
2989 	if (is_initial_mem_debug_disabled)
2990 		return;
2991 
2992 	i = qdf_net_buf_debug_hash(net_buf);
2993 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
2994 
2995 	p_node = qdf_net_buf_debug_look_up(net_buf);
2996 
2997 	if (p_node) {
2998 		qdf_str_lcopy(p_node->map_func_name, func_name,
2999 			      QDF_MEM_FUNC_NAME_SIZE);
3000 		p_node->map_line_num = line_num;
3001 		p_node->is_nbuf_mapped = true;
3002 	}
3003 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3004 }
3005 
3006 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3007 					 const char *func_name,
3008 					 uint32_t line_num)
3009 {
3010 	uint32_t i;
3011 	unsigned long irq_flag;
3012 	QDF_NBUF_TRACK *p_node;
3013 
3014 	if (is_initial_mem_debug_disabled)
3015 		return;
3016 
3017 	i = qdf_net_buf_debug_hash(net_buf);
3018 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3019 
3020 	p_node = qdf_net_buf_debug_look_up(net_buf);
3021 
3022 	if (p_node) {
3023 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
3024 			      QDF_MEM_FUNC_NAME_SIZE);
3025 		p_node->unmap_line_num = line_num;
3026 		p_node->is_nbuf_mapped = false;
3027 	}
3028 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3029 }
3030 
3031 /**
3032  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
3033  *
3034  * Return: none
3035  */
3036 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3037 {
3038 	uint32_t i;
3039 	QDF_NBUF_TRACK *p_head;
3040 	QDF_NBUF_TRACK *p_node = NULL;
3041 	unsigned long irq_flag;
3042 	QDF_NBUF_TRACK *p_prev;
3043 
3044 	if (is_initial_mem_debug_disabled)
3045 		return;
3046 
3047 	i = qdf_net_buf_debug_hash(net_buf);
3048 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3049 
3050 	p_head = gp_qdf_net_buf_track_tbl[i];
3051 
3052 	/* Unallocated SKB */
3053 	if (!p_head)
3054 		goto done;
3055 
3056 	p_node = p_head;
3057 	/* Found at head of the table */
3058 	if (p_head->net_buf == net_buf) {
3059 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3060 		goto done;
3061 	}
3062 
3063 	/* Search in collision list */
3064 	while (p_node) {
3065 		p_prev = p_node;
3066 		p_node = p_node->p_next;
3067 		if ((p_node) && (p_node->net_buf == net_buf)) {
3068 			p_prev->p_next = p_node->p_next;
3069 			break;
3070 		}
3071 	}
3072 
3073 done:
3074 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3075 
3076 	if (p_node) {
3077 		qdf_mem_skb_dec(p_node->size);
3078 		qdf_nbuf_track_free(p_node);
3079 	} else {
3080 		if (qdf_net_buf_track_fail_count) {
3081 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3082 				  net_buf, qdf_net_buf_track_fail_count);
3083 		} else
3084 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3085 					   net_buf);
3086 	}
3087 }
3088 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3089 
3090 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3091 				   const char *func_name, uint32_t line_num)
3092 {
3093 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3094 
3095 	if (is_initial_mem_debug_disabled)
3096 		return;
3097 
3098 	while (ext_list) {
3099 		/*
3100 		 * Take care to add if it is Jumbo packet connected using
3101 		 * frag_list
3102 		 */
3103 		qdf_nbuf_t next;
3104 
3105 		next = qdf_nbuf_queue_next(ext_list);
3106 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3107 		ext_list = next;
3108 	}
3109 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3110 }
3111 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3112 
3113 /**
3114  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
3115  * @net_buf: Network buf holding head segment (single)
3116  *
3117  * WLAN driver modules whose allocated SKBs are freed by the network stack
3118  * are supposed to call this API before returning the SKB to the network
3119  * stack, so that the SKB is not reported as a memory leak.
3120  *
3121  * Return: none
3122  */
3123 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3124 {
3125 	qdf_nbuf_t ext_list;
3126 
3127 	if (is_initial_mem_debug_disabled)
3128 		return;
3129 
3130 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3131 	while (ext_list) {
3132 		/*
3133 		 * Take care to free if it is Jumbo packet connected using
3134 		 * frag_list
3135 		 */
3136 		qdf_nbuf_t next;
3137 
3138 		next = qdf_nbuf_queue_next(ext_list);
3139 
3140 		if (qdf_nbuf_get_users(ext_list) > 1) {
3141 			ext_list = next;
3142 			continue;
3143 		}
3144 
3145 		qdf_net_buf_debug_delete_node(ext_list);
3146 		ext_list = next;
3147 	}
3148 
3149 	if (qdf_nbuf_get_users(net_buf) > 1)
3150 		return;
3151 
3152 	qdf_net_buf_debug_delete_node(net_buf);
3153 }
3154 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3155 
3156 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3157 				int reserve, int align, int prio,
3158 				const char *func, uint32_t line)
3159 {
3160 	qdf_nbuf_t nbuf;
3161 
3162 	if (is_initial_mem_debug_disabled)
3163 		return __qdf_nbuf_alloc(osdev, size,
3164 					reserve, align,
3165 					prio, func, line);
3166 
3167 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3168 
3169 	/* Store SKB in internal QDF tracking table */
3170 	if (qdf_likely(nbuf)) {
3171 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3172 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3173 	} else {
3174 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3175 	}
3176 
3177 	return nbuf;
3178 }
3179 qdf_export_symbol(qdf_nbuf_alloc_debug);
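
/*
 * Illustrative note (not part of the driver): when NBUF_MEMORY_DEBUG is
 * enabled, callers are expected to reach this wrapper through the
 * qdf_nbuf_alloc() interface, which supplies the caller's __func__ and
 * __LINE__ (the exact macro plumbing lives in the qdf_nbuf headers), e.g.:
 *
 *	nbuf = qdf_nbuf_alloc(osdev, size, reserve, align, prio);
 *
 * so that any leaked skb can later be attributed to its allocation site by
 * qdf_net_buf_debug_add_node() and reported at qdf_net_buf_debug_exit().
 */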
3180 
3181 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3182 					    const char *func, uint32_t line)
3183 {
3184 	qdf_nbuf_t nbuf;
3185 
3186 	if (is_initial_mem_debug_disabled)
3187 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3188 						    line);
3189 
3190 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3191 
3192 	/* Store SKB in internal QDF tracking table */
3193 	if (qdf_likely(nbuf)) {
3194 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3195 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3196 	} else {
3197 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3198 	}
3199 
3200 	return nbuf;
3201 }
3202 
3203 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3204 
3205 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3206 {
3207 	qdf_nbuf_t ext_list;
3208 	qdf_frag_t p_frag;
3209 	uint32_t num_nr_frags;
3210 	uint32_t idx = 0;
3211 
3212 	if (qdf_unlikely(!nbuf))
3213 		return;
3214 
3215 	if (is_initial_mem_debug_disabled)
3216 		goto free_buf;
3217 
3218 	if (qdf_nbuf_get_users(nbuf) > 1)
3219 		goto free_buf;
3220 
3221 	/* Remove SKB from internal QDF tracking table */
3222 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3223 	qdf_net_buf_debug_delete_node(nbuf);
3224 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3225 
3226 	/* Take care to delete the debug entries for frags */
3227 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3228 
3229 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3230 
3231 	while (idx < num_nr_frags) {
3232 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3233 		if (qdf_likely(p_frag))
3234 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3235 		idx++;
3236 	}
3237 
3238 	/*
3239 	 * Take care to update the debug entries for frag_list and also
3240 	 * for the frags attached to frag_list
3241 	 */
3242 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3243 	while (ext_list) {
3244 		if (qdf_nbuf_get_users(ext_list) == 1) {
3245 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3246 			idx = 0;
3247 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3248 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3249 			while (idx < num_nr_frags) {
3250 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3251 				if (qdf_likely(p_frag))
3252 					qdf_frag_debug_refcount_dec(p_frag,
3253 								    func, line);
3254 				idx++;
3255 			}
3256 			qdf_net_buf_debug_delete_node(ext_list);
3257 		}
3258 
3259 		ext_list = qdf_nbuf_queue_next(ext_list);
3260 	}
3261 
3262 free_buf:
3263 	__qdf_nbuf_free(nbuf);
3264 }
3265 qdf_export_symbol(qdf_nbuf_free_debug);
3266 
3267 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3268 					const char *func, uint32_t line)
3269 {
3270 	struct sk_buff *skb;
3271 	int flags = GFP_KERNEL;
3272 
3273 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3274 		flags = GFP_ATOMIC;
3275 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3276 		/*
3277 		 * Observed that kcompactd burns out CPU to make order-3 pages.
3278 		 * __netdev_alloc_skb has a 4k page fallback option in case the
3279 		 * high-order page allocation fails, so we don't need to push
3280 		 * hard here. Let kcompactd rest in peace.
3281 		 */
3282 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3283 #endif
3284 	}
3285 
3286 	skb = __netdev_alloc_skb(NULL, size, flags);
3287 
3288 
3289 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3290 		if (qdf_likely(skb))
3291 			qdf_nbuf_count_inc(skb);
3292 	} else {
3293 		if (qdf_likely(skb)) {
3294 			qdf_nbuf_count_inc(skb);
3295 			qdf_net_buf_debug_add_node(skb, size, func, line);
3296 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
3297 		} else {
3298 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
3299 		}
3300 	}
3301 
3302 
3303 	return skb;
3304 }
3305 
3306 qdf_export_symbol(__qdf_nbuf_alloc_simple);
3307 
3308 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
3309 				uint32_t line)
3310 {
3311 	if (qdf_likely(nbuf)) {
3312 		if (is_initial_mem_debug_disabled) {
3313 			dev_kfree_skb_any(nbuf);
3314 		} else {
3315 			qdf_nbuf_free_debug(nbuf, func, line);
3316 		}
3317 	}
3318 }
3319 
3320 qdf_export_symbol(qdf_nbuf_free_debug_simple);
3321 
3322 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3323 {
3324 	uint32_t num_nr_frags;
3325 	uint32_t idx = 0;
3326 	qdf_nbuf_t ext_list;
3327 	qdf_frag_t p_frag;
3328 
3329 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3330 
3331 	if (is_initial_mem_debug_disabled)
3332 		return cloned_buf;
3333 
3334 	if (qdf_unlikely(!cloned_buf))
3335 		return NULL;
3336 
3337 	/* Take care to update the debug entries for frags */
3338 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3339 
3340 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3341 
3342 	while (idx < num_nr_frags) {
3343 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3344 		if (qdf_likely(p_frag))
3345 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3346 		idx++;
3347 	}
3348 
3349 	/* Take care to update debug entries for frags attached to frag_list */
3350 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3351 	while (ext_list) {
3352 		idx = 0;
3353 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3354 
3355 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3356 
3357 		while (idx < num_nr_frags) {
3358 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3359 			if (qdf_likely(p_frag))
3360 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3361 			idx++;
3362 		}
3363 		ext_list = qdf_nbuf_queue_next(ext_list);
3364 	}
3365 
3366 	/* Store SKB in internal QDF tracking table */
3367 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3368 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3369 
3370 	return cloned_buf;
3371 }
3372 qdf_export_symbol(qdf_nbuf_clone_debug);
3373 
3374 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3375 {
3376 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3377 
3378 	if (is_initial_mem_debug_disabled)
3379 		return copied_buf;
3380 
3381 	if (qdf_unlikely(!copied_buf))
3382 		return NULL;
3383 
3384 	/* Store SKB in internal QDF tracking table */
3385 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3386 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3387 
3388 	return copied_buf;
3389 }
3390 qdf_export_symbol(qdf_nbuf_copy_debug);
3391 
3392 qdf_nbuf_t
3393 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3394 			   const char *func, uint32_t line)
3395 {
3396 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3397 
3398 	if (qdf_unlikely(!copied_buf))
3399 		return NULL;
3400 
3401 	if (is_initial_mem_debug_disabled)
3402 		return copied_buf;
3403 
3404 	/* Store SKB in internal QDF tracking table */
3405 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3406 	qdf_nbuf_history_add(copied_buf, func, line,
3407 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3408 
3409 	return copied_buf;
3410 }
3411 
3412 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3413 
3414 qdf_nbuf_t
3415 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3416 		       uint32_t line_num)
3417 {
3418 	qdf_nbuf_t unshared_buf;
3419 	qdf_frag_t p_frag;
3420 	uint32_t num_nr_frags;
3421 	uint32_t idx = 0;
3422 	qdf_nbuf_t ext_list, next;
3423 
3424 	if (is_initial_mem_debug_disabled)
3425 		return __qdf_nbuf_unshare(buf);
3426 
3427 	/* Not a shared buffer, nothing to do */
3428 	if (!qdf_nbuf_is_cloned(buf))
3429 		return buf;
3430 
3431 	if (qdf_nbuf_get_users(buf) > 1)
3432 		goto unshare_buf;
3433 
3434 	/* Take care to delete the debug entries for frags */
3435 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3436 
3437 	while (idx < num_nr_frags) {
3438 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3439 		if (qdf_likely(p_frag))
3440 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3441 						    line_num);
3442 		idx++;
3443 	}
3444 
3445 	qdf_net_buf_debug_delete_node(buf);
3446 
3447 	 /* Take care of jumbo packet connected using frag_list and frags */
3448 	ext_list = qdf_nbuf_get_ext_list(buf);
3449 	while (ext_list) {
3450 		idx = 0;
3451 		next = qdf_nbuf_queue_next(ext_list);
3452 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3453 
3454 		if (qdf_nbuf_get_users(ext_list) > 1) {
3455 			ext_list = next;
3456 			continue;
3457 		}
3458 
3459 		while (idx < num_nr_frags) {
3460 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3461 			if (qdf_likely(p_frag))
3462 				qdf_frag_debug_refcount_dec(p_frag, func_name,
3463 							    line_num);
3464 			idx++;
3465 		}
3466 
3467 		qdf_net_buf_debug_delete_node(ext_list);
3468 		ext_list = next;
3469 	}
3470 
3471 unshare_buf:
3472 	unshared_buf = __qdf_nbuf_unshare(buf);
3473 
3474 	if (qdf_likely(unshared_buf))
3475 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3476 					   line_num);
3477 
3478 	return unshared_buf;
3479 }
3480 
3481 qdf_export_symbol(qdf_nbuf_unshare_debug);
3482 
3483 #endif /* NBUF_MEMORY_DEBUG */
3484 
3485 #if defined(FEATURE_TSO)
3486 
3487 /**
3488  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3489  *
3490  * @ethproto: ethernet type of the msdu
3491  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3492  * @l2_len: L2 length for the msdu
3493  * @eit_hdr: pointer to EIT header
3494  * @eit_hdr_len: EIT header length for the msdu
3495  * @eit_hdr_dma_map_addr: dma addr for EIT header
3496  * @tcphdr: pointer to tcp header
3497  * @ipv4_csum_en: ipv4 checksum enable
3498  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3499  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3500  * @ip_id: IP id
3501  * @tcp_seq_num: TCP sequence number
3502  *
3503  * This structure holds the TSO common info that is common
3504  * across all the TCP segments of the jumbo packet.
3505  */
3506 struct qdf_tso_cmn_seg_info_t {
3507 	uint16_t ethproto;
3508 	uint16_t ip_tcp_hdr_len;
3509 	uint16_t l2_len;
3510 	uint8_t *eit_hdr;
3511 	uint32_t eit_hdr_len;
3512 	qdf_dma_addr_t eit_hdr_dma_map_addr;
3513 	struct tcphdr *tcphdr;
3514 	uint16_t ipv4_csum_en;
3515 	uint16_t tcp_ipv4_csum_en;
3516 	uint16_t tcp_ipv6_csum_en;
3517 	uint16_t ip_id;
3518 	uint32_t tcp_seq_num;
3519 };
3520 
3521 /**
3522  * qdf_nbuf_adj_tso_frag() - adjust the skb for 8-byte alignment of the EIT header end
3523  *
3524  * @skb: network buffer
3525  *
3526  * Return: number of bytes the headers were shifted for alignment (0 if none).
3527  */
3528 #ifdef FIX_TXDMA_LIMITATION
3529 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3530 {
3531 	uint32_t eit_hdr_len;
3532 	uint8_t *eit_hdr;
3533 	uint8_t byte_8_align_offset;
3534 
3535 	eit_hdr = skb->data;
3536 	eit_hdr_len = (skb_transport_header(skb)
3537 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3538 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
3539 	if (qdf_unlikely(byte_8_align_offset)) {
3540 		TSO_DEBUG("%pK,Len %d %d",
3541 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
3542 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
3543 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
3544 				  __LINE__, skb->head, skb->data,
3545 				 byte_8_align_offset);
3546 			return 0;
3547 		}
3548 		qdf_nbuf_push_head(skb, byte_8_align_offset);
3549 		qdf_mem_move(skb->data,
3550 			     skb->data + byte_8_align_offset,
3551 			     eit_hdr_len);
3552 		skb->len -= byte_8_align_offset;
3553 		skb->mac_header -= byte_8_align_offset;
3554 		skb->network_header -= byte_8_align_offset;
3555 		skb->transport_header -= byte_8_align_offset;
3556 	}
3557 	return byte_8_align_offset;
3558 }
3559 #else
3560 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
3561 {
3562 	return 0;
3563 }
3564 #endif
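
/*
 * Worked example (illustrative, not part of the driver): suppose the
 * ethernet+IP+TCP (EIT) header starts at 0x...1000 and is 54 bytes
 * (14 + 20 + 20) long, so it ends at 0x...1036 and 0x36 & 0x7 = 6.  The
 * header is then pushed 6 bytes into the headroom and copied down, so its
 * new end address 0x...1030 is 8-byte aligned while the TCP payload that
 * follows stays where it was; the returned offset of 6 is what
 * __qdf_nbuf_get_tso_info() adds back when locating that payload.
 */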
3565 
3566 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
3567 void qdf_record_nbuf_nbytes(
3568 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
3569 {
3570 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
3571 }
3572 
3573 qdf_export_symbol(qdf_record_nbuf_nbytes);
3574 
3575 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
3576 
3577 /**
3578  * qdf_nbuf_tso_map_frag() - Map TSO segment
3579  * @osdev: qdf device handle
3580  * @tso_frag_vaddr: addr of tso fragment
3581  * @nbytes: number of bytes
3582  * @dir: direction
3583  *
3584  * Map TSO segment and for MCL record the amount of memory mapped
3585  *
3586  * Return: DMA address of the mapped TSO fragment on success, or
3587  * 0 in case of DMA mapping failure
3588  */
3589 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
3590 	qdf_device_t osdev, void *tso_frag_vaddr,
3591 	uint32_t nbytes, qdf_dma_dir_t dir)
3592 {
3593 	qdf_dma_addr_t tso_frag_paddr = 0;
3594 
3595 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
3596 					nbytes, __qdf_dma_dir_to_os(dir));
3597 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
3598 		qdf_err("DMA mapping error!");
3599 		qdf_assert_always(0);
3600 		return 0;
3601 	}
3602 	qdf_record_nbuf_nbytes(nbytes, dir, true);
3603 	return tso_frag_paddr;
3604 }
3605 
3606 /**
3607  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
3608  * @osdev: qdf device handle
3609  * @tso_frag_paddr: DMA addr of tso fragment
3610  * @dir: direction
3611  * @nbytes: number of bytes
3612  *
3613  * Unmap TSO segment and for MCL record the amount of memory mapped
3614  *
3615  * Return: None
3616  */
3617 static inline void qdf_nbuf_tso_unmap_frag(
3618 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
3619 	uint32_t nbytes, qdf_dma_dir_t dir)
3620 {
3621 	qdf_record_nbuf_nbytes(nbytes, dir, false);
3622 	dma_unmap_single(osdev->dev, tso_frag_paddr,
3623 			 nbytes, __qdf_dma_dir_to_os(dir));
3624 }
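
/*
 * Illustrative note (not part of the driver): every successful
 * qdf_nbuf_tso_map_frag() is expected to be balanced by a
 * qdf_nbuf_tso_unmap_frag() with the same byte count and direction, e.g.:
 *
 *	paddr = qdf_nbuf_tso_map_frag(osdev, vaddr, len, QDF_DMA_TO_DEVICE);
 *	if (!paddr)
 *		return 0;
 *	...
 *	qdf_nbuf_tso_unmap_frag(osdev, paddr, len, QDF_DMA_TO_DEVICE);
 *
 * so that the per-direction byte counters recorded via
 * qdf_record_nbuf_nbytes() stay balanced as well.
 */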
3625 
3626 /**
3627  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
3628  * information
3629  * @osdev: qdf device handle
3630  * @skb: skb buffer
3631  * @tso_info: Parameters common to all segments
3632  *
3633  * Get the TSO information that is common across all the TCP
3634  * segments of the jumbo packet
3635  *
3636  * Return: 0 - success 1 - failure
3637  */
3638 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
3639 			struct sk_buff *skb,
3640 			struct qdf_tso_cmn_seg_info_t *tso_info)
3641 {
3642 	/* Get ethernet type and ethernet header length */
3643 	tso_info->ethproto = vlan_get_protocol(skb);
3644 
3645 	/* Determine whether this is an IPv4 or IPv6 packet */
3646 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
3647 		/* for IPv4, get the IP ID and enable TCP and IP csum */
3648 		struct iphdr *ipv4_hdr = ip_hdr(skb);
3649 
3650 		tso_info->ip_id = ntohs(ipv4_hdr->id);
3651 		tso_info->ipv4_csum_en = 1;
3652 		tso_info->tcp_ipv4_csum_en = 1;
3653 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
3654 			qdf_err("TSO IPV4 proto 0x%x not TCP",
3655 				ipv4_hdr->protocol);
3656 			return 1;
3657 		}
3658 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
3659 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
3660 		tso_info->tcp_ipv6_csum_en = 1;
3661 	} else {
3662 		qdf_err("TSO: ethertype 0x%x is not supported!",
3663 			tso_info->ethproto);
3664 		return 1;
3665 	}
3666 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
3667 	tso_info->tcphdr = tcp_hdr(skb);
3668 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
3669 	/* get pointer to the ethernet + IP + TCP header and their length */
3670 	tso_info->eit_hdr = skb->data;
3671 	tso_info->eit_hdr_len = (skb_transport_header(skb)
3672 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
3673 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
3674 						osdev, tso_info->eit_hdr,
3675 						tso_info->eit_hdr_len,
3676 						QDF_DMA_TO_DEVICE);
3677 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
3678 		return 1;
3679 
3680 	if (tso_info->ethproto == htons(ETH_P_IP)) {
3681 		/* include IPv4 header length for IPV4 (total length) */
3682 		tso_info->ip_tcp_hdr_len =
3683 			tso_info->eit_hdr_len - tso_info->l2_len;
3684 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
3685 		/* exclude IPv6 header length for IPv6 (payload length) */
3686 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
3687 	}
3688 	/*
3689 	 * The length of the payload (application layer data) is added to
3690 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
3691 	 * descriptor.
3692 	 */
3693 
3694 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
3695 		tso_info->tcp_seq_num,
3696 		tso_info->eit_hdr_len,
3697 		tso_info->l2_len,
3698 		skb->len);
3699 	return 0;
3700 }
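
/*
 * Worked example (illustrative, not part of the driver): for an untagged
 * IPv4 TCP frame with no IP or TCP options, l2_len = 14 (ethernet header)
 * and eit_hdr_len = 14 + 20 + 20 = 54, so ip_tcp_hdr_len = 54 - 14 = 40,
 * matching the IPv4 "total length" style noted above.  For the equivalent
 * IPv6 frame eit_hdr_len = 14 + 40 + 20 = 74 but ip_tcp_hdr_len is just
 * tcp_hdrlen() = 20, matching the IPv6 "payload length" convention.
 */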
3701 
3702 
3703 /**
3704  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
3705  *
3706  * @curr_seg: Segment whose contents are initialized
3707  * @tso_cmn_info: Parameters common to all segments
3708  *
3709  * Return: None
3710  */
3711 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
3712 				struct qdf_tso_seg_elem_t *curr_seg,
3713 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
3714 {
3715 	/* Initialize the flags to 0 */
3716 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
3717 
3718 	/*
3719 	 * The following fields remain the same across all segments of
3720 	 * a jumbo packet
3721 	 */
3722 	curr_seg->seg.tso_flags.tso_enable = 1;
3723 	curr_seg->seg.tso_flags.ipv4_checksum_en =
3724 		tso_cmn_info->ipv4_csum_en;
3725 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
3726 		tso_cmn_info->tcp_ipv6_csum_en;
3727 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
3728 		tso_cmn_info->tcp_ipv4_csum_en;
3729 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
3730 
3731 	/* The following fields change for the segments */
3732 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
3733 	tso_cmn_info->ip_id++;
3734 
3735 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
3736 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
3737 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
3738 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
3739 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
3740 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
3741 
3742 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
3743 
3744 	/*
3745 	 * First fragment for each segment always contains the ethernet,
3746 	 * IP and TCP header
3747 	 */
3748 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
3749 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
3750 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
3751 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
3752 
3753 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
3754 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
3755 		   tso_cmn_info->eit_hdr_len,
3756 		   curr_seg->seg.tso_flags.tcp_seq_num,
3757 		   curr_seg->seg.total_len);
3758 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
3759 }
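
/*
 * Worked example (illustrative, not part of the driver): this function
 * bumps the shared ip_id by one for the next segment, while the caller
 * advances tcp_seq_num by the payload bytes placed in each segment.  A
 * 4380-byte TCP payload with gso_size = 1460 therefore becomes three
 * segments whose sequence numbers are seq, seq + 1460 and seq + 2920,
 * each prefixed by its own copy of the shared EIT header set up here in
 * tso_frags[0].
 */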
3760 
3761 /**
3762  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
3763  * into segments
3764  * @osdev: qdf device handle
3765  * @skb: network buffer to be segmented
3766  * @tso_info: output; information about the TSO segments is populated here
3767  *
3768  * This function fragments a TCP jumbo packet into smaller
3769  * segments to be transmitted by the driver. It chains the TSO
3770  * segments created into a list.
3771  *
3772  * Return: number of TSO segments
3773  */
3774 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
3775 		struct qdf_tso_info_t *tso_info)
3776 {
3777 	/* common across all segments */
3778 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
3779 	/* segment specific */
3780 	void *tso_frag_vaddr;
3781 	qdf_dma_addr_t tso_frag_paddr = 0;
3782 	uint32_t num_seg = 0;
3783 	struct qdf_tso_seg_elem_t *curr_seg;
3784 	struct qdf_tso_num_seg_elem_t *total_num_seg;
3785 	skb_frag_t *frag = NULL;
3786 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
3787 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
3788 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
3789 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
3790 	int j = 0; /* skb fragment index */
3791 	uint8_t byte_8_align_offset;
3792 
3793 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
3794 	total_num_seg = tso_info->tso_num_seg_list;
3795 	curr_seg = tso_info->tso_seg_list;
3796 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
3797 
3798 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
3799 
3800 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
3801 						skb, &tso_cmn_info))) {
3802 		qdf_warn("TSO: error getting common segment info");
3803 		return 0;
3804 	}
3805 
3806 	/* length of the first chunk of data in the skb */
3807 	skb_frag_len = skb_headlen(skb);
3808 
3809 	/* the 0th tso segment's 0th fragment always contains the EIT header */
3810 	/* update the remaining skb fragment length and TSO segment length */
3811 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
3812 	skb_proc -= tso_cmn_info.eit_hdr_len;
3813 
3814 	/* get the address to the next tso fragment */
3815 	tso_frag_vaddr = skb->data +
3816 			 tso_cmn_info.eit_hdr_len +
3817 			 byte_8_align_offset;
3818 	/* get the length of the next tso fragment */
3819 	tso_frag_len = min(skb_frag_len, tso_seg_size);
3820 
3821 	if (tso_frag_len != 0) {
3822 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
3823 					osdev, tso_frag_vaddr, tso_frag_len,
3824 					QDF_DMA_TO_DEVICE);
3825 		if (qdf_unlikely(!tso_frag_paddr))
3826 			return 0;
3827 	}
3828 
3829 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
3830 		__LINE__, skb_frag_len, tso_frag_len);
3831 	num_seg = tso_info->num_segs;
3832 	tso_info->num_segs = 0;
3833 	tso_info->is_tso = 1;
3834 
3835 	while (num_seg && curr_seg) {
3836 		int i = 1; /* tso fragment index */
3837 		uint8_t more_tso_frags = 1;
3838 
3839 		curr_seg->seg.num_frags = 0;
3840 		tso_info->num_segs++;
3841 		total_num_seg->num_seg.tso_cmn_num_seg++;
3842 
3843 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
3844 						 &tso_cmn_info);
3845 
3846 		/* If TCP PSH flag is set, set it in the last or only segment */
3847 		if (num_seg == 1)
3848 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
3849 
3850 		if (unlikely(skb_proc == 0))
3851 			return tso_info->num_segs;
3852 
3853 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
3854 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3855 		/* frag len is added to ip_len in the while loop below */
3856 
3857 		curr_seg->seg.num_frags++;
3858 
3859 		while (more_tso_frags) {
3860 			if (tso_frag_len != 0) {
3861 				curr_seg->seg.tso_frags[i].vaddr =
3862 					tso_frag_vaddr;
3863 				curr_seg->seg.tso_frags[i].length =
3864 					tso_frag_len;
3865 				curr_seg->seg.total_len += tso_frag_len;
3866 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
3867 				curr_seg->seg.num_frags++;
3868 				skb_proc = skb_proc - tso_frag_len;
3869 
3870 				/* increment the TCP sequence number */
3871 
3872 				tso_cmn_info.tcp_seq_num += tso_frag_len;
3873 				curr_seg->seg.tso_frags[i].paddr =
3874 					tso_frag_paddr;
3875 
3876 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
3877 			}
3878 
3879 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
3880 					__func__, __LINE__,
3881 					i,
3882 					tso_frag_len,
3883 					curr_seg->seg.total_len,
3884 					curr_seg->seg.tso_frags[i].vaddr);
3885 
3886 			/* if there is no more data left in the skb */
3887 			if (!skb_proc)
3888 				return tso_info->num_segs;
3889 
3890 			/* get the next payload fragment information */
3891 			/* check if there are more fragments in this segment */
3892 			if (tso_frag_len < tso_seg_size) {
3893 				more_tso_frags = 1;
3894 				if (tso_frag_len != 0) {
3895 					tso_seg_size = tso_seg_size -
3896 						tso_frag_len;
3897 					i++;
3898 					if (curr_seg->seg.num_frags ==
3899 								FRAG_NUM_MAX) {
3900 						more_tso_frags = 0;
3901 						/*
3902 						 * reset i and the tso
3903 						 * payload size
3904 						 */
3905 						i = 1;
3906 						tso_seg_size =
3907 							skb_shinfo(skb)->
3908 								gso_size;
3909 					}
3910 				}
3911 			} else {
3912 				more_tso_frags = 0;
3913 				/* reset i and the tso payload size */
3914 				i = 1;
3915 				tso_seg_size = skb_shinfo(skb)->gso_size;
3916 			}
3917 
3918 			/* if the next fragment is contiguous */
3919 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
3920 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
3921 				skb_frag_len = skb_frag_len - tso_frag_len;
3922 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3923 
3924 			} else { /* the next fragment is not contiguous */
3925 				if (skb_shinfo(skb)->nr_frags == 0) {
3926 					qdf_info("TSO: nr_frags == 0!");
3927 					qdf_assert(0);
3928 					return 0;
3929 				}
3930 				if (j >= skb_shinfo(skb)->nr_frags) {
3931 					qdf_info("TSO: nr_frags %d j %d",
3932 						 skb_shinfo(skb)->nr_frags, j);
3933 					qdf_assert(0);
3934 					return 0;
3935 				}
3936 				frag = &skb_shinfo(skb)->frags[j];
3937 				skb_frag_len = skb_frag_size(frag);
3938 				tso_frag_len = min(skb_frag_len, tso_seg_size);
3939 				tso_frag_vaddr = skb_frag_address_safe(frag);
3940 				j++;
3941 			}
3942 
3943 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
3944 				__func__, __LINE__, skb_frag_len, tso_frag_len,
3945 				tso_seg_size);
3946 
3947 			if (!(tso_frag_vaddr)) {
3948 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
3949 						__func__);
3950 				return 0;
3951 			}
3952 
3953 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
3954 						osdev, tso_frag_vaddr,
3955 						tso_frag_len,
3956 						QDF_DMA_TO_DEVICE);
3957 			if (qdf_unlikely(!tso_frag_paddr))
3958 				return 0;
3959 		}
3960 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
3961 				curr_seg->seg.tso_flags.tcp_seq_num);
3962 		num_seg--;
3963 		/* if TCP FIN flag was set, set it in the last segment */
3964 		if (!num_seg)
3965 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
3966 
3967 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
3968 		curr_seg = curr_seg->next;
3969 	}
3970 	return tso_info->num_segs;
3971 }
3972 qdf_export_symbol(__qdf_nbuf_get_tso_info);
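
/*
 * Illustrative caller-side sketch (not compiled as part of this file):
 * a TX path that has already allocated the per-packet tso_seg_list and
 * tso_num_seg_list could segment a jumbo skb roughly as below. The names
 * osdev, msdu, seg_list, num_seg_list, avail_segs and
 * hw_enqueue_tso_seg() are hypothetical.
 *
 *	struct qdf_tso_info_t tso_info = { .tso_seg_list = seg_list,
 *					   .tso_num_seg_list = num_seg_list,
 *					   .num_segs = avail_segs };
 *	struct qdf_tso_seg_elem_t *seg;
 *	uint32_t n;
 *
 *	n = __qdf_nbuf_get_tso_info(osdev, msdu, &tso_info);
 *	if (!n)
 *		return QDF_STATUS_E_FAILURE;
 *	for (seg = tso_info.tso_seg_list; seg; seg = seg->next)
 *		hw_enqueue_tso_seg(&seg->seg);
 *
 * On completion each segment is released with
 * __qdf_nbuf_unmap_tso_segment(osdev, seg, is_last_seg).
 */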
3973 
3974 /**
3975  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
3976  *
3977  * @osdev: qdf device handle
3978  * @tso_seg: TSO segment element to be unmapped
3979  * @is_last_seg: whether this is last tso seg or not
3980  *
3981  * Return: none
3982  */
3983 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
3984 			  struct qdf_tso_seg_elem_t *tso_seg,
3985 			  bool is_last_seg)
3986 {
3987 	uint32_t num_frags = 0;
3988 
3989 	if (tso_seg->seg.num_frags > 0)
3990 		num_frags = tso_seg->seg.num_frags - 1;
3991 
3992 	/* Number of frags in a tso seg cannot be less than 2 */
3993 	if (num_frags < 1) {
3994 		/*
3995 		 * A tso seg may end up with only one frag when
3996 		 * qdf_nbuf_get_tso_info failed; if this is also the last
3997 		 * seg, do a dma unmap for the 0th frag in this seg.
3998 		 */
3999 		if (is_last_seg && tso_seg->seg.num_frags == 1)
4000 			goto last_seg_free_first_frag;
4001 
4002 		qdf_assert(0);
4003 		qdf_err("ERROR: num of frags in a tso segment is %d",
4004 			(num_frags + 1));
4005 		return;
4006 	}
4007 
4008 	while (num_frags) {
4009 		/* Do dma unmap for each frag of the tso seg except the 0th */
4010 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
4011 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4012 				num_frags);
4013 			qdf_assert(0);
4014 			return;
4015 		}
4016 		qdf_nbuf_tso_unmap_frag(
4017 			osdev,
4018 			tso_seg->seg.tso_frags[num_frags].paddr,
4019 			tso_seg->seg.tso_frags[num_frags].length,
4020 			QDF_DMA_TO_DEVICE);
4021 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
4022 		num_frags--;
4023 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4024 	}
4025 
4026 last_seg_free_first_frag:
4027 	if (is_last_seg) {
4028 		/* Do dma unmap for the 0th frag of the tso seg */
4029 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
4030 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4031 			qdf_assert(0);
4032 			return;
4033 		}
4034 		qdf_nbuf_tso_unmap_frag(osdev,
4035 					tso_seg->seg.tso_frags[0].paddr,
4036 					tso_seg->seg.tso_frags[0].length,
4037 					QDF_DMA_TO_DEVICE);
4038 		tso_seg->seg.tso_frags[0].paddr = 0;
4039 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4040 	}
4041 }
4042 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
4043 
4044 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4045 {
4046 	size_t packet_len;
4047 
4048 	packet_len = skb->len -
4049 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4050 		 tcp_hdrlen(skb));
4051 
4052 	return packet_len;
4053 }
4054 
4055 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
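
/*
 * Worked example for the computation above (illustrative values): for a
 * 1514-byte Ethernet frame with a 14-byte MAC header, a 20-byte IPv4
 * header and a 20-byte TCP header,
 * (skb_transport_header(skb) - skb_mac_header(skb)) = 14 + 20 = 34 and
 * tcp_hdrlen(skb) = 20, so the returned TCP payload length is
 * 1514 - (34 + 20) = 1460 bytes.
 */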
4056 
4057 /**
4058  * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
4059  * for a TSO nbuf
4060  * @skb: network buffer for which the TSO segment count is needed
4061  *
4062  * This function calculates how many TSO segments a TCP jumbo packet
4063  * will be divided into, without performing the actual segmentation.
4064  * It walks the linear data and the page fragments of the skb and
4065  * accounts for the EIT header carried by every segment and for the
4066  * per-segment fragment limit (FRAG_NUM_MAX).
4067  *
4068  * Return: number of TSO segments, or 0 on failure
4069  */
4070 #ifndef BUILD_X86
4071 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4072 {
4073 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4074 	uint32_t remainder, num_segs = 0;
4075 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4076 	uint8_t frags_per_tso = 0;
4077 	uint32_t skb_frag_len = 0;
4078 	uint32_t eit_hdr_len = (skb_transport_header(skb)
4079 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4080 	skb_frag_t *frag = NULL;
4081 	int j = 0;
4082 	uint32_t temp_num_seg = 0;
4083 
4084 	/* length of the first chunk of data in the skb minus eit header*/
4085 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4086 
4087 	/* Calculate num of segs for skb's first chunk of data*/
4088 	remainder = skb_frag_len % tso_seg_size;
4089 	num_segs = skb_frag_len / tso_seg_size;
4090 	/*
4091 	 * Remainder non-zero and nr_frags zero implies end of skb data.
4092 	 * In that case, one more tso seg is required to accommodate
4093 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
4094 	 * then remaining data will be accommodated while doing the
4095 	 * calculation for nr_frags data. Hence, frags_per_tso++.
4096 	 */
4097 	if (remainder) {
4098 		if (!skb_nr_frags)
4099 			num_segs++;
4100 		else
4101 			frags_per_tso++;
4102 	}
4103 
4104 	while (skb_nr_frags) {
4105 		if (j >= skb_shinfo(skb)->nr_frags) {
4106 			qdf_info("TSO: nr_frags %d j %d",
4107 				 skb_shinfo(skb)->nr_frags, j);
4108 			qdf_assert(0);
4109 			return 0;
4110 		}
4111 		/*
4112 		 * Calculate the number of tso segs for the nr_frags data:
4113 		 * get the length of each frag in skb_frag_len and add it to
4114 		 * the remainder. Get the number of segments by dividing it
4115 		 * by tso_seg_size and calculate the new remainder.
4116 		 * Decrement the nr_frags value and keep
4117 		 * looping over all the skb fragments.
4118 		 */
4119 		frag = &skb_shinfo(skb)->frags[j];
4120 		skb_frag_len = skb_frag_size(frag);
4121 		temp_num_seg = num_segs;
4122 		remainder += skb_frag_len;
4123 		num_segs += remainder / tso_seg_size;
4124 		remainder = remainder % tso_seg_size;
4125 		skb_nr_frags--;
4126 		if (remainder) {
4127 			if (num_segs > temp_num_seg)
4128 				frags_per_tso = 0;
4129 			/*
4130 			 * Increment frags_per_tso whenever the remainder is
4131 			 * positive. If frags_per_tso reaches (max - 1)
4132 			 * [the first frag always carries the EIT header,
4133 			 * hence max - 1], increment num_segs as no more data
4134 			 * can be accommodated in the current tso seg. Reset
4135 			 * the remainder and frags_per_tso and keep looping.
4136 			 */
4137 			frags_per_tso++;
4138 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4139 				num_segs++;
4140 				frags_per_tso = 0;
4141 				remainder = 0;
4142 			}
4143 			/*
4144 			 * If this is the last skb frag and the remainder is
4145 			 * still non-zero (frags_per_tso has not reached
4146 			 * max - 1), then increment num_segs to take care of
4147 			 * the remaining length.
4148 			 */
4149 			if (!skb_nr_frags && remainder) {
4150 				num_segs++;
4151 				frags_per_tso = 0;
4152 			}
4153 		} else {
4154 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4155 			frags_per_tso = 0;
4156 		}
4157 		j++;
4158 	}
4159 
4160 	return num_segs;
4161 }
4162 #elif !defined(QCA_WIFI_QCN9000)
4163 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4164 {
4165 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4166 	skb_frag_t *frag = NULL;
4167 
4168 	/*
4169 	 * Check if the head SKB or any of the frags are allocated in the
4170 	 * < 0x50000000 region, which cannot be accessed by the target
4171 	 */
4172 	if (virt_to_phys(skb->data) < 0x50000040) {
4173 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4174 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4175 				virt_to_phys(skb->data));
4176 		goto fail;
4177 
4178 	}
4179 
4180 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4181 		frag = &skb_shinfo(skb)->frags[i];
4182 
4183 		if (!frag)
4184 			goto fail;
4185 
4186 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4187 			goto fail;
4188 	}
4189 
4190 
4191 	gso_size = skb_shinfo(skb)->gso_size;
4192 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4193 			+ tcp_hdrlen(skb));
4194 	while (tmp_len) {
4195 		num_segs++;
4196 		if (tmp_len > gso_size)
4197 			tmp_len -= gso_size;
4198 		else
4199 			break;
4200 	}
4201 
4202 	return num_segs;
4203 
4204 	/*
4205 	 * Do not free this frame, just do socket level accounting
4206 	 * so that this is not reused.
4207 	 */
4208 fail:
4209 	if (skb->sk)
4210 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4211 
4212 	return 0;
4213 }
4214 #else
4215 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4216 {
4217 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4218 	skb_frag_t *frag = NULL;
4219 
4220 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4221 		frag = &skb_shinfo(skb)->frags[i];
4222 
4223 		if (!frag)
4224 			goto fail;
4225 	}
4226 
4227 	gso_size = skb_shinfo(skb)->gso_size;
4228 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4229 			+ tcp_hdrlen(skb));
4230 	while (tmp_len) {
4231 		num_segs++;
4232 		if (tmp_len > gso_size)
4233 			tmp_len -= gso_size;
4234 		else
4235 			break;
4236 	}
4237 
4238 	return num_segs;
4239 
4240 	/*
4241 	 * Do not free this frame, just do socket level accounting
4242 	 * so that this is not reused.
4243 	 */
4244 fail:
4245 	if (skb->sk)
4246 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4247 
4248 	return 0;
4249 }
4250 #endif
4251 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
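
/*
 * Worked example (illustrative numbers): with gso_size = 1460 and a TCP
 * payload of 4000 bytes held entirely in the linear data area, the
 * non-BUILD_X86 variant above computes 4000 / 1460 = 2 full segments plus
 * one extra segment for the 1080-byte remainder, i.e. 3 TSO segments.
 * Page fragments and the FRAG_NUM_MAX - 1 per-segment fragment limit can
 * only increase this count.
 */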
4252 
4253 #endif /* FEATURE_TSO */
4254 
4255 /**
4256  * __qdf_dmaaddr_to_32s() - return the high and low 32 bits of a DMA addr
4257  * @dmaaddr: DMA address to be split
4258  * @lo: pointer to receive the low 32 bits
4259  * @hi: pointer to receive the high 32 bits
4260  * Return: N/A
4261  */
4262 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4263 			  uint32_t *lo, uint32_t *hi)
4264 {
4265 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4266 		*lo = lower_32_bits(dmaaddr);
4267 		*hi = upper_32_bits(dmaaddr);
4268 	} else {
4269 		*lo = dmaaddr;
4270 		*hi = 0;
4271 	}
4272 }
4273 
4274 qdf_export_symbol(__qdf_dmaaddr_to_32s);
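
/*
 * Illustrative sketch: splitting a DMA address before programming a
 * hardware descriptor that takes separate low/high address words. The
 * descriptor field names are hypothetical.
 *
 *	uint32_t lo, hi;
 *
 *	__qdf_dmaaddr_to_32s(paddr, &lo, &hi);
 *	desc->buf_addr_lo = lo;
 *	desc->buf_addr_hi = hi;
 *
 * For example, paddr 0x123456780 yields lo = 0x23456780 and hi = 0x1.
 */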
4275 
4276 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4277 {
4278 	qdf_nbuf_users_inc(&skb->users);
4279 	return skb;
4280 }
4281 qdf_export_symbol(__qdf_nbuf_inc_users);
4282 
4283 int __qdf_nbuf_get_users(struct sk_buff *skb)
4284 {
4285 	return qdf_nbuf_users_read(&skb->users);
4286 }
4287 qdf_export_symbol(__qdf_nbuf_get_users);
4288 
4289 /**
4290  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
4291  * @skb: sk_buff handle
4292  *
4293  * Return: none
4294  */
4295 
4296 void __qdf_nbuf_ref(struct sk_buff *skb)
4297 {
4298 	skb_get(skb);
4299 }
4300 qdf_export_symbol(__qdf_nbuf_ref);
4301 
4302 /**
4303  * __qdf_nbuf_shared() - Check whether the buffer is shared
4304  *  @skb: sk_buff buffer
4305  *
4306  *  Return: true if more than one person has a reference to this buffer.
4307  */
4308 int __qdf_nbuf_shared(struct sk_buff *skb)
4309 {
4310 	return skb_shared(skb);
4311 }
4312 qdf_export_symbol(__qdf_nbuf_shared);
4313 
4314 /**
4315  * __qdf_nbuf_dmamap_create() - create a DMA map.
4316  * @osdev: qdf device handle
4317  * @dmap: dma map handle
4318  *
4319  * This can later be used to map networking buffers. DMA maps:
4320  * - need space in the driver's software descriptor
4321  * - are typically created during driver creation (adf_drv_create)
4322  * - need to be created before any API (qdf_nbuf_map) that uses them
4323  *
4324  * Return: QDF_STATUS
4325  */
4326 QDF_STATUS
4327 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4328 {
4329 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4330 	/*
4331 	 * The driver can indicate its SG capability; it must be handled.
4332 	 * Use bounce buffers if they are present.
4333 	 */
4334 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4335 	if (!(*dmap))
4336 		error = QDF_STATUS_E_NOMEM;
4337 
4338 	return error;
4339 }
4340 qdf_export_symbol(__qdf_nbuf_dmamap_create);
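
/*
 * Illustrative pairing sketch: a DMA map is created once (typically at
 * driver attach), used by the nbuf map APIs, and destroyed on detach.
 * The osdev variable is assumed to be the attached qdf device.
 *
 *	__qdf_dma_map_t dmap;
 *
 *	if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	__qdf_nbuf_dmamap_destroy(osdev, dmap);
 */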
4341 /**
4342  * __qdf_nbuf_dmamap_destroy() - delete a dma map
4343  * @osdev: qdf device handle
4344  * @dmap: dma map handle
4345  *
4346  * Return: none
4347  */
4348 void
4349 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4350 {
4351 	kfree(dmap);
4352 }
4353 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
4354 
4355 /**
4356  * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
4357  * @osdev: os device
4358  * @skb: skb handle
4359  * @dir: dma direction
4360  * @nbytes: number of bytes to be mapped
4361  *
4362  * Return: QDF_STATUS
4363  */
4364 #ifdef QDF_OS_DEBUG
4365 QDF_STATUS
4366 __qdf_nbuf_map_nbytes(
4367 	qdf_device_t osdev,
4368 	struct sk_buff *skb,
4369 	qdf_dma_dir_t dir,
4370 	int nbytes)
4371 {
4372 	struct skb_shared_info  *sh = skb_shinfo(skb);
4373 
4374 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4375 
4376 	/*
4377 	 * Assume there's only a single fragment.
4378 	 * To support multiple fragments, it would be necessary to change
4379 	 * adf_nbuf_t to be a separate object that stores meta-info
4380 	 * (including the bus address for each fragment) and a pointer
4381 	 * to the underlying sk_buff.
4382 	 */
4383 	qdf_assert(sh->nr_frags == 0);
4384 
4385 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4386 }
4387 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4388 #else
4389 QDF_STATUS
4390 __qdf_nbuf_map_nbytes(
4391 	qdf_device_t osdev,
4392 	struct sk_buff *skb,
4393 	qdf_dma_dir_t dir,
4394 	int nbytes)
4395 {
4396 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4397 }
4398 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4399 #endif
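
/*
 * Illustrative sketch: mapping a single-fragment skb for transmit and
 * unmapping it on completion with the same byte count and direction.
 * Mapping the full skb->len is an example only; callers may map fewer
 * bytes.
 *
 *	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE,
 *				  skb->len) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, skb->len);
 */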
4400 /**
4401  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
4402  * @osdev: OS device
4403  * @skb: skb handle
4404  * @dir: direction
4405  * @nbytes: number of bytes
4406  *
4407  * Return: none
4408  */
4409 void
4410 __qdf_nbuf_unmap_nbytes(
4411 	qdf_device_t osdev,
4412 	struct sk_buff *skb,
4413 	qdf_dma_dir_t dir,
4414 	int nbytes)
4415 {
4416 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4417 
4418 	/*
4419 	 * Assume there's a single fragment.
4420 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
4421 	 */
4422 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4423 }
4424 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4425 
4426 /**
4427  * __qdf_nbuf_dma_map_info() - return the dma map info
4428  * @bmap: dma map
4429  * @sg: dma map info
4430  *
4431  * Return: none
4432  */
4433 void
4434 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4435 {
4436 	qdf_assert(bmap->mapped);
4437 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4438 
4439 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4440 			sizeof(struct __qdf_segment));
4441 	sg->nsegs = bmap->nsegs;
4442 }
4443 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4444 /**
4445  * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
4446  *			specified by the index
4447  * @skb: sk buff
4448  * @sg: scatter/gather list of all the frags
4449  *
4450  * Return: none
4451  */
4452 #if defined(__QDF_SUPPORT_FRAG_MEM)
4453 void
4454 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4455 {
4456 	struct skb_shared_info *sh = skb_shinfo(skb);
4457 	int i;
4458 
4459 	qdf_assert(skb);
4460 	sg->sg_segs[0].vaddr = skb->data;
4461 	sg->sg_segs[0].len   = skb->len;
4462 	sg->nsegs            = 1;
4463 
4464 	for (i = 1; i <= sh->nr_frags; i++) {
4465 		skb_frag_t *f = &sh->frags[i - 1];
4466 
4467 		qdf_assert(i < QDF_MAX_SGLIST);
4468 		sg->sg_segs[i].vaddr = (uint8_t *)skb_frag_address(f);
4469 		sg->sg_segs[i].len   = skb_frag_size(f);
4470 	}
4471 	sg->nsegs = sh->nr_frags + 1;
4472 }
4473 qdf_export_symbol(__qdf_nbuf_frag_info);
4474 #else
4475 #ifdef QDF_OS_DEBUG
4476 void
4477 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4478 {
4479 
4480 	struct skb_shared_info  *sh = skb_shinfo(skb);
4481 
4482 	qdf_assert(skb);
4483 	sg->sg_segs[0].vaddr = skb->data;
4484 	sg->sg_segs[0].len   = skb->len;
4485 	sg->nsegs            = 1;
4486 
4487 	qdf_assert(sh->nr_frags == 0);
4488 }
4489 qdf_export_symbol(__qdf_nbuf_frag_info);
4490 #else
4491 void
4492 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4493 {
4494 	sg->sg_segs[0].vaddr = skb->data;
4495 	sg->sg_segs[0].len   = skb->len;
4496 	sg->nsegs            = 1;
4497 }
4498 qdf_export_symbol(__qdf_nbuf_frag_info);
4499 #endif
4500 #endif
4501 /**
4502  * __qdf_nbuf_get_frag_size() - get frag size
4503  * @nbuf: sk buffer
4504  * @cur_frag: current frag
4505  *
4506  * Return: frag size
4507  */
4508 uint32_t
4509 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4510 {
4511 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4512 	const skb_frag_t *frag = sh->frags + cur_frag;
4513 
4514 	return skb_frag_size(frag);
4515 }
4516 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4517 
4518 /**
4519  * __qdf_nbuf_frag_map() - dma map frag
4520  * @osdev: os device
4521  * @nbuf: sk buff
4522  * @offset: offset
4523  * @dir: direction
4524  * @cur_frag: current fragment
4525  *
4526  * Return: QDF status
4527  */
4528 #ifdef A_SIMOS_DEVHOST
4529 QDF_STATUS __qdf_nbuf_frag_map(
4530 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4531 	int offset, qdf_dma_dir_t dir, int cur_frag)
4532 {
4533 	qdf_dma_addr_t paddr;
4534 
4535 	QDF_NBUF_CB_PADDR(nbuf) = paddr = (qdf_dma_addr_t)(uintptr_t)nbuf->data;
4536 	return QDF_STATUS_SUCCESS;
4537 }
4538 qdf_export_symbol(__qdf_nbuf_frag_map);
4539 #else
4540 QDF_STATUS __qdf_nbuf_frag_map(
4541 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4542 	int offset, qdf_dma_dir_t dir, int cur_frag)
4543 {
4544 	dma_addr_t paddr, frag_len;
4545 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4546 	const skb_frag_t *frag = sh->frags + cur_frag;
4547 
4548 	frag_len = skb_frag_size(frag);
4549 
4550 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4551 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4552 					__qdf_dma_dir_to_os(dir));
4553 	return dma_mapping_error(osdev->dev, paddr) ?
4554 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4555 }
4556 qdf_export_symbol(__qdf_nbuf_frag_map);
4557 #endif
4558 /**
4559  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
4560  * @dmap: dma map
4561  * @cb: callback
4562  * @arg: argument
4563  *
4564  * Return: none
4565  */
4566 void
4567 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4568 {
4569 	return;
4570 }
4571 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4572 
4573 
4574 /**
4575  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4576  * @osdev: os device
4577  * @buf: sk buff
4578  * @dir: direction
4579  *
4580  * Return: none
4581  */
4582 #if defined(A_SIMOS_DEVHOST)
4583 static void __qdf_nbuf_sync_single_for_cpu(
4584 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4585 {
4586 	return;
4587 }
4588 #else
4589 static void __qdf_nbuf_sync_single_for_cpu(
4590 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4591 {
4592 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4593 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4594 		return;
4595 	}
4596 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4597 		skb_end_offset(buf) - skb_headroom(buf),
4598 		__qdf_dma_dir_to_os(dir));
4599 }
4600 #endif
4601 /**
4602  * __qdf_nbuf_sync_for_cpu() - nbuf sync
4603  * @osdev: os device
4604  * @skb: sk buff
4605  * @dir: direction
4606  *
4607  * Return: none
4608  */
4609 void
4610 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4611 	struct sk_buff *skb, qdf_dma_dir_t dir)
4612 {
4613 	qdf_assert(
4614 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4615 
4616 	/*
4617 	 * Assume there's a single fragment.
4618 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
4619 	 */
4620 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4621 }
4622 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4623 
4624 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4625 /**
4626  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4627  * @rx_status: Pointer to rx_status.
4628  * @rtap_buf: Buf to which VHT info has to be updated.
4629  * @rtap_len: Current length of radiotap buffer
4630  *
4631  * Return: Length of radiotap after VHT flags updated.
4632  */
4633 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4634 					struct mon_rx_status *rx_status,
4635 					int8_t *rtap_buf,
4636 					uint32_t rtap_len)
4637 {
4638 	uint16_t vht_flags = 0;
4639 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4640 
4641 	rtap_len = qdf_align(rtap_len, 2);
4642 
4643 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4644 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4645 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4646 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4647 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4648 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4649 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4650 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4651 	rtap_len += 2;
4652 
4653 	rtap_buf[rtap_len] |=
4654 		(rx_status->is_stbc ?
4655 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
4656 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
4657 		(rx_status->ldpc ?
4658 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
4659 		(rx_status->beamformed ?
4660 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
4661 	rtap_len += 1;
4662 
4663 	if (!rx_user_status) {
4664 		switch (rx_status->vht_flag_values2) {
4665 		case IEEE80211_RADIOTAP_VHT_BW_20:
4666 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4667 			break;
4668 		case IEEE80211_RADIOTAP_VHT_BW_40:
4669 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4670 			break;
4671 		case IEEE80211_RADIOTAP_VHT_BW_80:
4672 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4673 			break;
4674 		case IEEE80211_RADIOTAP_VHT_BW_160:
4675 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4676 			break;
4677 		}
4678 		rtap_len += 1;
4679 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
4680 		rtap_len += 1;
4681 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
4682 		rtap_len += 1;
4683 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
4684 		rtap_len += 1;
4685 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
4686 		rtap_len += 1;
4687 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
4688 		rtap_len += 1;
4689 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
4690 		rtap_len += 1;
4691 		put_unaligned_le16(rx_status->vht_flag_values6,
4692 				   &rtap_buf[rtap_len]);
4693 		rtap_len += 2;
4694 	} else {
4695 		switch (rx_user_status->vht_flag_values2) {
4696 		case IEEE80211_RADIOTAP_VHT_BW_20:
4697 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
4698 			break;
4699 		case IEEE80211_RADIOTAP_VHT_BW_40:
4700 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
4701 			break;
4702 		case IEEE80211_RADIOTAP_VHT_BW_80:
4703 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
4704 			break;
4705 		case IEEE80211_RADIOTAP_VHT_BW_160:
4706 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
4707 			break;
4708 		}
4709 		rtap_len += 1;
4710 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
4711 		rtap_len += 1;
4712 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
4713 		rtap_len += 1;
4714 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
4715 		rtap_len += 1;
4716 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
4717 		rtap_len += 1;
4718 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
4719 		rtap_len += 1;
4720 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
4721 		rtap_len += 1;
4722 		put_unaligned_le16(rx_user_status->vht_flag_values6,
4723 				   &rtap_buf[rtap_len]);
4724 		rtap_len += 2;
4725 	}
4726 
4727 	return rtap_len;
4728 }
4729 
4730 /**
4731  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
4732  * @rx_status: Pointer to rx_status.
4733  * @rtap_buf: buffer to which radiotap has to be updated
4734  * @rtap_len: radiotap length
4735  *
4736  * This API updates high-efficiency (11ax) fields in the radiotap header
4737  *
4738  * Return: length of rtap_len updated.
4739  */
4740 static unsigned int
4741 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
4742 				     int8_t *rtap_buf, uint32_t rtap_len)
4743 {
4744 	/*
4745 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
4746 	 * Enable all "known" HE radiotap flags for now
4747 	 */
4748 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4749 
4750 	rtap_len = qdf_align(rtap_len, 2);
4751 
4752 	if (!rx_user_status) {
4753 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4754 		rtap_len += 2;
4755 
4756 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4757 		rtap_len += 2;
4758 
4759 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4760 		rtap_len += 2;
4761 
4762 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4763 		rtap_len += 2;
4764 
4765 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4766 		rtap_len += 2;
4767 
4768 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4769 		rtap_len += 2;
4770 		qdf_rl_debug("he data %x %x %x %x %x %x",
4771 			     rx_status->he_data1,
4772 			     rx_status->he_data2, rx_status->he_data3,
4773 			     rx_status->he_data4, rx_status->he_data5,
4774 			     rx_status->he_data6);
4775 	} else {
4776 		put_unaligned_le16(rx_user_status->he_data1,
4777 				   &rtap_buf[rtap_len]);
4778 		rtap_len += 2;
4779 
4780 		put_unaligned_le16(rx_user_status->he_data2,
4781 				   &rtap_buf[rtap_len]);
4782 		rtap_len += 2;
4783 
4784 		put_unaligned_le16(rx_user_status->he_data3,
4785 				   &rtap_buf[rtap_len]);
4786 		rtap_len += 2;
4787 
4788 		put_unaligned_le16(rx_user_status->he_data4,
4789 				   &rtap_buf[rtap_len]);
4790 		rtap_len += 2;
4791 
4792 		put_unaligned_le16(rx_user_status->he_data5,
4793 				   &rtap_buf[rtap_len]);
4794 		rtap_len += 2;
4795 
4796 		put_unaligned_le16(rx_user_status->he_data6,
4797 				   &rtap_buf[rtap_len]);
4798 		rtap_len += 2;
4799 		qdf_rl_debug("he data %x %x %x %x %x %x",
4800 			     rx_user_status->he_data1,
4801 			     rx_user_status->he_data2, rx_user_status->he_data3,
4802 			     rx_user_status->he_data4, rx_user_status->he_data5,
4803 			     rx_user_status->he_data6);
4804 	}
4805 
4806 	return rtap_len;
4807 }
4808 
4809 
4810 /**
4811  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
4812  * @rx_status: Pointer to rx_status.
4813  * @rtap_buf: buffer to which radiotap has to be updated
4814  * @rtap_len: radiotap length
4815  *
4816  * This API updates HE-MU fields in the radiotap header
4817  *
4818  * Return: length of rtap_len updated.
4819  */
4820 static unsigned int
4821 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
4822 				     int8_t *rtap_buf, uint32_t rtap_len)
4823 {
4824 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4825 
4826 	rtap_len = qdf_align(rtap_len, 2);
4827 
4828 	/*
4829 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
4830 	 * Enable all "known" he-mu radiotap flags for now
4831 	 */
4832 
4833 	if (!rx_user_status) {
4834 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4835 		rtap_len += 2;
4836 
4837 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4838 		rtap_len += 2;
4839 
4840 		rtap_buf[rtap_len] = rx_status->he_RU[0];
4841 		rtap_len += 1;
4842 
4843 		rtap_buf[rtap_len] = rx_status->he_RU[1];
4844 		rtap_len += 1;
4845 
4846 		rtap_buf[rtap_len] = rx_status->he_RU[2];
4847 		rtap_len += 1;
4848 
4849 		rtap_buf[rtap_len] = rx_status->he_RU[3];
4850 		rtap_len += 1;
4851 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4852 			  rx_status->he_flags1,
4853 			  rx_status->he_flags2, rx_status->he_RU[0],
4854 			  rx_status->he_RU[1], rx_status->he_RU[2],
4855 			  rx_status->he_RU[3]);
4856 	} else {
4857 		put_unaligned_le16(rx_user_status->he_flags1,
4858 				   &rtap_buf[rtap_len]);
4859 		rtap_len += 2;
4860 
4861 		put_unaligned_le16(rx_user_status->he_flags2,
4862 				   &rtap_buf[rtap_len]);
4863 		rtap_len += 2;
4864 
4865 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
4866 		rtap_len += 1;
4867 
4868 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
4869 		rtap_len += 1;
4870 
4871 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
4872 		rtap_len += 1;
4873 
4874 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
4875 		rtap_len += 1;
4876 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
4877 			  rx_user_status->he_flags1,
4878 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
4879 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
4880 			  rx_user_status->he_RU[3]);
4881 	}
4882 
4883 	return rtap_len;
4884 }
4885 
4886 /**
4887  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
4888  * @rx_status: Pointer to rx_status.
4889  * @rtap_buf: buffer to which radiotap has to be updated
4890  * @rtap_len: radiotap length
4891  *
4892  * This API updates HE-MU-other fields in the radiotap header
4893  *
4894  * Return: length of rtap_len updated.
4895  */
4896 static unsigned int
4897 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
4898 				     int8_t *rtap_buf, uint32_t rtap_len)
4899 {
4900 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4901 
4902 	rtap_len = qdf_align(rtap_len, 2);
4903 
4904 	/*
4905 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
4906 	 * Enable all "known" he-mu-other radiotap flags for now
4907 	 */
4908 	if (!rx_user_status) {
4909 		put_unaligned_le16(rx_status->he_per_user_1,
4910 				   &rtap_buf[rtap_len]);
4911 		rtap_len += 2;
4912 
4913 		put_unaligned_le16(rx_status->he_per_user_2,
4914 				   &rtap_buf[rtap_len]);
4915 		rtap_len += 2;
4916 
4917 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
4918 		rtap_len += 1;
4919 
4920 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
4921 		rtap_len += 1;
4922 		qdf_debug("he_per_user %x %x pos %x knwn %x",
4923 			  rx_status->he_per_user_1,
4924 			  rx_status->he_per_user_2,
4925 			  rx_status->he_per_user_position,
4926 			  rx_status->he_per_user_known);
4927 	} else {
4928 		put_unaligned_le16(rx_user_status->he_per_user_1,
4929 				   &rtap_buf[rtap_len]);
4930 		rtap_len += 2;
4931 
4932 		put_unaligned_le16(rx_user_status->he_per_user_2,
4933 				   &rtap_buf[rtap_len]);
4934 		rtap_len += 2;
4935 
4936 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
4937 		rtap_len += 1;
4938 
4939 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
4940 		rtap_len += 1;
4941 		qdf_debug("he_per_user %x %x pos %x knwn %x",
4942 			  rx_user_status->he_per_user_1,
4943 			  rx_user_status->he_per_user_2,
4944 			  rx_user_status->he_per_user_position,
4945 			  rx_user_status->he_per_user_known);
4946 	}
4947 
4948 	return rtap_len;
4949 }
4950 
4951 /**
4952  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
4953  *						from rx_status
4954  * @rx_status: Pointer to rx_status.
4955  * @rtap_buf: buffer to which radiotap has to be updated
4956  * @rtap_len: radiotap length
4957  *
4958  * This API updates U-SIG (802.11be) fields in the radiotap header
4959  *
4960  * Return: length of rtap_len updated.
4961  */
4962 static unsigned int
4963 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
4964 				    int8_t *rtap_buf, uint32_t rtap_len)
4965 {
4966 	/*
4967 	 * IEEE80211_RADIOTAP_USIG:
4968 	 *		u32, u32, u32
4969 	 */
4970 	rtap_len = qdf_align(rtap_len, 4);
4971 
4972 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
4973 	rtap_len += 4;
4974 
4975 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
4976 	rtap_len += 4;
4977 
4978 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
4979 	rtap_len += 4;
4980 
4981 	qdf_rl_debug("U-SIG data %x %x %x",
4982 		     rx_status->usig_common, rx_status->usig_value,
4983 		     rx_status->usig_mask);
4984 
4985 	return rtap_len;
4986 }
4987 
4988 /**
4989  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
4990  *					from rx_status
4991  * @rx_status: Pointer to rx_status.
4992  * @rtap_buf: buffer to which radiotap has to be updated
4993  * @rtap_len: radiotap length
4994  *
4995  * This API updates Extremely High Throughput (11be) fields in the radiotap header
4996  *
4997  * Return: length of rtap_len updated.
4998  */
4999 static unsigned int
5000 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5001 				   int8_t *rtap_buf, uint32_t rtap_len)
5002 {
5003 	uint32_t user;
5004 
5005 	/*
5006 	 * IEEE80211_RADIOTAP_EHT:
5007 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5008 	 */
5009 	rtap_len = qdf_align(rtap_len, 4);
5010 
5011 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5012 	rtap_len += 4;
5013 
5014 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5015 	rtap_len += 4;
5016 
5017 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5018 	rtap_len += 4;
5019 
5020 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5021 	rtap_len += 4;
5022 
5023 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5024 	rtap_len += 4;
5025 
5026 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5027 	rtap_len += 4;
5028 
5029 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5030 	rtap_len += 4;
5031 
5032 	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
5033 		put_unaligned_le32(rx_status->eht_user_info[user],
5034 				   &rtap_buf[rtap_len]);
5035 		rtap_len += 4;
5036 	}
5037 
5038 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5039 		     rx_status->eht_known, rx_status->eht_data[0],
5040 		     rx_status->eht_data[1], rx_status->eht_data[2],
5041 		     rx_status->eht_data[3], rx_status->eht_data[4],
5042 		     rx_status->eht_data[5]);
5043 
5044 	return rtap_len;
5045 }
5046 
5047 #define IEEE80211_RADIOTAP_TX_STATUS 0
5048 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5049 #define IEEE80211_RADIOTAP_EXTENSION2 2
5050 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5051 
5052 /**
5053  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5054  * @rx_status: Pointer to rx_status.
5055  * @rtap_buf: Buf to which AMPDU info has to be updated.
5056  * @rtap_len: Current length of radiotap buffer
5057  *
5058  * Return: Length of radiotap after AMPDU flags updated.
5059  */
5060 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5061 					struct mon_rx_status *rx_status,
5062 					uint8_t *rtap_buf,
5063 					uint32_t rtap_len)
5064 {
5065 	/*
5066 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5067 	 * First 32 bits of AMPDU represents the reference number
5068 	 */
5069 
5070 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5071 	uint16_t ampdu_flags = 0;
5072 	uint16_t ampdu_reserved_flags = 0;
5073 
5074 	rtap_len = qdf_align(rtap_len, 4);
5075 
5076 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5077 	rtap_len += 4;
5078 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5079 	rtap_len += 2;
5080 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5081 	rtap_len += 2;
5082 
5083 	return rtap_len;
5084 }
5085 
5086 #ifdef DP_MON_RSSI_IN_DBM
5087 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5088 (rx_status->rssi_comb)
5089 #else
5090 #ifdef QCA_RSSI_DB2DBM
5091 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5092 (((rx_status)->rssi_dbm_conv_support) ? \
5093 ((rx_status)->rssi_comb + (rx_status)->min_nf_dbm +\
5094 (rx_status)->rssi_temp_offset) : \
5095 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5096 #else
5097 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5098 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5099 #endif
5100 #endif
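
/*
 * Worked example (illustrative numbers) for the dBm conversion above:
 * with rssi_comb = 40 dB above the noise floor and chan_noise_floor =
 * -96 dBm, the reported antenna signal is 40 + (-96) = -56 dBm. When
 * QCA_RSSI_DB2DBM conversion is supported, min_nf_dbm and
 * rssi_temp_offset are added instead of the channel noise floor.
 */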
5101 
5102 /**
5103  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5104  * @rx_status: Pointer to rx_status.
5105  * @rtap_buf: Buf to which tx info has to be updated.
5106  * @rtap_len: Current length of radiotap buffer
5107  *
5108  * Return: Length of radiotap after tx flags updated.
5109  */
5110 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5111 						struct mon_rx_status *rx_status,
5112 						uint8_t *rtap_buf,
5113 						uint32_t rtap_len)
5114 {
5115 	/*
5116 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5117 	 */
5118 
5119 	uint16_t tx_flags = 0;
5120 
5121 	rtap_len = qdf_align(rtap_len, 2);
5122 
5123 	switch (rx_status->tx_status) {
5124 	case RADIOTAP_TX_STATUS_FAIL:
5125 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5126 		break;
5127 	case RADIOTAP_TX_STATUS_NOACK:
5128 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5129 		break;
5130 	}
5131 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5132 	rtap_len += 2;
5133 
5134 	return rtap_len;
5135 }
5136 
5137 /**
5138  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
5139  * @rx_status: Pointer to rx_status.
5140  * @nbuf:      nbuf pointer to which radiotap has to be updated
5141  * @headroom_sz: Available headroom size.
5142  *
5143  * Return: length of rtap_len updated.
5144  */
5145 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5146 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5147 {
5148 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5149 	struct ieee80211_radiotap_header *rthdr =
5150 		(struct ieee80211_radiotap_header *)rtap_buf;
5151 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5152 	uint32_t rtap_len = rtap_hdr_len;
5153 	uint8_t length = rtap_len;
5154 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5155 	struct qdf_radiotap_ext2 *rtap_ext2;
5156 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5157 
5158 	/* per user info */
5159 	qdf_le32_t *it_present;
5160 	uint32_t it_present_val;
5161 	bool radiotap_ext1_hdr_present = false;
5162 
5163 	it_present = &rthdr->it_present;
5164 
5165 	/* Adding Extended Header space */
5166 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5167 	    rx_status->usig_flags || rx_status->eht_flags) {
5168 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5169 		rtap_len = rtap_hdr_len;
5170 		radiotap_ext1_hdr_present = true;
5171 	}
5172 
5173 	length = rtap_len;
5174 
5175 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5176 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5177 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5178 	rtap_len += 8;
5179 
5180 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5181 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5182 
5183 	if (rx_status->rs_fcs_err)
5184 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5185 
5186 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5187 	rtap_len += 1;
5188 
5189 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5190 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5191 	    !rx_status->he_flags) {
5192 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5193 		rtap_buf[rtap_len] = rx_status->rate;
5194 	} else
5195 		rtap_buf[rtap_len] = 0;
5196 	rtap_len += 1;
5197 
5198 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5199 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5200 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5201 	rtap_len += 2;
5202 	/* Channel flags. */
5203 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5204 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5205 	else
5206 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5207 	if (rx_status->cck_flag)
5208 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5209 	if (rx_status->ofdm_flag)
5210 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5211 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5212 	rtap_len += 2;
5213 
5214 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5215 	 *					(dBm)
5216 	 */
5217 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5218 	/*
5219 	 * rssi_comb is in dB, it needs to be converted to dBm by
5220 	 * normalizing it to the noise floor (e.g. -96 dBm)
5221 	 */
5222 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5223 	rtap_len += 1;
5224 
5225 	/* RX signal noise floor */
5226 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5227 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5228 	rtap_len += 1;
5229 
5230 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5231 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5232 	rtap_buf[rtap_len] = rx_status->nr_ant;
5233 	rtap_len += 1;
5234 
5235 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5236 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5237 		return 0;
5238 	}
5239 
5240 	/* update tx flags for pkt capture*/
5241 	if (rx_status->add_rtap_ext) {
5242 		rthdr->it_present |=
5243 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5244 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5245 							     rtap_buf,
5246 							     rtap_len);
5247 
5248 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5249 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5250 			return 0;
5251 		}
5252 	}
5253 
5254 	if (rx_status->ht_flags) {
5255 		length = rtap_len;
5256 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
5257 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5258 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5259 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5260 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5261 		rtap_len += 1;
5262 
5263 		if (rx_status->sgi)
5264 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5265 		if (rx_status->bw)
5266 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5267 		else
5268 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5269 		rtap_len += 1;
5270 
5271 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5272 		rtap_len += 1;
5273 
5274 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5275 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5276 			return 0;
5277 		}
5278 	}
5279 
5280 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5281 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5282 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5283 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5284 								rtap_buf,
5285 								rtap_len);
5286 	}
5287 
5288 	if (rx_status->vht_flags) {
5289 		length = rtap_len;
5290 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5291 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5292 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5293 								rtap_buf,
5294 								rtap_len);
5295 
5296 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5297 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5298 			return 0;
5299 		}
5300 	}
5301 
5302 	if (rx_status->he_flags) {
5303 		length = rtap_len;
5304 		/* IEEE80211_RADIOTAP_HE */
5305 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5306 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5307 								rtap_buf,
5308 								rtap_len);
5309 
5310 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5311 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5312 			return 0;
5313 		}
5314 	}
5315 
5316 	if (rx_status->he_mu_flags) {
5317 		length = rtap_len;
5318 		/* IEEE80211_RADIOTAP_HE-MU */
5319 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5320 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5321 								rtap_buf,
5322 								rtap_len);
5323 
5324 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5325 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5326 			return 0;
5327 		}
5328 	}
5329 
5330 	if (rx_status->he_mu_other_flags) {
5331 		length = rtap_len;
5332 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5333 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5334 		rtap_len =
5335 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5336 								rtap_buf,
5337 								rtap_len);
5338 
5339 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5340 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5341 			return 0;
5342 		}
5343 	}
5344 
5345 	rtap_len = qdf_align(rtap_len, 2);
5346 	/*
5347 	 * Radiotap Vendor Namespace
5348 	 */
5349 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5350 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5351 					(rtap_buf + rtap_len);
5352 	/*
5353 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5354 	 */
5355 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5356 	/*
5357 	 * Name space selector = 0
5358 	 * We only will have one namespace for now
5359 	 */
5360 	radiotap_vendor_ns_ath->hdr.selector = 0;
5361 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5362 					sizeof(*radiotap_vendor_ns_ath) -
5363 					sizeof(radiotap_vendor_ns_ath->hdr));
5364 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5365 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5366 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5367 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5368 				cpu_to_le32(rx_status->ppdu_timestamp);
5369 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5370 
5371 	/* Move to next it_present */
5372 	if (radiotap_ext1_hdr_present) {
5373 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5374 		put_unaligned_le32(it_present_val, it_present);
5375 		it_present_val = 0;
5376 		it_present++;
5377 	}
5378 
5379 	/* Add Extension to Radiotap Header & corresponding data */
5380 	if (rx_status->add_rtap_ext) {
5381 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5382 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5383 
5384 		rtap_buf[rtap_len] = rx_status->tx_status;
5385 		rtap_len += 1;
5386 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5387 		rtap_len += 1;
5388 	}
5389 
5390 	/* Add Extension2 to Radiotap Header */
5391 	if (rx_status->add_rtap_ext2) {
5392 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5393 
5394 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5395 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5396 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5397 		if (!rx_user_status) {
5398 			rtap_ext2->tid = rx_status->tid;
5399 			rtap_ext2->start_seq = rx_status->start_seq;
5400 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5401 				     rx_status->ba_bitmap,
5402 				     8 * (sizeof(uint32_t)));
5403 		} else {
5404 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5405 
5406 			/* set default bitmap sz if not set */
5407 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5408 			rtap_ext2->tid = rx_user_status->tid;
5409 			rtap_ext2->start_seq = rx_user_status->start_seq;
5410 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5411 				     rx_user_status->ba_bitmap,
5412 				     ba_bitmap_sz * (sizeof(uint32_t)));
5413 		}
5414 
5415 		rtap_len += sizeof(*rtap_ext2);
5416 	}
5417 
5418 	if (rx_status->usig_flags) {
5419 		length = rtap_len;
5420 		/* IEEE80211_RADIOTAP_USIG */
5421 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5422 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5423 							       rtap_buf,
5424 							       rtap_len);
5425 
5426 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5427 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5428 			return 0;
5429 		}
5430 	}
5431 
5432 	if (rx_status->eht_flags) {
5433 		length = rtap_len;
5434 		/* IEEE80211_RADIOTAP_EHT */
5435 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5436 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5437 							      rtap_buf,
5438 							      rtap_len);
5439 
5440 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5441 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5442 			return 0;
5443 		}
5444 	}
5445 
5446 	put_unaligned_le32(it_present_val, it_present);
5447 	rthdr->it_len = cpu_to_le16(rtap_len);
5448 
5449 	if (headroom_sz < rtap_len) {
5450 		qdf_debug("DEBUG: Not enough space to update radiotap");
5451 		return 0;
5452 	}
5453 
5454 	qdf_nbuf_push_head(nbuf, rtap_len);
5455 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5456 	return rtap_len;
5457 }
5458 #else
5459 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5460 					struct mon_rx_status *rx_status,
5461 					int8_t *rtap_buf,
5462 					uint32_t rtap_len)
5463 {
5464 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5465 	return 0;
5466 }
5467 
5468 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5469 				      int8_t *rtap_buf, uint32_t rtap_len)
5470 {
5471 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5472 	return 0;
5473 }
5474 
5475 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5476 					struct mon_rx_status *rx_status,
5477 					uint8_t *rtap_buf,
5478 					uint32_t rtap_len)
5479 {
5480 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5481 	return 0;
5482 }
5483 
5484 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5485 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5486 {
5487 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5488 	return 0;
5489 }
5490 #endif
5491 qdf_export_symbol(qdf_nbuf_update_radiotap);
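
/*
 * Illustrative monitor-mode sketch: once the PPDU status has been parsed
 * into a struct mon_rx_status, the radiotap header is prepended into the
 * nbuf headroom before the frame is handed to the OS. mon_skb, rx_status
 * and deliver_to_stack() are hypothetical caller-side names.
 *
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&rx_status, mon_skb,
 *					    qdf_nbuf_headroom(mon_skb));
 *	if (!rtap_len) {
 *		qdf_nbuf_free(mon_skb);
 *		return;
 *	}
 *	deliver_to_stack(mon_skb);
 */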
5492 
5493 /**
5494  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
5495  * @cb_func_ptr: function pointer to the nbuf free callback
5496  *
5497  * This function registers a callback function for nbuf free.
5498  *
5499  * Return: none
5500  */
5501 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5502 {
5503 	nbuf_free_cb = cb_func_ptr;
5504 }
5505 
5506 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
5507 
5508 /**
5509  * qdf_nbuf_classify_pkt() - classify packet
5510  * @skb: sk buff
5511  *
5512  * Return: none
5513  */
5514 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5515 {
5516 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5517 
5518 	/* check destination mac address is broadcast/multicast */
5519 	if (is_broadcast_ether_addr((uint8_t *)eh))
5520 		QDF_NBUF_CB_SET_BCAST(skb);
5521 	else if (is_multicast_ether_addr((uint8_t *)eh))
5522 		QDF_NBUF_CB_SET_MCAST(skb);
5523 
5524 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5525 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5526 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5527 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5528 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5529 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5530 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5531 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5532 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5533 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5534 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5535 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5536 }
5537 qdf_export_symbol(qdf_nbuf_classify_pkt);
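
/*
 * Illustrative sketch: classification is typically run on the 802.3
 * frame at the start of the transmit path so later stages can branch on
 * the cached packet type. use_high_priority_queue() is a hypothetical
 * policy hook.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		use_high_priority_queue(skb);
 */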
5538 
5539 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5540 {
5541 	qdf_nbuf_users_set(&nbuf->users, 1);
5542 	nbuf->data = nbuf->head + NET_SKB_PAD;
5543 	skb_reset_tail_pointer(nbuf);
5544 }
5545 qdf_export_symbol(__qdf_nbuf_init);
5546 
5547 #ifdef WLAN_FEATURE_FASTPATH
5548 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5549 {
5550 	qdf_nbuf_users_set(&nbuf->users, 1);
5551 	nbuf->data = nbuf->head + NET_SKB_PAD;
5552 	skb_reset_tail_pointer(nbuf);
5553 }
5554 qdf_export_symbol(qdf_nbuf_init_fast);
5555 #endif /* WLAN_FEATURE_FASTPATH */
5556 
5557 
5558 #ifdef QDF_NBUF_GLOBAL_COUNT
5559 /**
5560  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
5561  *
5562  * Return: void
5563  */
5564 void __qdf_nbuf_mod_init(void)
5565 {
5566 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5567 	qdf_atomic_init(&nbuf_count);
5568 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5569 }
5570 
5571 /**
5572  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
5573  *
5574  * Return: void
5575  */
5576 void __qdf_nbuf_mod_exit(void)
5577 {
5578 }
5579 #endif
5580 
5581 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5582 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5583 					    int offset)
5584 {
5585 	unsigned int frag_offset;
5586 	skb_frag_t *frag;
5587 
5588 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5589 		return QDF_STATUS_E_FAILURE;
5590 
5591 	frag = &skb_shinfo(nbuf)->frags[idx];
5592 	frag_offset = skb_frag_off(frag);
5593 
5594 	frag_offset += offset;
5595 	skb_frag_off_set(frag, frag_offset);
5596 
5597 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5598 
5599 	return QDF_STATUS_SUCCESS;
5600 }
5601 
5602 #else
5603 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5604 					    int offset)
5605 {
5606 	uint16_t frag_offset;
5607 	skb_frag_t *frag;
5608 
5609 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5610 		return QDF_STATUS_E_FAILURE;
5611 
5612 	frag = &skb_shinfo(nbuf)->frags[idx];
5613 	frag_offset = frag->page_offset;
5614 
5615 	frag_offset += offset;
5616 	frag->page_offset = frag_offset;
5617 
5618 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5619 
5620 	return QDF_STATUS_SUCCESS;
5621 }
5622 #endif
5623 
5624 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
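
/*
 * Usage sketch (illustrative only, not part of this file): advancing the page
 * offset of frag 0 by 16 bytes also trims 16 bytes from that frag's length
 * (the size delta above is applied with the opposite sign), which lets a
 * caller strip a header that sits at the start of a frag. nbuf is a
 * placeholder.
 *
 *	QDF_STATUS status;
 *
 *	status = __qdf_nbuf_move_frag_page_offset(nbuf, 0, 16);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */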
5625 
5626 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
5627 			    uint16_t idx,
5628 			    uint16_t truesize)
5629 {
5630 	struct page *page;
5631 	uint16_t frag_len;
5632 
5633 	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
5634 
5635 	if (qdf_unlikely(!page))
5636 		return;
5637 
5638 	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
5639 	put_page(page);
5640 	nbuf->len -= frag_len;
5641 	nbuf->data_len -= frag_len;
5642 	nbuf->truesize -= truesize;
5643 	skb_shinfo(nbuf)->nr_frags--;
5644 }
5645 
5646 qdf_export_symbol(__qdf_nbuf_remove_frag);
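
/*
 * Usage sketch (illustrative only, not part of this file): releasing the last
 * attached frag of an nbuf, assuming at least one frag is present. The caller
 * passes the same truesize that was used when the frag was attached so that
 * skb accounting stays balanced; frag_truesize is a placeholder for it.
 *
 *	uint16_t last_idx = qdf_nbuf_get_nr_frags(nbuf) - 1;
 *
 *	__qdf_nbuf_remove_frag(nbuf, last_idx, frag_truesize);
 */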
5647 
5648 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5649 			    int offset, int frag_len,
5650 			    unsigned int truesize, bool take_frag_ref)
5651 {
5652 	struct page *page;
5653 	int frag_offset;
5654 	uint8_t nr_frag;
5655 
5656 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5657 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5658 
5659 	page = virt_to_head_page(buf);
5660 	frag_offset = buf - page_address(page);
5661 
5662 	skb_add_rx_frag(nbuf, nr_frag, page,
5663 			(frag_offset + offset),
5664 			frag_len, truesize);
5665 
5666 	if (unlikely(take_frag_ref)) {
5667 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5668 		skb_frag_ref(nbuf, nr_frag);
5669 	}
5670 }
5671 
5672 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
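
/*
 * Usage sketch (illustrative only, not part of this file): attaching a
 * received buffer to an nbuf as a page frag. take_frag_ref is false here
 * because the buffer is being attached to an nbuf for the first time, so the
 * existing page reference is handed over to the nbuf; pass true when the
 * buffer is already attached to another nbuf. rx_buf_va, payload_len and
 * rx_buf_truesize are placeholders.
 *
 *	__qdf_nbuf_add_rx_frag(rx_buf_va, nbuf, 0, payload_len,
 *			       rx_buf_truesize, false);
 */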
5673 
5674 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
5675 {
5676 	struct page *page;
5677 	skb_frag_t frag = {0};
5678 
5679 	page = virt_to_head_page(buf);
5680 	__skb_frag_set_page(&frag, page);
5681 
5682 	/*
5683 	 * since __skb_frag_ref() only uses the page to increase the
5684 	 * refcount, it is enough to set the page alone in the local frag
5685 	 */
5686 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5687 	__skb_frag_ref(&frag);
5688 }
5689 
5690 qdf_export_symbol(__qdf_nbuf_ref_frag);
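
/*
 * Usage sketch (illustrative only, not part of this file): taking an extra
 * reference on the page backing a frag buffer before the same buffer is
 * shared with a second consumer, so that freeing one user does not release
 * the page from under the other. rx_buf_va is a placeholder.
 *
 *	__qdf_nbuf_ref_frag(rx_buf_va);
 */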
5691 
5692 #ifdef NBUF_FRAG_MEMORY_DEBUG
5693 
5694 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5695 						int offset, const char *func,
5696 						uint32_t line)
5697 {
5698 	QDF_STATUS result;
5699 	qdf_frag_t p_fragp, n_fragp;
5700 
5701 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5702 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5703 
5704 	if (qdf_likely(is_initial_mem_debug_disabled))
5705 		return result;
5706 
5707 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5708 
5709 	/*
5710 	 * Update frag address in frag debug tracker
5711 	 * when frag offset is successfully changed in skb
5712 	 */
5713 	if (result == QDF_STATUS_SUCCESS)
5714 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
5715 
5716 	return result;
5717 }
5718 
5719 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
5720 
5721 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
5722 				int offset, int frag_len,
5723 				unsigned int truesize, bool take_frag_ref,
5724 				const char *func, uint32_t line)
5725 {
5726 	qdf_frag_t fragp;
5727 	uint32_t num_nr_frags;
5728 
5729 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
5730 			       frag_len, truesize, take_frag_ref);
5731 
5732 	if (qdf_likely(is_initial_mem_debug_disabled))
5733 		return;
5734 
5735 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
5736 
5737 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5738 
5739 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
5740 
5741 	/* Update frag address in frag debug tracking table */
5742 	if (fragp != buf)
5743 		qdf_frag_debug_update_addr(buf, fragp, func, line);
5744 
5745 	/* Update frag refcount in frag debug tracking table */
5746 	qdf_frag_debug_refcount_inc(fragp, func, line);
5747 }
5748 
5749 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
5750 
5751 /**
5752  * qdf_nbuf_ref_frag_debug() - get frag reference
5753  * @buf: Frag pointer on which the reference is to be taken
5754  *
5755  * Return: void
5756  */
5757 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
5758 {
5759 	__qdf_nbuf_ref_frag(buf);
5760 
5761 	if (qdf_likely(is_initial_mem_debug_disabled))
5762 		return;
5763 
5764 	/* Update frag refcount in frag debug tracking table */
5765 	qdf_frag_debug_refcount_inc(buf, func, line);
5766 }
5767 
5768 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
5769 
5770 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
5771 				    uint32_t line)
5772 {
5773 	uint32_t num_nr_frags;
5774 	uint32_t idx = 0;
5775 	qdf_nbuf_t ext_list;
5776 	qdf_frag_t p_frag;
5777 
5778 	if (qdf_likely(is_initial_mem_debug_disabled))
5779 		return;
5780 
5781 	if (qdf_unlikely(!buf))
5782 		return;
5783 
5784 	/* Take care to update the refcount in the debug entries for frags */
5785 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5786 
5787 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5788 
5789 	while (idx < num_nr_frags) {
5790 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5791 		if (qdf_likely(p_frag))
5792 			qdf_frag_debug_refcount_inc(p_frag, func, line);
5793 		idx++;
5794 	}
5795 
5796 	/*
5797 	 * Take care to update the refcount in the debug entries for the
5798 	 * frags attached to frag_list
5799 	 */
5800 	ext_list = qdf_nbuf_get_ext_list(buf);
5801 	while (ext_list) {
5802 		idx = 0;
5803 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5804 
5805 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5806 
5807 		while (idx < num_nr_frags) {
5808 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5809 			if (qdf_likely(p_frag))
5810 				qdf_frag_debug_refcount_inc(p_frag, func, line);
5811 			idx++;
5812 		}
5813 		ext_list = qdf_nbuf_queue_next(ext_list);
5814 	}
5815 }
5816 
5817 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
5818 
5819 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
5820 				    uint32_t line)
5821 {
5822 	uint32_t num_nr_frags;
5823 	qdf_nbuf_t ext_list;
5824 	uint32_t idx = 0;
5825 	qdf_frag_t p_frag;
5826 
5827 	if (qdf_likely(is_initial_mem_debug_disabled))
5828 		return;
5829 
5830 	if (qdf_unlikely(!buf))
5831 		return;
5832 
5833 	/*
5834 	 * Decrement the refcount for frag debug nodes only when the last
5835 	 * user of the nbuf calls this API, so that the refcount is not
5836 	 * decremented on every call except the last one when the nbuf
5837 	 * has multiple users
5838 	 */
5839 	if (qdf_nbuf_get_users(buf) > 1)
5840 		return;
5841 
5842 	/* Take care to update the refcount in the debug entries for frags */
5843 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
5844 
5845 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5846 
5847 	while (idx < num_nr_frags) {
5848 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
5849 		if (qdf_likely(p_frag))
5850 			qdf_frag_debug_refcount_dec(p_frag, func, line);
5851 		idx++;
5852 	}
5853 
5854 	/* Take care to update debug entries for frags attached to frag_list */
5855 	ext_list = qdf_nbuf_get_ext_list(buf);
5856 	while (ext_list) {
5857 		if (qdf_nbuf_get_users(ext_list) == 1) {
5858 			idx = 0;
5859 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
5860 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
5861 			while (idx < num_nr_frags) {
5862 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
5863 				if (qdf_likely(p_frag))
5864 					qdf_frag_debug_refcount_dec(p_frag,
5865 								    func, line);
5866 				idx++;
5867 			}
5868 		}
5869 		ext_list = qdf_nbuf_queue_next(ext_list);
5870 	}
5871 }
5872 
5873 qdf_export_symbol(qdf_net_buf_debug_release_frag);
5874 
5875 /**
5876  * qdf_nbuf_remove_frag_debug() - Remove frag from nbuf
5877  * @nbuf: nbuf where frag will be removed
5878  * @idx: frag index
5879  * @truesize: truesize of frag
5880  * @func: Caller function name
5881  * @line: Caller function line no.
5882  *
5883  * Return: QDF_STATUS
5884  *
5885  */
5886 QDF_STATUS
5887 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
5888 			   uint16_t idx,
5889 			   uint16_t truesize,
5890 			   const char *func,
5891 			   uint32_t line)
5892 {
5893 	uint16_t num_frags;
5894 	qdf_frag_t frag;
5895 
5896 	if (qdf_unlikely(!nbuf))
5897 		return QDF_STATUS_E_INVAL;
5898 
5899 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
5900 	if (idx >= num_frags)
5901 		return QDF_STATUS_E_INVAL;
5902 
5903 	if (qdf_likely(is_initial_mem_debug_disabled)) {
5904 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
5905 		return QDF_STATUS_SUCCESS;
5906 	}
5907 
5908 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
5909 	if (qdf_likely(frag))
5910 		qdf_frag_debug_refcount_dec(frag, func, line);
5911 
5912 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
5913 
5914 	return QDF_STATUS_SUCCESS;
5915 }
5916 
5917 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
5918 
5919 #endif /* NBUF_FRAG_MEMORY_DEBUG */
5920 
5921 /**
5922  * qdf_get_nbuf_valid_frag() - Get an nbuf that has room to store a frag
5923  * @nbuf: qdf_nbuf_t master nbuf
5924  *
5925  * Return: qdf_nbuf_t
5926  */
5927 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
5928 {
5929 	qdf_nbuf_t last_nbuf;
5930 	uint32_t num_frags;
5931 
5932 	if (qdf_unlikely(!nbuf))
5933 		return NULL;
5934 
5935 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
5936 
5937 	/* Check whether nbuf has room left in its frag array to store the frag */
5938 	if (num_frags < QDF_NBUF_MAX_FRAGS)
5939 		return nbuf;
5940 
5941 	if (!__qdf_nbuf_has_fraglist(nbuf))
5942 		return NULL;
5943 
5944 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
5945 	if (qdf_unlikely(!last_nbuf))
5946 		return NULL;
5947 
5948 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
5949 	if (num_frags < QDF_NBUF_MAX_FRAGS)
5950 		return last_nbuf;
5951 
5952 	return NULL;
5953 }
5954 
5955 qdf_export_symbol(qdf_get_nbuf_valid_frag);
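
/*
 * Usage sketch (illustrative only, not part of this file): picking the nbuf
 * that still has room in its frag array before attaching a frag. A NULL
 * return means both the master nbuf and the last nbuf on its frag_list are
 * full, so the caller would have to allocate and chain a new nbuf, as
 * qdf_nbuf_add_frag_debug() below does. mpdu_nbuf, buf, frag_len, truesize
 * and take_frag_ref are placeholders.
 *
 *	qdf_nbuf_t target = qdf_get_nbuf_valid_frag(mpdu_nbuf);
 *
 *	if (target)
 *		qdf_nbuf_add_rx_frag(buf, target, 0, frag_len,
 *				     truesize, take_frag_ref);
 */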
5956 
5957 /**
5958  * qdf_nbuf_add_frag_debug() - Add frag to nbuf
5959  * @osdev: Device handle
5960  * @buf: Frag pointer needs to be added in nbuf frag
5961  * @nbuf: qdf_nbuf_t where frag will be added
5962  * @offset: Offset in frag to be added to nbuf_frags
5963  * @frag_len: Frag length
5964  * @truesize: truesize
5965  * @take_frag_ref: Whether to take ref for frag or not
5966  *      This bool must be set as per the below condition:
5967  *      1. False: If this frag is being added in any nbuf
5968  *              for the first time after allocation
5969  *      2. True: If frag is already attached part of any
5970  *              nbuf
5971  * @minsize: Minimum size to allocate
5972  * @func: Caller function name
5973  * @line: Caller function line no.
5974  *
5975  * If the number of frags exceeds the maximum frag array size, a new nbuf
5976  * is allocated with minimum headroom and the frag is added to that nbuf.
5977  * The new nbuf is then appended as frag_list to the master nbuf.
5978  *
5979  * Return: QDF_STATUS
5980  */
5981 QDF_STATUS
5982 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
5983 			qdf_nbuf_t nbuf, int offset,
5984 			int frag_len, unsigned int truesize,
5985 			bool take_frag_ref, unsigned int minsize,
5986 			const char *func, uint32_t line)
5987 {
5988 	qdf_nbuf_t cur_nbuf;
5989 	qdf_nbuf_t this_nbuf;
5990 
5991 	cur_nbuf = nbuf;
5992 	this_nbuf = nbuf;
5993 
5994 	if (qdf_unlikely(!frag_len || !buf)) {
5995 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
5996 			     func, line,
5997 			     buf, frag_len);
5998 		return QDF_STATUS_E_INVAL;
5999 	}
6000 
6001 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6002 
6003 	if (this_nbuf) {
6004 		cur_nbuf = this_nbuf;
6005 	} else {
6006 		/* allocate a new nbuf with minsize headroom to hold the frag */
6007 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6008 		if (qdf_unlikely(!this_nbuf)) {
6009 			qdf_nofl_err("%s : %d no memory to allocate\n",
6010 				     func, line);
6011 			return QDF_STATUS_E_NOMEM;
6012 		}
6013 	}
6014 
6015 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6016 			     take_frag_ref);
6017 
6018 	if (this_nbuf != cur_nbuf) {
6019 		/* add new skb to frag list */
6020 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6021 					 qdf_nbuf_len(this_nbuf));
6022 	}
6023 
6024 	return QDF_STATUS_SUCCESS;
6025 }
6026 
6027 qdf_export_symbol(qdf_nbuf_add_frag_debug);
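
/*
 * Usage sketch (illustrative only, not part of this file): attaching one MSDU
 * buffer to an MPDU nbuf, letting the API spill over into a chained nbuf once
 * the frag array is full. The debug variant is called directly here for
 * clarity; osdev, msdu_buf_va, mpdu_nbuf, msdu_len, msdu_truesize and
 * min_nbuf_size are placeholders.
 *
 *	QDF_STATUS status;
 *
 *	status = qdf_nbuf_add_frag_debug(osdev, msdu_buf_va, mpdu_nbuf, 0,
 *					 msdu_len, msdu_truesize, false,
 *					 min_nbuf_size, __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */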
6028 
6029 #ifdef MEMORY_DEBUG
6030 void qdf_nbuf_acquire_track_lock(uint32_t index,
6031 				 unsigned long irq_flag)
6032 {
6033 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6034 			  irq_flag);
6035 }
6036 
6037 void qdf_nbuf_release_track_lock(uint32_t index,
6038 				 unsigned long irq_flag)
6039 {
6040 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6041 			       irq_flag);
6042 }
6043 
6044 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6045 {
6046 	return gp_qdf_net_buf_track_tbl[index];
6047 }
6048 #endif /* MEMORY_DEBUG */
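
/*
 * Usage sketch (illustrative only, not part of this file): inspecting one
 * hash bucket of the nbuf tracking table under its IRQ-safe lock. The bucket
 * index i and irq_flag are placeholders, and the list walk itself is elided.
 *
 *	QDF_NBUF_TRACK *head;
 *
 *	qdf_nbuf_acquire_track_lock(i, irq_flag);
 *	head = qdf_nbuf_get_track_tbl(i);
 *	... walk the list headed by head ...
 *	qdf_nbuf_release_track_lock(i, irq_flag);
 */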
6049 
6050 #ifdef ENHANCED_OS_ABSTRACTION
6051 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6052 {
6053 	__qdf_nbuf_set_timestamp(buf);
6054 }
6055 
6056 qdf_export_symbol(qdf_nbuf_set_timestamp);
6057 
6058 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6059 {
6060 	return __qdf_nbuf_get_timestamp(buf);
6061 }
6062 
6063 qdf_export_symbol(qdf_nbuf_get_timestamp);
6064 
6065 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6066 {
6067 	return __qdf_nbuf_get_timedelta_us(buf);
6068 }
6069 
6070 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6071 
6072 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6073 {
6074 	return __qdf_nbuf_get_timedelta_ms(buf);
6075 }
6076 
6077 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6078 
6079 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6080 {
6081 	return __qdf_nbuf_net_timedelta(t);
6082 }
6083 
6084 qdf_export_symbol(qdf_nbuf_net_timedelta);
6085 #endif
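
/*
 * Usage sketch (illustrative only, not part of this file): stamping an nbuf
 * when it is queued for transmission and reading the elapsed time back at
 * completion, e.g. to log TX completion latency in milliseconds. latency_ms
 * is a placeholder.
 *
 *	At enqueue:
 *		qdf_nbuf_set_timestamp(nbuf);
 *
 *	At completion:
 *		uint64_t latency_ms = qdf_nbuf_get_timedelta_ms(nbuf);
 */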
6086