xref: /wlan-dirver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision b80337cee1b59d5fa6f823a38d377ce0bc0a3ab3)
1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework(QDF) network buffer management APIs
23  */
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <linux/inetdevice.h>
31 #include <qdf_atomic.h>
32 #include <qdf_debugfs.h>
33 #include <qdf_lock.h>
34 #include <qdf_mem.h>
35 #include <qdf_module.h>
36 #include <qdf_nbuf.h>
37 #include <qdf_status.h>
38 #include "qdf_str.h"
39 #include <qdf_trace.h>
40 #include "qdf_tracker.h"
41 #include <qdf_types.h>
42 #include <net/ieee80211_radiotap.h>
43 #include <pld_common.h>
44 #include <qdf_crypto.h>
45 #include <linux/igmp.h>
46 #include <net/mld.h>
47 
48 #if defined(FEATURE_TSO)
49 #include <net/ipv6.h>
50 #include <linux/ipv6.h>
51 #include <linux/tcp.h>
52 #include <linux/if_vlan.h>
53 #include <linux/ip.h>
54 #endif /* FEATURE_TSO */
55 
56 #ifdef IPA_OFFLOAD
57 #include <i_qdf_ipa_wdi3.h>
58 #endif /* IPA_OFFLOAD */
59 #include "qdf_ssr_driver_dump.h"
60 
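/*
 * Note (descriptive): skb->users moved from atomic_t to refcount_t in
 * Linux 4.13, so the qdf_nbuf_users_*() helpers below are mapped onto
 * whichever API the running kernel provides.
 */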
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
62 
63 #define qdf_nbuf_users_inc atomic_inc
64 #define qdf_nbuf_users_dec atomic_dec
65 #define qdf_nbuf_users_set atomic_set
66 #define qdf_nbuf_users_read atomic_read
67 #else
68 #define qdf_nbuf_users_inc refcount_inc
69 #define qdf_nbuf_users_dec refcount_dec
70 #define qdf_nbuf_users_set refcount_set
71 #define qdf_nbuf_users_read refcount_read
72 #endif /* KERNEL_VERSION(4, 13, 0) */
73 
74 #define IEEE80211_RADIOTAP_VHT_BW_20	0
75 #define IEEE80211_RADIOTAP_VHT_BW_40	1
76 #define IEEE80211_RADIOTAP_VHT_BW_80	2
77 #define IEEE80211_RADIOTAP_VHT_BW_160	3
78 
79 #define RADIOTAP_VHT_BW_20	0
80 #define RADIOTAP_VHT_BW_40	1
81 #define RADIOTAP_VHT_BW_80	4
82 #define RADIOTAP_VHT_BW_160	11
83 
84 /* tx status */
85 #define RADIOTAP_TX_STATUS_FAIL		1
86 #define RADIOTAP_TX_STATUS_NOACK	2
87 
88 /* channel number to freq conversion */
89 #define CHANNEL_NUM_14 14
90 #define CHANNEL_NUM_15 15
91 #define CHANNEL_NUM_27 27
92 #define CHANNEL_NUM_35 35
93 #define CHANNEL_NUM_182 182
94 #define CHANNEL_NUM_197 197
95 #define CHANNEL_FREQ_2484 2484
96 #define CHANNEL_FREQ_2407 2407
97 #define CHANNEL_FREQ_2512 2512
98 #define CHANNEL_FREQ_5000 5000
99 #define CHANNEL_FREQ_4000 4000
100 #define CHANNEL_FREQ_5150 5150
101 #define FREQ_MULTIPLIER_CONST_5MHZ 5
102 #define FREQ_MULTIPLIER_CONST_20MHZ 20
103 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
104 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
105 #define RADIOTAP_CCK_CHANNEL 0x0020
106 #define RADIOTAP_OFDM_CHANNEL 0x0040
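/*
 * Illustrative note (assumption: the standard 802.11 channelization
 * applies): the constants above support the usual channel-to-frequency
 * conversion, e.g. a 2.4 GHz channel below CHANNEL_NUM_14 converts as
 * CHANNEL_FREQ_2407 + chan * FREQ_MULTIPLIER_CONST_5MHZ (channel 6 ->
 * 2407 + 6 * 5 = 2437 MHz), while channel 14 maps to CHANNEL_FREQ_2484.
 */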
107 
108 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
109 #include <qdf_mc_timer.h>
110 
111 struct qdf_track_timer {
112 	qdf_mc_timer_t track_timer;
113 	qdf_atomic_t alloc_fail_cnt;
114 };
115 
116 static struct qdf_track_timer alloc_track_timer;
117 
118 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
119 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
120 #endif
121 
122 #ifdef NBUF_MEMORY_DEBUG
123 /* SMMU crash indication*/
124 static qdf_atomic_t smmu_crashed;
125 /* Number of nbufs not added to the history */
126 unsigned long g_histroy_add_drop;
127 #endif
128 
129 /* Packet Counter */
130 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
131 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
132 #ifdef QDF_NBUF_GLOBAL_COUNT
133 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
134 static qdf_atomic_t nbuf_count;
135 #endif
136 
137 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
138 static bool is_initial_mem_debug_disabled;
139 #endif
140 
141 /**
142  *  __qdf_nbuf_get_ip_offset() - Get IPV4/V6 header offset
143  * @data: Pointer to network data buffer
144  *
145  * Get the IP header offset when an 802.1Q (single VLAN) or 802.1AD
146  * (double VLAN) tag is present in the L2 header.
147  *
148  * Return: IP header offset
149  */
150 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
151 {
152 	uint16_t ether_type;
153 
154 	ether_type = *(uint16_t *)(data +
155 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
156 
157 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
158 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
159 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
160 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
161 
162 	return QDF_NBUF_TRAC_IP_OFFSET;
163 }
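/*
 * Usage sketch (illustrative only, assuming an Ethernet II frame at @data):
 * the returned offset already accounts for a single or double VLAN tag, so
 * the IPv4 header can be located as
 *
 *	struct iphdr *iph =
 *		(struct iphdr *)(data + __qdf_nbuf_get_ip_offset(data));
 *
 * which is how __qdf_nbuf_data_is_ipv4_dhcp_pkt() below consumes it.
 */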
164 
165 /**
166  *  __qdf_nbuf_get_ether_type() - Get the ether type
167  * @data: Pointer to network data buffer
168  *
169  * Get the ether type even when an 802.1Q or 802.1AD tag is present in
170  * the L2 header. The value is in network byte order, e.g. for IPv4
171  * data (on-wire ether type 0x0800) a little-endian host reads 0x0008.
172  *
173  * Return: ether type
174  */
175 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
176 {
177 	uint16_t ether_type;
178 
179 	ether_type = *(uint16_t *)(data +
180 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
181 
182 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
183 		ether_type = *(uint16_t *)(data +
184 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
185 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
186 		ether_type = *(uint16_t *)(data +
187 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
188 
189 	return ether_type;
190 }
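/*
 * Byte-order note (illustrative): the value is returned as read from the
 * frame, i.e. in network byte order, so callers compare it against
 * QDF_SWAP_U16() of the host-order ether type, for example:
 *
 *	if (__qdf_nbuf_get_ether_type(data) ==
 *	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
 *		return true;
 *
 * as done in __qdf_nbuf_data_is_ipv4_dhcp_pkt() below.
 */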
191 
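/*
 * Note (descriptive): each value printed below is a difference between
 * adjacent layer counters, i.e. roughly the number of packets that have
 * entered a layer (HDD -> TXRX queue -> TXRX -> HTT -> HTC -> HIF -> CE ->
 * TX completion) but have not yet been handed to the next one.
 */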
192 void qdf_nbuf_tx_desc_count_display(void)
193 {
194 	qdf_debug("Current Snapshot of the Driver:");
195 	qdf_debug("Data Packets:");
196 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
197 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
198 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
199 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
200 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
202 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
205 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
207 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
211 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
212 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
213 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
214 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
215 	qdf_debug("Mgmt Packets:");
216 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
217 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
218 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
219 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
220 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
227 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
228 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
229 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
230 }
231 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
232 
233 /**
234  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
235  * @packet_type: packet type, either mgmt or data
236  * @current_state: layer at which the packet is currently present
237  *
238  * Return: none
239  */
240 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
241 			uint8_t current_state)
242 {
243 	switch (packet_type) {
244 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
245 		nbuf_tx_mgmt[current_state]++;
246 		break;
247 	case QDF_NBUF_TX_PKT_DATA_TRACK:
248 		nbuf_tx_data[current_state]++;
249 		break;
250 	default:
251 		break;
252 	}
253 }
254 
255 void qdf_nbuf_tx_desc_count_clear(void)
256 {
257 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
258 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
259 }
260 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
261 
262 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
263 {
264 	/*
265 	 * Only mgmt and data packets are tracked; WMI messages
266 	 * such as scan commands are not.
267 	 */
268 	uint8_t packet_type;
269 
270 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
271 
272 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
273 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
274 		return;
275 	}
276 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
277 	qdf_nbuf_tx_desc_count_update(packet_type,
278 					current_state);
279 }
280 qdf_export_symbol(qdf_nbuf_set_state);
281 
282 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
283 /**
284  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
285  *
286  * This function starts the alloc fail replenish timer.
287  *
288  * Return: void
289  */
290 static inline void __qdf_nbuf_start_replenish_timer(void)
291 {
292 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
293 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
294 	    QDF_TIMER_STATE_RUNNING)
295 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
296 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
297 }
298 
299 /**
300  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
301  *
302  * This function stops the alloc fail replenish timer.
303  *
304  * Return: void
305  */
306 static inline void __qdf_nbuf_stop_replenish_timer(void)
307 {
308 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
309 		return;
310 
311 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
312 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
313 	    QDF_TIMER_STATE_RUNNING)
314 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
315 }
316 
317 /**
318  * qdf_replenish_expire_handler() - Replenish expire handler
319  * @arg: unused callback argument
320  *
321  * This function is invoked when the alloc fail replenish timer expires.
322  *
323  * Return: void
324  */
325 static void qdf_replenish_expire_handler(void *arg)
326 {
327 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
328 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
329 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
330 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
331 
332 		/* Error handling here */
333 	}
334 }
335 
336 void __qdf_nbuf_init_replenish_timer(void)
337 {
338 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
339 			  qdf_replenish_expire_handler, NULL);
340 }
341 
342 void __qdf_nbuf_deinit_replenish_timer(void)
343 {
344 	__qdf_nbuf_stop_replenish_timer();
345 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
346 }
347 
348 void qdf_nbuf_stop_replenish_timer(void)
349 {
350 	__qdf_nbuf_stop_replenish_timer();
351 }
352 #else
353 
354 static inline void __qdf_nbuf_start_replenish_timer(void) {}
355 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
356 void qdf_nbuf_stop_replenish_timer(void)
357 {
358 }
359 #endif
360 
361 /* globals do not need to be initialized to NULL/0 */
362 qdf_nbuf_trace_update_t qdf_trace_update_cb;
363 qdf_nbuf_free_t nbuf_free_cb;
364 
365 #ifdef QDF_NBUF_GLOBAL_COUNT
366 
367 int __qdf_nbuf_count_get(void)
368 {
369 	return qdf_atomic_read(&nbuf_count);
370 }
371 qdf_export_symbol(__qdf_nbuf_count_get);
372 
373 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
374 {
375 	int num_nbuf = 1;
376 	qdf_nbuf_t ext_list;
377 
378 	if (qdf_likely(is_initial_mem_debug_disabled))
379 		return;
380 
381 	ext_list = qdf_nbuf_get_ext_list(nbuf);
382 
383 	/* Take care to account for frag_list */
384 	while (ext_list) {
385 		++num_nbuf;
386 		ext_list = qdf_nbuf_queue_next(ext_list);
387 	}
388 
389 	qdf_atomic_add(num_nbuf, &nbuf_count);
390 }
391 qdf_export_symbol(__qdf_nbuf_count_inc);
392 
393 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
394 {
395 	qdf_nbuf_t ext_list;
396 	int num_nbuf;
397 
398 	if (qdf_likely(is_initial_mem_debug_disabled))
399 		return;
400 
401 	if (qdf_nbuf_get_users(nbuf) > 1)
402 		return;
403 
404 	num_nbuf = 1;
405 
406 	/* Take care to account for frag_list */
407 	ext_list = qdf_nbuf_get_ext_list(nbuf);
408 	while (ext_list) {
409 		if (qdf_nbuf_get_users(ext_list) == 1)
410 			++num_nbuf;
411 		ext_list = qdf_nbuf_queue_next(ext_list);
412 	}
413 
414 	qdf_atomic_sub(num_nbuf, &nbuf_count);
415 }
416 qdf_export_symbol(__qdf_nbuf_count_dec);
417 #endif
418 
419 #ifdef NBUF_FRAG_MEMORY_DEBUG
420 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
421 {
422 	qdf_nbuf_t ext_list;
423 	uint32_t num_nr_frags;
424 	uint32_t total_num_nr_frags;
425 
426 	if (qdf_likely(is_initial_mem_debug_disabled))
427 		return;
428 
429 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
430 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
431 
432 	total_num_nr_frags = num_nr_frags;
433 
434 	/* Take into account the frags attached to frag_list */
435 	ext_list = qdf_nbuf_get_ext_list(nbuf);
436 	while (ext_list) {
437 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
438 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
439 		total_num_nr_frags += num_nr_frags;
440 		ext_list = qdf_nbuf_queue_next(ext_list);
441 	}
442 
443 	qdf_frag_count_inc(total_num_nr_frags);
444 }
445 
446 qdf_export_symbol(qdf_nbuf_frag_count_inc);
447 
448 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
449 {
450 	qdf_nbuf_t ext_list;
451 	uint32_t num_nr_frags;
452 	uint32_t total_num_nr_frags;
453 
454 	if (qdf_likely(is_initial_mem_debug_disabled))
455 		return;
456 
457 	if (qdf_nbuf_get_users(nbuf) > 1)
458 		return;
459 
460 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
461 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
462 
463 	total_num_nr_frags = num_nr_frags;
464 
465 	/* Take into account the frags attached to frag_list */
466 	ext_list = qdf_nbuf_get_ext_list(nbuf);
467 	while (ext_list) {
468 		if (qdf_nbuf_get_users(ext_list) == 1) {
469 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
470 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
471 			total_num_nr_frags += num_nr_frags;
472 		}
473 		ext_list = qdf_nbuf_queue_next(ext_list);
474 	}
475 
476 	qdf_frag_count_dec(total_num_nr_frags);
477 }
478 
479 qdf_export_symbol(qdf_nbuf_frag_count_dec);
480 
481 #endif
482 
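/**
 * qdf_nbuf_set_defaults() - put a freshly allocated skb into the default
 *	nbuf state
 * @skb: skb to initialize
 * @align: required alignment of skb->data, 0 for none
 * @reserve: headroom to reserve, in bytes
 *
 * Clears the control block and skb->dev, marks the extra fragments as
 * wordstreams, aligns skb->data to @align, reserves @reserve bytes of
 * headroom and increments the global nbuf count.
 *
 * Return: none
 */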
483 static inline void
484 qdf_nbuf_set_defaults(struct sk_buff *skb, int align, int reserve)
485 {
486 	unsigned long offset;
487 
488 	memset(skb->cb, 0x0, sizeof(skb->cb));
489 	skb->dev = NULL;
490 
491 	/*
492 	 * The default is for netbuf fragments to be interpreted
493 	 * as wordstreams rather than bytestreams.
494 	 */
495 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
496 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
497 
498 	/*
499 	 * XXX:how about we reserve first then align
500 	 * XXX: how about we reserve first, then align?
501 	 * Align and make sure that the tail and data are adjusted properly.
502 
503 	if (align) {
504 		offset = ((unsigned long)skb->data) % align;
505 		if (offset)
506 			skb_reserve(skb, align - offset);
507 	}
508 
509 	/*
510 	 * NOTE: alloc doesn't take responsibility if reserve un-aligns the
511 	 * data pointer
512 	 */
513 	skb_reserve(skb, reserve);
514 	qdf_nbuf_count_inc(skb);
515 }
516 
517 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
518 	!defined(QCA_WIFI_QCN9000)
519 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
520 				 int align, int prio, const char *func,
521 				 uint32_t line)
522 {
523 	struct sk_buff *skb;
524 	uint32_t lowmem_alloc_tries = 0;
525 
526 	if (align)
527 		size += (align - 1);
528 
529 realloc:
530 	skb = dev_alloc_skb(size);
531 
532 	if (skb)
533 		goto skb_alloc;
534 
535 	skb = pld_nbuf_pre_alloc(size);
536 
537 	if (!skb) {
538 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
539 				size, func, line);
540 		return NULL;
541 	}
542 
543 skb_alloc:
544 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
545 	 * Though we are trying to reserve low memory upfront to prevent this,
546 	 * we sometimes see SKBs allocated from low memory.
547 	 */
548 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
549 		lowmem_alloc_tries++;
550 		if (lowmem_alloc_tries > 100) {
551 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
552 				     size, func, line);
553 			return NULL;
554 		} else {
555 			/* Not freeing to make sure it
556 			 * will not get allocated again
557 			 */
558 			goto realloc;
559 		}
560 	}
561 
562 	qdf_nbuf_set_defaults(skb, align, reserve);
563 
564 	return skb;
565 }
566 #else
567 
568 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
569 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
570 				 int align, int prio, const char *func,
571 				 uint32_t line)
572 {
573 	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
574 				     line);
575 }
576 
577 #else
578 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
579 				 int align, int prio, const char *func,
580 				 uint32_t line)
581 {
582 	struct sk_buff *skb;
583 	int flags = GFP_KERNEL;
584 
585 	if (align)
586 		size += (align - 1);
587 
588 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
589 		flags = GFP_ATOMIC;
590 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
591 		/*
592 		 * Observed that kcompactd burns CPU trying to build order-3
593 		 * pages. __netdev_alloc_skb() has a 4k page fallback in case
594 		 * the high-order page allocation fails, so there is no need
595 		 * to push hard here. Let kcompactd rest in peace.
596 		 */
597 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
598 #endif
599 	}
600 
601 	skb =  alloc_skb(size, flags);
602 
603 	if (skb)
604 		goto skb_alloc;
605 
606 	skb = pld_nbuf_pre_alloc(size);
607 
608 	if (!skb) {
609 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
610 				size, func, line);
611 		__qdf_nbuf_start_replenish_timer();
612 		return NULL;
613 	}
614 
615 	__qdf_nbuf_stop_replenish_timer();
616 
617 skb_alloc:
618 	qdf_nbuf_set_defaults(skb, align, reserve);
619 
620 	return skb;
621 }
622 #endif
623 
624 #endif
625 qdf_export_symbol(__qdf_nbuf_alloc);
626 
627 struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
628 				      int reserve, int align, int prio,
629 				      const char *func, uint32_t line)
630 {
631 	struct sk_buff *skb;
632 	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
633 	bool atomic = false;
634 
635 	if (align)
636 		size += (align - 1);
637 
638 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
639 		atomic = true;
640 		flags = GFP_ATOMIC;
641 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
642 		/*
643 		 * Observed that kcompactd burns CPU trying to build order-3
644 		 * pages. __netdev_alloc_skb() has a 4k page fallback in case
645 		 * the high-order page allocation fails, so there is no need
646 		 * to push hard here. Let kcompactd rest in peace.
647 		 */
648 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
649 #endif
650 	}
651 
652 	skb = __netdev_alloc_skb(NULL, size, flags);
653 	if (skb)
654 		goto skb_alloc;
655 
656 	/* 32k page frag alloc failed, try page slab allocation */
657 	if (likely(!atomic))
658 		flags |= __GFP_DIRECT_RECLAIM;
659 
660 	skb = alloc_skb(size, flags);
661 	if (skb)
662 		goto skb_alloc;
663 
664 	skb = pld_nbuf_pre_alloc(size);
665 
666 	if (!skb) {
667 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
668 				size, func, line);
669 		__qdf_nbuf_start_replenish_timer();
670 		return NULL;
671 	}
672 
673 	__qdf_nbuf_stop_replenish_timer();
674 
675 skb_alloc:
676 	qdf_nbuf_set_defaults(skb, align, reserve);
677 
678 	return skb;
679 }
680 
681 qdf_export_symbol(__qdf_nbuf_frag_alloc);
682 
683 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
684 					  const char *func, uint32_t line)
685 {
686 	qdf_nbuf_t nbuf;
687 	unsigned long offset;
688 
689 	if (align)
690 		size += (align - 1);
691 
692 	nbuf = alloc_skb(size, GFP_ATOMIC);
693 	if (!nbuf)
694 		goto ret_nbuf;
695 
696 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
697 
698 	skb_reserve(nbuf, reserve);
699 
700 	if (align) {
701 		offset = ((unsigned long)nbuf->data) % align;
702 		if (offset)
703 			skb_reserve(nbuf, align - offset);
704 	}
705 
706 	qdf_nbuf_count_inc(nbuf);
707 
708 ret_nbuf:
709 	return nbuf;
710 }
711 
712 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
713 
714 void __qdf_nbuf_free(struct sk_buff *skb)
715 {
716 	if (pld_nbuf_pre_alloc_free(skb))
717 		return;
718 
719 	qdf_nbuf_frag_count_dec(skb);
720 
721 	qdf_nbuf_count_dec(skb);
722 	if (nbuf_free_cb)
723 		nbuf_free_cb(skb);
724 	else
725 		dev_kfree_skb_any(skb);
726 }
727 
728 qdf_export_symbol(__qdf_nbuf_free);
729 
730 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
731 {
732 	qdf_nbuf_t skb_new = NULL;
733 
734 	skb_new = skb_clone(skb, GFP_ATOMIC);
735 	if (skb_new) {
736 		qdf_nbuf_frag_count_inc(skb_new);
737 		qdf_nbuf_count_inc(skb_new);
738 	}
739 	return skb_new;
740 }
741 
742 qdf_export_symbol(__qdf_nbuf_clone);
743 
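/*
 * Sizing note (descriptive): the page-frag path below must leave room for
 * NET_SKB_PAD headroom plus the struct skb_shared_info that build_skb()
 * places at the end of the buffer, hence the size adjustments before
 * page_frag_alloc() is called.
 */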
744 struct sk_buff *
745 __qdf_nbuf_page_frag_alloc(qdf_device_t osdev, size_t size, int reserve,
746 			   int align, __qdf_frag_cache_t *pf_cache,
747 			   const char *func, uint32_t line)
748 {
749 	struct sk_buff *skb;
750 	qdf_frag_t frag_data;
751 	size_t orig_size = size;
752 	int flags = GFP_KERNEL;
753 
754 	if (align)
755 		size += (align - 1);
756 
757 	size += NET_SKB_PAD;
758 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
759 	size = SKB_DATA_ALIGN(size);
760 
761 	if (in_interrupt() || irqs_disabled() || in_atomic())
762 		flags = GFP_ATOMIC;
763 
764 	frag_data = page_frag_alloc(pf_cache, size, flags);
765 	if (!frag_data) {
766 		qdf_rl_nofl_err("page frag alloc failed %zuB @ %s:%d",
767 				size, func, line);
768 		return __qdf_nbuf_alloc(osdev, orig_size, reserve, align, 0,
769 					func, line);
770 	}
771 
772 	skb = build_skb(frag_data, size);
773 	if (skb) {
774 		skb_reserve(skb, NET_SKB_PAD);
775 		goto skb_alloc;
776 	}
777 
778 	/* Free the data allocated from pf_cache */
779 	page_frag_free(frag_data);
780 
781 	size = orig_size + align - 1;
782 
783 	skb = pld_nbuf_pre_alloc(size);
784 	if (!skb) {
785 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
786 				size, func, line);
787 		__qdf_nbuf_start_replenish_timer();
788 		return NULL;
789 	}
790 
791 	__qdf_nbuf_stop_replenish_timer();
792 
793 skb_alloc:
794 	qdf_nbuf_set_defaults(skb, align, reserve);
795 
796 	return skb;
797 }
798 
799 qdf_export_symbol(__qdf_nbuf_page_frag_alloc);
800 
801 #ifdef QCA_DP_TX_NBUF_LIST_FREE
802 void
803 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
804 {
805 	dev_kfree_skb_list_fast(nbuf_queue_head);
806 }
807 #else
808 void
809 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
810 {
811 }
812 #endif
813 
814 qdf_export_symbol(__qdf_nbuf_dev_kfree_list);
815 
816 #ifdef NBUF_MEMORY_DEBUG
817 struct qdf_nbuf_event {
818 	qdf_nbuf_t nbuf;
819 	char func[QDF_MEM_FUNC_NAME_SIZE];
820 	uint32_t line;
821 	enum qdf_nbuf_event_type type;
822 	uint64_t timestamp;
823 	qdf_dma_addr_t iova;
824 };
825 
826 #ifndef QDF_NBUF_HISTORY_SIZE
827 #define QDF_NBUF_HISTORY_SIZE 4096
828 #endif
829 static qdf_atomic_t qdf_nbuf_history_index;
830 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
831 
832 void qdf_nbuf_ssr_register_region(void)
833 {
834 	qdf_ssr_driver_dump_register_region("qdf_nbuf_history",
835 					    qdf_nbuf_history,
836 					    sizeof(qdf_nbuf_history));
837 }
838 
839 qdf_export_symbol(qdf_nbuf_ssr_register_region);
840 
841 void qdf_nbuf_ssr_unregister_region(void)
842 {
843 	qdf_ssr_driver_dump_unregister_region("qdf_nbuf_history");
844 }
845 
846 qdf_export_symbol(qdf_nbuf_ssr_unregister_region);
847 
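/**
 * qdf_nbuf_circular_index_next() - get the next slot of a circular history
 * @index: atomic running index shared by all callers
 * @size: number of slots in the history array
 *
 * The running index is pulled back by @size once it reaches @size, so the
 * returned value always stays within [0, @size). For example, with @size of
 * QDF_NBUF_HISTORY_SIZE, consecutive calls return 1, 2, ..., @size - 1,
 * 0, 1, ...
 *
 * Return: history slot index in the range [0, @size)
 */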
848 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
849 {
850 	int32_t next = qdf_atomic_inc_return(index);
851 
852 	if (next == size)
853 		qdf_atomic_sub(size, index);
854 
855 	return next % size;
856 }
857 
858 void
859 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
860 		     enum qdf_nbuf_event_type type)
861 {
862 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
863 						   QDF_NBUF_HISTORY_SIZE);
864 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
865 
866 	if (qdf_atomic_read(&smmu_crashed)) {
867 		g_histroy_add_drop++;
868 		return;
869 	}
870 
871 	event->nbuf = nbuf;
872 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
873 	event->line = line;
874 	event->type = type;
875 	event->timestamp = qdf_get_log_timestamp();
876 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
877 	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
878 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
879 	else
880 		event->iova = 0;
881 }
882 
883 void qdf_set_smmu_fault_state(bool smmu_fault_state)
884 {
885 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
886 	if (!smmu_fault_state)
887 		g_histroy_add_drop = 0;
888 }
889 qdf_export_symbol(qdf_set_smmu_fault_state);
890 #endif /* NBUF_MEMORY_DEBUG */
891 
892 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
893 #define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
894 qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
895 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
896 
897 static void qdf_nbuf_smmu_map_tracking_init(void)
898 {
899 	qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
900 }
901 
902 static void qdf_nbuf_smmu_map_tracking_deinit(void)
903 {
904 	qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
905 }
906 
907 static QDF_STATUS
908 qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
909 {
910 	if (is_initial_mem_debug_disabled)
911 		return QDF_STATUS_SUCCESS;
912 
913 	return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
914 }
915 
916 static void
917 qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
918 {
919 	if (is_initial_mem_debug_disabled)
920 		return;
921 
922 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
923 	qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
924 }
925 
926 void qdf_nbuf_map_check_for_smmu_leaks(void)
927 {
928 	qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
929 }
930 
931 #ifdef IPA_OFFLOAD
932 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
933 				   uint8_t hdl,
934 				   uint8_t num_buffers,
935 				   qdf_mem_info_t *info,
936 				   const char *func,
937 				   uint32_t line)
938 {
939 	QDF_STATUS status;
940 
941 	status = qdf_nbuf_track_smmu_map(nbuf, func, line);
942 	if (QDF_IS_STATUS_ERROR(status))
943 		return status;
944 
945 	status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
946 
947 	if (QDF_IS_STATUS_ERROR(status)) {
948 		qdf_nbuf_untrack_smmu_map(nbuf, func, line);
949 	} else {
950 		if (!is_initial_mem_debug_disabled)
951 			qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
952 		qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
953 						       info->pa, func, line);
954 	}
955 
956 	return status;
957 }
958 
959 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
960 
961 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
962 				     uint8_t hdl,
963 				     uint8_t num_buffers,
964 				     qdf_mem_info_t *info,
965 				     const char *func,
966 				     uint32_t line)
967 {
968 	QDF_STATUS status;
969 
970 	qdf_nbuf_untrack_smmu_map(nbuf, func, line);
971 	status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
972 	qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
973 						 info->pa, func, line);
974 	return status;
975 }
976 
977 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
978 #endif /* IPA_OFFLOAD */
979 
980 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
981 						  const char *func,
982 						  uint32_t line)
983 {
984 	char map_func[QDF_TRACKER_FUNC_SIZE];
985 	uint32_t map_line;
986 
987 	if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
988 				&map_func, &map_line))
989 		return;
990 
991 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
992 			   func, line, map_func, map_line);
993 }
994 
995 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
996 {
997 	p_node->smmu_unmap_line_num = 0;
998 	p_node->is_nbuf_smmu_mapped = false;
999 	p_node->smmu_map_line_num = 0;
1000 	p_node->smmu_map_func_name[0] = '\0';
1001 	p_node->smmu_unmap_func_name[0] = '\0';
1002 	p_node->smmu_unmap_iova_addr = 0;
1003 	p_node->smmu_unmap_pa_addr = 0;
1004 	p_node->smmu_map_iova_addr = 0;
1005 	p_node->smmu_map_pa_addr = 0;
1006 }
1007 #else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
1008 #ifdef NBUF_MEMORY_DEBUG
1009 static void qdf_nbuf_smmu_map_tracking_init(void)
1010 {
1011 }
1012 
1013 static void qdf_nbuf_smmu_map_tracking_deinit(void)
1014 {
1015 }
1016 
1017 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
1018 						  const char *func,
1019 						  uint32_t line)
1020 {
1021 }
1022 
1023 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
1024 {
1025 }
1026 #endif /* NBUF_MEMORY_DEBUG */
1027 
1028 #ifdef IPA_OFFLOAD
1029 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
1030 				   uint8_t hdl,
1031 				   uint8_t num_buffers,
1032 				   qdf_mem_info_t *info,
1033 				   const char *func,
1034 				   uint32_t line)
1035 {
1036 	return  __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
1037 }
1038 
1039 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
1040 
1041 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
1042 				     uint8_t hdl,
1043 				     uint8_t num_buffers,
1044 				     qdf_mem_info_t *info,
1045 				     const char *func,
1046 				     uint32_t line)
1047 {
1048 	return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
1049 }
1050 
1051 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
1052 #endif /* IPA_OFFLOAD */
1053 #endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */
1054 
1055 #ifdef NBUF_MAP_UNMAP_DEBUG
1056 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
1057 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
1058 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
1059 
1060 static void qdf_nbuf_map_tracking_init(void)
1061 {
1062 	qdf_tracker_init(&qdf_nbuf_map_tracker);
1063 }
1064 
1065 static void qdf_nbuf_map_tracking_deinit(void)
1066 {
1067 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
1068 }
1069 
1070 static QDF_STATUS
1071 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1072 {
1073 	if (is_initial_mem_debug_disabled)
1074 		return QDF_STATUS_SUCCESS;
1075 
1076 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
1077 }
1078 
1079 static void
1080 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1081 {
1082 	if (is_initial_mem_debug_disabled)
1083 		return;
1084 
1085 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
1086 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
1087 }
1088 
1089 void qdf_nbuf_map_check_for_leaks(void)
1090 {
1091 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
1092 }
1093 
1094 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
1095 			      qdf_nbuf_t buf,
1096 			      qdf_dma_dir_t dir,
1097 			      const char *func,
1098 			      uint32_t line)
1099 {
1100 	QDF_STATUS status;
1101 
1102 	status = qdf_nbuf_track_map(buf, func, line);
1103 	if (QDF_IS_STATUS_ERROR(status))
1104 		return status;
1105 
1106 	status = __qdf_nbuf_map(osdev, buf, dir);
1107 	if (QDF_IS_STATUS_ERROR(status)) {
1108 		qdf_nbuf_untrack_map(buf, func, line);
1109 	} else {
1110 		if (!is_initial_mem_debug_disabled)
1111 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1112 		qdf_net_buf_debug_update_map_node(buf, func, line);
1113 	}
1114 
1115 	return status;
1116 }
1117 
1118 qdf_export_symbol(qdf_nbuf_map_debug);
1119 
1120 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
1121 			  qdf_nbuf_t buf,
1122 			  qdf_dma_dir_t dir,
1123 			  const char *func,
1124 			  uint32_t line)
1125 {
1126 	qdf_nbuf_untrack_map(buf, func, line);
1127 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1128 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1129 }
1130 
1131 qdf_export_symbol(qdf_nbuf_unmap_debug);
1132 
1133 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
1134 				     qdf_nbuf_t buf,
1135 				     qdf_dma_dir_t dir,
1136 				     const char *func,
1137 				     uint32_t line)
1138 {
1139 	QDF_STATUS status;
1140 
1141 	status = qdf_nbuf_track_map(buf, func, line);
1142 	if (QDF_IS_STATUS_ERROR(status))
1143 		return status;
1144 
1145 	status = __qdf_nbuf_map_single(osdev, buf, dir);
1146 	if (QDF_IS_STATUS_ERROR(status)) {
1147 		qdf_nbuf_untrack_map(buf, func, line);
1148 	} else {
1149 		if (!is_initial_mem_debug_disabled)
1150 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1151 		qdf_net_buf_debug_update_map_node(buf, func, line);
1152 	}
1153 
1154 	return status;
1155 }
1156 
1157 qdf_export_symbol(qdf_nbuf_map_single_debug);
1158 
1159 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
1160 				 qdf_nbuf_t buf,
1161 				 qdf_dma_dir_t dir,
1162 				 const char *func,
1163 				 uint32_t line)
1164 {
1165 	qdf_nbuf_untrack_map(buf, func, line);
1166 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1167 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1168 }
1169 
1170 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
1171 
1172 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
1173 				     qdf_nbuf_t buf,
1174 				     qdf_dma_dir_t dir,
1175 				     int nbytes,
1176 				     const char *func,
1177 				     uint32_t line)
1178 {
1179 	QDF_STATUS status;
1180 
1181 	status = qdf_nbuf_track_map(buf, func, line);
1182 	if (QDF_IS_STATUS_ERROR(status))
1183 		return status;
1184 
1185 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
1186 	if (QDF_IS_STATUS_ERROR(status)) {
1187 		qdf_nbuf_untrack_map(buf, func, line);
1188 	} else {
1189 		if (!is_initial_mem_debug_disabled)
1190 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1191 		qdf_net_buf_debug_update_map_node(buf, func, line);
1192 	}
1193 
1194 	return status;
1195 }
1196 
1197 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
1198 
1199 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
1200 				 qdf_nbuf_t buf,
1201 				 qdf_dma_dir_t dir,
1202 				 int nbytes,
1203 				 const char *func,
1204 				 uint32_t line)
1205 {
1206 	qdf_nbuf_untrack_map(buf, func, line);
1207 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
1208 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1209 }
1210 
1211 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1212 
1213 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1214 					    qdf_nbuf_t buf,
1215 					    qdf_dma_dir_t dir,
1216 					    int nbytes,
1217 					    const char *func,
1218 					    uint32_t line)
1219 {
1220 	QDF_STATUS status;
1221 
1222 	status = qdf_nbuf_track_map(buf, func, line);
1223 	if (QDF_IS_STATUS_ERROR(status))
1224 		return status;
1225 
1226 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1227 	if (QDF_IS_STATUS_ERROR(status)) {
1228 		qdf_nbuf_untrack_map(buf, func, line);
1229 	} else {
1230 		if (!is_initial_mem_debug_disabled)
1231 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1232 		qdf_net_buf_debug_update_map_node(buf, func, line);
1233 	}
1234 
1235 	return status;
1236 }
1237 
1238 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1239 
1240 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
1241 					qdf_nbuf_t buf,
1242 					qdf_dma_dir_t dir,
1243 					int nbytes,
1244 					const char *func,
1245 					uint32_t line)
1246 {
1247 	qdf_nbuf_untrack_map(buf, func, line);
1248 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
1249 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1250 }
1251 
1252 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1253 
1254 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1255 					      qdf_nbuf_t buf,
1256 					      qdf_dma_addr_t phy_addr,
1257 					      qdf_dma_dir_t dir, int nbytes,
1258 					      const char *func, uint32_t line)
1259 {
1260 	qdf_nbuf_untrack_map(buf, func, line);
1261 	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
1262 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1263 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1264 }
1265 
1266 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1267 
1268 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1269 					     const char *func,
1270 					     uint32_t line)
1271 {
1272 	char map_func[QDF_TRACKER_FUNC_SIZE];
1273 	uint32_t map_line;
1274 
1275 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1276 				&map_func, &map_line))
1277 		return;
1278 
1279 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1280 			   func, line, map_func, map_line);
1281 }
1282 #else
1283 static inline void qdf_nbuf_map_tracking_init(void)
1284 {
1285 }
1286 
1287 static inline void qdf_nbuf_map_tracking_deinit(void)
1288 {
1289 }
1290 
1291 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1292 						    const char *func,
1293 						    uint32_t line)
1294 {
1295 }
1296 #endif /* NBUF_MAP_UNMAP_DEBUG */
1297 
1298 #ifdef QDF_OS_DEBUG
1299 QDF_STATUS
1300 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1301 {
1302 	struct skb_shared_info *sh = skb_shinfo(skb);
1303 
1304 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1305 			|| (dir == QDF_DMA_FROM_DEVICE));
1306 
1307 	/*
1308 	 * Assume there's only a single fragment.
1309 	 * To support multiple fragments, it would be necessary to change
1310 	 * qdf_nbuf_t to be a separate object that stores meta-info
1311 	 * (including the bus address for each fragment) and a pointer
1312 	 * to the underlying sk_buff.
1313 	 */
1314 	qdf_assert(sh->nr_frags == 0);
1315 
1316 	return __qdf_nbuf_map_single(osdev, skb, dir);
1317 }
1318 qdf_export_symbol(__qdf_nbuf_map);
1319 
1320 #else
1321 QDF_STATUS
1322 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1323 {
1324 	return __qdf_nbuf_map_single(osdev, skb, dir);
1325 }
1326 qdf_export_symbol(__qdf_nbuf_map);
1327 #endif
1328 
1329 void
1330 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1331 			qdf_dma_dir_t dir)
1332 {
1333 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1334 		   || (dir == QDF_DMA_FROM_DEVICE));
1335 
1336 	/*
1337 	 * Assume there's a single fragment.
1338 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1339 	 */
1340 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1341 }
1342 qdf_export_symbol(__qdf_nbuf_unmap);
1343 
1344 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1345 QDF_STATUS
1346 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1347 {
1348 	qdf_dma_addr_t paddr;
1349 
1350 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1351 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1352 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1353 	return QDF_STATUS_SUCCESS;
1354 }
1355 qdf_export_symbol(__qdf_nbuf_map_single);
1356 #else
1357 QDF_STATUS
1358 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1359 {
1360 	qdf_dma_addr_t paddr;
1361 
1362 	/* assume that the OS only provides a single fragment */
1363 	QDF_NBUF_CB_PADDR(buf) = paddr =
1364 		dma_map_single(osdev->dev, buf->data,
1365 				skb_end_pointer(buf) - buf->data,
1366 				__qdf_dma_dir_to_os(dir));
1367 	__qdf_record_nbuf_nbytes(
1368 		__qdf_nbuf_get_end_offset(buf), dir, true);
1369 	return dma_mapping_error(osdev->dev, paddr)
1370 		? QDF_STATUS_E_FAILURE
1371 		: QDF_STATUS_SUCCESS;
1372 }
1373 qdf_export_symbol(__qdf_nbuf_map_single);
1374 #endif
1375 
1376 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1377 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1378 				qdf_dma_dir_t dir)
1379 {
1380 }
1381 #else
1382 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1383 					qdf_dma_dir_t dir)
1384 {
1385 	if (QDF_NBUF_CB_PADDR(buf)) {
1386 		__qdf_record_nbuf_nbytes(
1387 			__qdf_nbuf_get_end_offset(buf), dir, false);
1388 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1389 			skb_end_pointer(buf) - buf->data,
1390 			__qdf_dma_dir_to_os(dir));
1391 	}
1392 }
1393 #endif
1394 qdf_export_symbol(__qdf_nbuf_unmap_single);
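/*
 * Usage sketch (illustrative only): a typical TX-path DMA cycle maps the
 * buffer, hands the resulting physical address to hardware and unmaps it on
 * completion, e.g.
 *
 *	if (QDF_IS_STATUS_SUCCESS(qdf_nbuf_map_single(osdev, nbuf,
 *						      QDF_DMA_TO_DEVICE))) {
 *		paddr = QDF_NBUF_CB_PADDR(nbuf);
 *		... post paddr to the hardware ring, then on completion ...
 *		qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 *	}
 *
 * qdf_nbuf_map_single()/qdf_nbuf_unmap_single() are the wrappers declared
 * in qdf_nbuf.h; with NBUF_MAP_UNMAP_DEBUG enabled they resolve to the
 * *_debug variants above.
 */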
1395 
1396 QDF_STATUS
1397 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1398 {
1399 	switch (cksum->l4_result) {
1400 	case QDF_NBUF_RX_CKSUM_NONE:
1401 		skb->ip_summed = CHECKSUM_NONE;
1402 		break;
1403 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1404 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1405 		skb->csum_level = cksum->csum_level;
1406 		break;
1407 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1408 		skb->ip_summed = CHECKSUM_PARTIAL;
1409 		skb->csum = cksum->val;
1410 		break;
1411 	default:
1412 		pr_err("Unknown checksum type\n");
1413 		qdf_assert(0);
1414 		return QDF_STATUS_E_NOSUPPORT;
1415 	}
1416 	return QDF_STATUS_SUCCESS;
1417 }
1418 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1419 
1420 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1421 {
1422 	switch (skb->ip_summed) {
1423 	case CHECKSUM_NONE:
1424 		return QDF_NBUF_TX_CKSUM_NONE;
1425 	case CHECKSUM_PARTIAL:
1426 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1427 	case CHECKSUM_COMPLETE:
1428 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1429 	default:
1430 		return QDF_NBUF_TX_CKSUM_NONE;
1431 	}
1432 }
1433 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1434 
1435 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1436 {
1437 	return skb->priority;
1438 }
1439 qdf_export_symbol(__qdf_nbuf_get_tid);
1440 
1441 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1442 {
1443 	skb->priority = tid;
1444 }
1445 qdf_export_symbol(__qdf_nbuf_set_tid);
1446 
1447 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1448 {
1449 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1450 }
1451 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1452 
1453 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1454 {
1455 	qdf_trace_update_cb = cb_func_ptr;
1456 }
1457 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1458 
1459 enum qdf_proto_subtype
1460 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1461 {
1462 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1463 
1464 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1465 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1466 					QDF_DHCP_OPTION53_LENGTH)) {
1467 
1468 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1469 		case QDF_DHCP_DISCOVER:
1470 			subtype = QDF_PROTO_DHCP_DISCOVER;
1471 			break;
1472 		case QDF_DHCP_REQUEST:
1473 			subtype = QDF_PROTO_DHCP_REQUEST;
1474 			break;
1475 		case QDF_DHCP_OFFER:
1476 			subtype = QDF_PROTO_DHCP_OFFER;
1477 			break;
1478 		case QDF_DHCP_ACK:
1479 			subtype = QDF_PROTO_DHCP_ACK;
1480 			break;
1481 		case QDF_DHCP_NAK:
1482 			subtype = QDF_PROTO_DHCP_NACK;
1483 			break;
1484 		case QDF_DHCP_RELEASE:
1485 			subtype = QDF_PROTO_DHCP_RELEASE;
1486 			break;
1487 		case QDF_DHCP_INFORM:
1488 			subtype = QDF_PROTO_DHCP_INFORM;
1489 			break;
1490 		case QDF_DHCP_DECLINE:
1491 			subtype = QDF_PROTO_DHCP_DECLINE;
1492 			break;
1493 		default:
1494 			break;
1495 		}
1496 	}
1497 
1498 	return subtype;
1499 }
1500 
1501 #define EAPOL_WPA_KEY_INFO_ACK BIT(7)
1502 #define EAPOL_WPA_KEY_INFO_MIC BIT(8)
1503 #define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
1504 
1505 /**
1506  * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key message subtype
1507  * @data: Pointer to EAPOL packet data buffer
1508  *
1509  * We can distinguish M1/M3 from M2/M4 by the Ack bit in the key info field.
1510  * The relationship between the Ack bit and the EAPOL message type is:
1511  *
1512  *  EAPOL type  |   M1    M2   M3  M4
1513  * --------------------------------------
1514  *     Ack      |   1     0    1   0
1515  * --------------------------------------
1516  *
1517  * M1 and M3 are then told apart by the MIC/Encrypted Key Data bits in the
1518  * key info field, and M2 and M4 by the key data length or the key nonce,
1519  * either of which is zero for M4.
1520  *
1521  * Return: subtype of the EAPOL packet.
1522  */
1523 static inline enum qdf_proto_subtype
1524 __qdf_nbuf_data_get_eapol_key(uint8_t *data)
1525 {
1526 	uint16_t key_info, key_data_length;
1527 	enum qdf_proto_subtype subtype;
1528 	uint64_t *key_nonce;
1529 
1530 	key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
1531 			(data + EAPOL_KEY_INFO_OFFSET)));
1532 
1533 	key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
1534 				(data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
1535 	key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);
1536 
1537 	if (key_info & EAPOL_WPA_KEY_INFO_ACK)
1538 		if (key_info &
1539 		    (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
1540 			subtype = QDF_PROTO_EAPOL_M3;
1541 		else
1542 			subtype = QDF_PROTO_EAPOL_M1;
1543 	else
1544 		if (key_data_length == 0 ||
1545 		    !((*key_nonce) || (*(key_nonce + 1)) ||
1546 		      (*(key_nonce + 2)) || (*(key_nonce + 3))))
1547 			subtype = QDF_PROTO_EAPOL_M4;
1548 		else
1549 			subtype = QDF_PROTO_EAPOL_M2;
1550 
1551 	return subtype;
1552 }
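/*
 * Worked example (illustrative, assuming a standard WPA2 4-way handshake):
 * an M3 frame carries Ack = 1 together with the MIC and Encrypted Key Data
 * bits (a key_info such as 0x13ca), so the first branch above returns
 * QDF_PROTO_EAPOL_M3; an M4 frame carries Ack = 0 with a zero key data
 * length and a zero key nonce, so the second branch returns
 * QDF_PROTO_EAPOL_M4.
 */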
1553 
1554 /**
1555  * __qdf_nbuf_data_get_exp_msg_type() - Get EAP expanded msg type
1556  * @data: Pointer to EAPOL packet data buffer
1557  * @code: EAP code
1558  *
1559  * Return: subtype of the EAPOL packet.
1560  */
1561 static inline enum qdf_proto_subtype
1562 __qdf_nbuf_data_get_exp_msg_type(uint8_t *data, uint8_t code)
1563 {
1564 	uint8_t msg_type;
1565 	uint8_t opcode = *(data + EAP_EXP_MSG_OPCODE_OFFSET);
1566 
1567 	switch (opcode) {
1568 	case WSC_START:
1569 		return QDF_PROTO_EAP_WSC_START;
1570 	case WSC_ACK:
1571 		return QDF_PROTO_EAP_WSC_ACK;
1572 	case WSC_NACK:
1573 		return QDF_PROTO_EAP_WSC_NACK;
1574 	case WSC_MSG:
1575 		msg_type = *(data + EAP_EXP_MSG_TYPE_OFFSET);
1576 		switch (msg_type) {
1577 		case EAP_EXP_TYPE_M1:
1578 			return QDF_PROTO_EAP_M1;
1579 		case EAP_EXP_TYPE_M2:
1580 			return QDF_PROTO_EAP_M2;
1581 		case EAP_EXP_TYPE_M3:
1582 			return QDF_PROTO_EAP_M3;
1583 		case EAP_EXP_TYPE_M4:
1584 			return QDF_PROTO_EAP_M4;
1585 		case EAP_EXP_TYPE_M5:
1586 			return QDF_PROTO_EAP_M5;
1587 		case EAP_EXP_TYPE_M6:
1588 			return QDF_PROTO_EAP_M6;
1589 		case EAP_EXP_TYPE_M7:
1590 			return QDF_PROTO_EAP_M7;
1591 		case EAP_EXP_TYPE_M8:
1592 			return QDF_PROTO_EAP_M8;
1593 		default:
1594 			break;
1595 		}
1596 		break;
1597 	case WSC_DONE:
1598 		return QDF_PROTO_EAP_WSC_DONE;
1599 	case WSC_FRAG_ACK:
1600 		return QDF_PROTO_EAP_WSC_FRAG_ACK;
1601 	default:
1602 		break;
1603 	}
1604 	switch (code) {
1605 	case QDF_EAP_REQUEST:
1606 		return QDF_PROTO_EAP_REQUEST;
1607 	case QDF_EAP_RESPONSE:
1608 		return QDF_PROTO_EAP_RESPONSE;
1609 	default:
1610 		return QDF_PROTO_INVALID;
1611 	}
1612 }
1613 
1614 /**
1615  * __qdf_nbuf_data_get_eap_type() - Get EAP type
1616  * @data: Pointer to EAPOL packet data buffer
1617  * @code: EAP code
1618  *
1619  * Return: subtype of the EAPOL packet.
1620  */
1621 static inline enum qdf_proto_subtype
1622 __qdf_nbuf_data_get_eap_type(uint8_t *data, uint8_t code)
1623 {
1624 	uint8_t type = *(data + EAP_TYPE_OFFSET);
1625 
1626 	switch (type) {
1627 	case EAP_PACKET_TYPE_EXP:
1628 		return __qdf_nbuf_data_get_exp_msg_type(data, code);
1629 	case EAP_PACKET_TYPE_ID:
1630 		switch (code) {
1631 		case QDF_EAP_REQUEST:
1632 			return QDF_PROTO_EAP_REQ_ID;
1633 		case QDF_EAP_RESPONSE:
1634 			return QDF_PROTO_EAP_RSP_ID;
1635 		default:
1636 			return QDF_PROTO_INVALID;
1637 		}
1638 	default:
1639 		switch (code) {
1640 		case QDF_EAP_REQUEST:
1641 			return QDF_PROTO_EAP_REQUEST;
1642 		case QDF_EAP_RESPONSE:
1643 			return QDF_PROTO_EAP_RESPONSE;
1644 		default:
1645 			return QDF_PROTO_INVALID;
1646 		}
1647 	}
1648 }
1649 
1650 /**
1651  * __qdf_nbuf_data_get_eap_code() - Get EAPOL code
1652  * @data: Pointer to EAPOL packet data buffer
1653  *
1654  * Return: subtype of the EAPOL packet.
1655  */
1656 static inline enum qdf_proto_subtype
1657 __qdf_nbuf_data_get_eap_code(uint8_t *data)
1658 {
1659 	uint8_t code = *(data + EAP_CODE_OFFSET);
1660 
1661 	switch (code) {
1662 	case QDF_EAP_REQUEST:
1663 	case QDF_EAP_RESPONSE:
1664 		return __qdf_nbuf_data_get_eap_type(data, code);
1665 	case QDF_EAP_SUCCESS:
1666 		return QDF_PROTO_EAP_SUCCESS;
1667 	case QDF_EAP_FAILURE:
1668 		return QDF_PROTO_EAP_FAILURE;
1669 	case QDF_EAP_INITIATE:
1670 		return QDF_PROTO_EAP_INITIATE;
1671 	case QDF_EAP_FINISH:
1672 		return QDF_PROTO_EAP_FINISH;
1673 	default:
1674 		return QDF_PROTO_INVALID;
1675 	}
1676 }
1677 
1678 enum qdf_proto_subtype
1679 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1680 {
1681 	uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
1682 
1683 	switch (pkt_type) {
1684 	case EAPOL_PACKET_TYPE_EAP:
1685 		return __qdf_nbuf_data_get_eap_code(data);
1686 	case EAPOL_PACKET_TYPE_START:
1687 		return QDF_PROTO_EAPOL_START;
1688 	case EAPOL_PACKET_TYPE_LOGOFF:
1689 		return QDF_PROTO_EAPOL_LOGOFF;
1690 	case EAPOL_PACKET_TYPE_KEY:
1691 		return __qdf_nbuf_data_get_eapol_key(data);
1692 	case EAPOL_PACKET_TYPE_ASF:
1693 		return QDF_PROTO_EAPOL_ASF;
1694 	default:
1695 		return QDF_PROTO_INVALID;
1696 	}
1697 }
1698 
1699 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1700 
1701 enum qdf_proto_subtype
1702 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1703 {
1704 	uint16_t subtype;
1705 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1706 
1707 	subtype = (uint16_t)(*(uint16_t *)
1708 			(data + ARP_SUB_TYPE_OFFSET));
1709 
1710 	switch (QDF_SWAP_U16(subtype)) {
1711 	case ARP_REQUEST:
1712 		proto_subtype = QDF_PROTO_ARP_REQ;
1713 		break;
1714 	case ARP_RESPONSE:
1715 		proto_subtype = QDF_PROTO_ARP_RES;
1716 		break;
1717 	default:
1718 		break;
1719 	}
1720 
1721 	return proto_subtype;
1722 }
1723 
1724 enum qdf_proto_subtype
1725 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1726 {
1727 	uint8_t subtype;
1728 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1729 
1730 	subtype = (uint8_t)(*(uint8_t *)
1731 			(data + ICMP_SUBTYPE_OFFSET));
1732 
1733 	switch (subtype) {
1734 	case ICMP_REQUEST:
1735 		proto_subtype = QDF_PROTO_ICMP_REQ;
1736 		break;
1737 	case ICMP_RESPONSE:
1738 		proto_subtype = QDF_PROTO_ICMP_RES;
1739 		break;
1740 	default:
1741 		break;
1742 	}
1743 
1744 	return proto_subtype;
1745 }
1746 
1747 enum qdf_proto_subtype
1748 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1749 {
1750 	uint8_t subtype;
1751 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1752 
1753 	subtype = (uint8_t)(*(uint8_t *)
1754 			(data + ICMPV6_SUBTYPE_OFFSET));
1755 
1756 	switch (subtype) {
1757 	case ICMPV6_REQUEST:
1758 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1759 		break;
1760 	case ICMPV6_RESPONSE:
1761 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1762 		break;
1763 	case ICMPV6_RS:
1764 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1765 		break;
1766 	case ICMPV6_RA:
1767 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1768 		break;
1769 	case ICMPV6_NS:
1770 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1771 		break;
1772 	case ICMPV6_NA:
1773 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1774 		break;
1775 	default:
1776 		break;
1777 	}
1778 
1779 	return proto_subtype;
1780 }
1781 
1782 bool
1783 __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1784 {
1785 	if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1786 		return true;
1787 
1788 	return false;
1789 }
1790 
1791 bool
1792 __qdf_nbuf_is_ipv4_fragment(struct sk_buff *skb)
1793 {
1794 	if (ntohs(ip_hdr(skb)->frag_off) & IP_MF)
1795 		return true;
1796 
1797 	return false;
1798 }
1799 
1800 void
1801 __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
1802 {
1803 	*(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
1804 }
1805 
1806 uint8_t
1807 __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1808 {
1809 	uint8_t tos;
1810 
1811 	tos = (uint8_t)(*(uint8_t *)(data +
1812 			QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1813 	return tos;
1814 }
1815 
1816 uint8_t
1817 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1818 {
1819 	uint8_t proto_type;
1820 
1821 	proto_type = (uint8_t)(*(uint8_t *)(data +
1822 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1823 	return proto_type;
1824 }
1825 
1826 uint8_t
1827 __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
1828 {
1829 	struct ipv6hdr *hdr;
1830 
1831 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1832 	return ip6_tclass(ip6_flowinfo(hdr));
1833 }
1834 
1835 void
1836 __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
1837 {
1838 	struct ipv6hdr *hdr;
1839 
1840 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1841 	ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
1842 }
1843 
1844 uint8_t
1845 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1846 {
1847 	uint8_t proto_type;
1848 
1849 	proto_type = (uint8_t)(*(uint8_t *)(data +
1850 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1851 	return proto_type;
1852 }
1853 
1854 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1855 {
1856 	uint16_t ether_type;
1857 
1858 	ether_type = (uint16_t)(*(uint16_t *)(data +
1859 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1860 
1861 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1862 		return true;
1863 	else
1864 		return false;
1865 }
1866 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1867 
1868 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1869 {
1870 	uint16_t sport;
1871 	uint16_t dport;
1872 	uint8_t ipv4_offset;
1873 	uint8_t ipv4_hdr_len;
1874 	struct iphdr *iphdr;
1875 
1876 	if (__qdf_nbuf_get_ether_type(data) !=
1877 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1878 		return false;
1879 
1880 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1881 	iphdr = (struct iphdr *)(data + ipv4_offset);
1882 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1883 
1884 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1885 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1886 			      sizeof(uint16_t));
1887 
1888 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1889 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1890 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1891 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1892 		return true;
1893 	else
1894 		return false;
1895 }
1896 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1897 
1898 /**
1899  * qdf_is_eapol_type() - check if packet is EAPOL
1900  * @type: Packet type
1901  *
1902  * This api checks whether the given ether type is the EAPOL ether type.
1903  *
1904  * Return: true if it is EAPOL frame
1905  *         false otherwise.
1906  */
1907 #ifdef BIG_ENDIAN_HOST
1908 static inline bool qdf_is_eapol_type(uint16_t type)
1909 {
1910 	return (type == QDF_NBUF_TRAC_EAPOL_ETH_TYPE);
1911 }
1912 #else
1913 static inline bool qdf_is_eapol_type(uint16_t type)
1914 {
1915 	return (type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE));
1916 }
1917 #endif
1918 
1919 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1920 {
1921 	uint16_t ether_type;
1922 
1923 	ether_type = __qdf_nbuf_get_ether_type(data);
1924 
1925 	return qdf_is_eapol_type(ether_type);
1926 }
1927 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1928 
1929 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1930 {
1931 	uint16_t ether_type;
1932 
1933 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1934 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1935 
1936 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1937 		return true;
1938 	else
1939 		return false;
1940 }
1941 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1942 
1943 /**
1944  * qdf_nbuf_is_ipv6_vlan_pkt() - check whether packet is vlan IPV6
1945  * @data: Pointer to network data buffer
1946  *
1947  * This api checks for an IPv6 packet carried behind a VLAN header.
1948  *
1949  * Return: true if the packet is VLAN-tagged IPv6,
1950  *	   false otherwise.
1951  */
1952 static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
1953 {
1954 	uint16_t ether_type;
1955 
1956 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1957 
1958 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1959 		ether_type = *(uint16_t *)(data +
1960 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1961 
1962 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1963 			return true;
1964 	}
1965 	return false;
1966 }
1967 
1968 /**
1969  * qdf_nbuf_is_ipv4_vlan_pkt() - check whether packet is vlan IPV4
1970  * @data: Pointer to network data buffer
1971  *
1972  * This API checks for an IPv4 packet that carries a VLAN header.
1973  *
1974  * Return: true if the packet is IPv4 with a VLAN header
1975  *	   false otherwise.
1976  */
1977 static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
1978 {
1979 	uint16_t ether_type;
1980 
1981 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1982 
1983 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1984 		ether_type = *(uint16_t *)(data +
1985 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1986 
1987 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1988 			return true;
1989 	}
1990 	return false;
1991 }
1992 
1993 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1994 {
1995 	uint8_t pkt_type;
1996 
1997 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1998 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1999 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2000 		goto is_igmp;
2001 	}
2002 
2003 	if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
2004 		pkt_type = (uint8_t)(*(uint8_t *)(
2005 				data +
2006 				QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
2007 		goto is_igmp;
2008 	}
2009 
2010 	return false;
2011 is_igmp:
2012 	if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
2013 		return true;
2014 
2015 	return false;
2016 }
2017 
2018 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
2019 
2020 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
2021 {
2022 	uint8_t pkt_type;
2023 	uint8_t next_hdr;
2024 
2025 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2026 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2027 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2028 		next_hdr = (uint8_t)(*(uint8_t *)(
2029 				data +
2030 				QDF_NBUF_TRAC_IPV6_OFFSET +
2031 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2032 		goto is_mld;
2033 	}
2034 
2035 	if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
2036 		pkt_type = (uint8_t)(*(uint8_t *)(
2037 				data +
2038 				QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
2039 		next_hdr = (uint8_t)(*(uint8_t *)(
2040 				data +
2041 				QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
2042 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2043 		goto is_mld;
2044 	}
2045 
2046 	return false;
2047 is_mld:
2048 	if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2049 		return true;
2050 	if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
2051 	    (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
2052 		return true;
2053 
2054 	return false;
2055 }
2056 
2057 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
2058 
2059 bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
2060 {
2061 	qdf_ether_header_t *eh = NULL;
2062 	uint16_t ether_type;
2063 	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2064 
2065 	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2066 	ether_type = eh->ether_type;
2067 
2068 	if (ether_type == htons(ETH_P_8021Q)) {
2069 		struct vlan_ethhdr *veth =
2070 				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
2071 		ether_type = veth->h_vlan_encapsulated_proto;
2072 		eth_hdr_size = sizeof(struct vlan_ethhdr);
2073 	}
2074 
2075 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
2076 		struct iphdr *iph = NULL;
2077 		struct igmphdr *ih = NULL;
2078 
2079 		iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2080 		ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
2081 		switch (ih->type) {
2082 		case IGMP_HOST_LEAVE_MESSAGE:
2083 			return true;
2084 		case IGMPV3_HOST_MEMBERSHIP_REPORT:
2085 		{
2086 			struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
2087 			struct igmpv3_grec *grec = NULL;
2088 			int num = 0;
2089 			int i = 0;
2090 			int len = 0;
2091 			int type = 0;
2092 
2093 			num = ntohs(ihv3->ngrec);
2094 			for (i = 0; i < num; i++) {
2095 				grec = (void *)((uint8_t *)(ihv3->grec) + len);
2096 				type = grec->grec_type;
2097 				if ((type == IGMPV3_MODE_IS_INCLUDE) ||
2098 				    (type == IGMPV3_CHANGE_TO_INCLUDE))
2099 					return true;
2100 
2101 				len += sizeof(struct igmpv3_grec);
2102 				len += ntohs(grec->grec_nsrcs) * 4;
2103 			}
2104 			break;
2105 		}
2106 		default:
2107 			break;
2108 		}
2109 	}
2110 
2111 	return false;
2112 }
2113 
2114 qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
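
/*
 * Illustrative note (not from the original source): IGMPv3 membership
 * reports carry a variable number of group records, each of variable
 * length. The walk above advances by
 *
 *	len += sizeof(struct igmpv3_grec);           8-byte record header
 *	len += ntohs(grec->grec_nsrcs) * 4;          one IPv4 source each
 *
 * so a report with two records holding 1 and 3 sources spans
 * (8 + 4) + (8 + 12) = 32 bytes of group records. A TO_INCLUDE record
 * with an empty source list is the IGMPv3 way of leaving a group, which
 * is why INCLUDE-type records are treated as a leave indication here.
 */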
2115 
2116 bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
2117 {
2118 	qdf_ether_header_t *eh = NULL;
2119 	uint16_t ether_type;
2120 	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2121 
2122 	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2123 	ether_type = eh->ether_type;
2124 
2125 	if (ether_type == htons(ETH_P_8021Q)) {
2126 		struct vlan_ethhdr *veth =
2127 				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
2128 		ether_type = veth->h_vlan_encapsulated_proto;
2129 		eth_hdr_size = sizeof(struct vlan_ethhdr);
2130 	}
2131 
2132 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
2133 		struct ipv6hdr *ip6h = NULL;
2134 		struct icmp6hdr *icmp6h = NULL;
2135 		uint8_t nexthdr;
2136 		uint16_t frag_off = 0;
2137 		int offset;
2138 		qdf_nbuf_t buf_copy = NULL;
2139 
2140 		ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2141 		if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
2142 		    ip6h->payload_len == 0)
2143 			return false;
2144 
2145 		buf_copy = qdf_nbuf_copy(buf);
2146 		if (qdf_unlikely(!buf_copy))
2147 			return false;
2148 
2149 		nexthdr = ip6h->nexthdr;
2150 		offset = ipv6_skip_exthdr(buf_copy,
2151 					  eth_hdr_size + sizeof(*ip6h),
2152 					  &nexthdr,
2153 					  &frag_off);
2154 		qdf_nbuf_free(buf_copy);
2155 		if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
2156 			return false;
2157 
2158 		icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);
2159 
2160 		switch (icmp6h->icmp6_type) {
2161 		case ICMPV6_MGM_REDUCTION:
2162 			return true;
2163 		case ICMPV6_MLD2_REPORT:
2164 		{
2165 			struct mld2_report *mh = NULL;
2166 			struct mld2_grec *grec = NULL;
2167 			int num = 0;
2168 			int i = 0;
2169 			int len = 0;
2170 			int type = -1;
2171 
2172 			mh = (struct mld2_report *)icmp6h;
2173 			num = ntohs(mh->mld2r_ngrec);
2174 			for (i = 0; i < num; i++) {
2175 				grec = (void *)(((uint8_t *)mh->mld2r_grec) +
2176 						len);
2177 				type = grec->grec_type;
2178 				if ((type == MLD2_MODE_IS_INCLUDE) ||
2179 				    (type == MLD2_CHANGE_TO_INCLUDE))
2180 					return true;
2181 				else if (type == MLD2_BLOCK_OLD_SOURCES)
2182 					return true;
2183 
2184 				len += sizeof(struct mld2_grec);
2185 				len += ntohs(grec->grec_nsrcs) *
2186 						sizeof(struct in6_addr);
2187 			}
2188 			break;
2189 		}
2190 		default:
2191 			break;
2192 		}
2193 	}
2194 
2195 	return false;
2196 }
2197 
2198 qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
2199 
2200 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
2201 {
2202 	uint16_t ether_type;
2203 
2204 	ether_type = *(uint16_t *)(skb->data +
2205 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
2206 
2207 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
2208 		return true;
2209 	else
2210 		return false;
2211 }
2212 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
2213 
2214 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
2215 {
2216 	uint16_t ether_type;
2217 
2218 	ether_type = __qdf_nbuf_get_ether_type(data);
2219 
2220 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
2221 		return true;
2222 	else
2223 		return false;
2224 }
2225 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
2226 
2227 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
2228 {
2229 	uint16_t op_code;
2230 
2231 	op_code = (uint16_t)(*(uint16_t *)(data +
2232 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2233 
2234 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
2235 		return true;
2236 	return false;
2237 }
2238 
2239 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
2240 {
2241 	uint16_t op_code;
2242 
2243 	op_code = (uint16_t)(*(uint16_t *)(data +
2244 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2245 
2246 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
2247 		return true;
2248 	return false;
2249 }
2250 
2251 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
2252 {
2253 	uint32_t src_ip;
2254 
2255 	src_ip = (uint32_t)(*(uint32_t *)(data +
2256 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
2257 
2258 	return src_ip;
2259 }
2260 
2261 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
2262 {
2263 	uint32_t tgt_ip;
2264 
2265 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2266 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
2267 
2268 	return tgt_ip;
2269 }
2270 
2271 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
2272 {
2273 	uint8_t *domain_name;
2274 
2275 	domain_name = (uint8_t *)
2276 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
2277 	return domain_name;
2278 }
2279 
2280 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
2281 {
2282 	uint16_t op_code;
2283 	uint16_t tgt_port;
2284 
2285 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2286 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
2287 	/* A standard DNS query always happens on destination port 53. */
2288 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2289 		op_code = (uint16_t)(*(uint16_t *)(data +
2290 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2291 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2292 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
2293 			return true;
2294 	}
2295 	return false;
2296 }
2297 
2298 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
2299 {
2300 	uint16_t op_code;
2301 	uint16_t src_port;
2302 
2303 	src_port = (uint16_t)(*(uint16_t *)(data +
2304 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
2305 	/* Standard DNS response always comes on Src Port 53. */
2306 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2307 		op_code = (uint16_t)(*(uint16_t *)(data +
2308 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2309 
2310 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2311 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
2312 			return true;
2313 	}
2314 	return false;
2315 }
2316 
2317 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
2318 {
2319 	uint8_t op_code;
2320 
2321 	op_code = (uint8_t)(*(uint8_t *)(data +
2322 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2323 
2324 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
2325 		return true;
2326 
2327 	return false;
2328 }
2329 
2330 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
2331 {
2332 	uint8_t op_code;
2333 
2334 	op_code = (uint8_t)(*(uint8_t *)(data +
2335 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2336 
2337 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2338 		return true;
2339 
2340 	return false;
2341 }
2342 
2343 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2344 {
2345 	uint8_t op_code;
2346 
2347 	op_code = (uint8_t)(*(uint8_t *)(data +
2348 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2349 
2350 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2351 		return true;
2352 	return false;
2353 }
2354 
2355 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2356 {
2357 	uint8_t op_code;
2358 
2359 	op_code = (uint8_t)(*(uint8_t *)(data +
2360 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2361 
2362 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2363 		return true;
2364 	return false;
2365 }
2366 
2367 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2368 {
2369 	uint8_t op_code;
2370 
2371 	op_code = (uint8_t)(*(uint8_t *)(data +
2372 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2373 
2374 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2375 		return true;
2376 
2377 	return false;
2378 }
2379 
2380 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2381 {
2382 	uint8_t op_code;
2383 
2384 	op_code = (uint8_t)(*(uint8_t *)(data +
2385 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2386 
2387 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2388 		return true;
2389 	return false;
2390 }
2391 
2392 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2393 {
2394 	uint16_t src_port;
2395 
2396 	src_port = (uint16_t)(*(uint16_t *)(data +
2397 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2398 
2399 	return src_port;
2400 }
2401 
2402 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2403 {
2404 	uint16_t tgt_port;
2405 
2406 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2407 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2408 
2409 	return tgt_port;
2410 }
2411 
2412 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2413 {
2414 	uint8_t op_code;
2415 
2416 	op_code = (uint8_t)(*(uint8_t *)(data +
2417 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2418 
2419 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2420 		return true;
2421 	return false;
2422 }
2423 
2424 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2425 {
2426 	uint8_t op_code;
2427 
2428 	op_code = (uint8_t)(*(uint8_t *)(data +
2429 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2430 
2431 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2432 		return true;
2433 	return false;
2434 }
2435 
2436 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2437 {
2438 	uint8_t op_code;
2439 
2440 	op_code = (uint8_t)(*(uint8_t *)(data +
2441 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2442 
2443 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2444 		return true;
2445 	return false;
2446 }
2447 
2448 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2449 
2450 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2451 {
2452 	uint8_t subtype;
2453 
2454 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2455 
2456 	if (subtype == ICMPV6_REDIRECT)
2457 		return true;
2458 	return false;
2459 }
2460 
2461 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2462 
2463 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2464 {
2465 	uint32_t src_ip;
2466 
2467 	src_ip = (uint32_t)(*(uint32_t *)(data +
2468 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2469 
2470 	return src_ip;
2471 }
2472 
2473 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2474 {
2475 	uint32_t tgt_ip;
2476 
2477 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2478 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2479 
2480 	return tgt_ip;
2481 }
2482 
2483 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2484 {
2485 	uint16_t ether_type;
2486 
2487 	ether_type = (uint16_t)(*(uint16_t *)(data +
2488 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2489 
2490 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2491 		return true;
2492 	else
2493 		return false;
2494 }
2495 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2496 
2497 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2498 {
2499 	uint16_t sport;
2500 	uint16_t dport;
2501 	uint8_t ipv6_offset;
2502 
2503 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2504 		return false;
2505 
2506 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2507 	sport = *(uint16_t *)(data + ipv6_offset +
2508 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2509 	dport = *(uint16_t *)(data + ipv6_offset +
2510 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2511 			      sizeof(uint16_t));
2512 
2513 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2514 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2515 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2516 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2517 		return true;
2518 	else
2519 		return false;
2520 }
2521 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2522 
2523 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2524 {
2525 	uint16_t sport;
2526 	uint16_t dport;
2527 
2528 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2529 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2530 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2531 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2532 					sizeof(uint16_t));
2533 
2534 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2535 	    dport == sport)
2536 		return true;
2537 	else
2538 		return false;
2539 }
2540 
2541 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2542 
2543 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2544 {
2545 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2546 		uint32_t *dst_addr =
2547 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2548 
2549 		/*
2550 		 * Check the top nibble of the IPv4 destination address;
2551 		 * a value of 0xE (224.0.0.0/4) indicates a multicast IP.
2552 		 */
2553 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2554 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2555 			return true;
2556 		else
2557 			return false;
2558 	} else
2559 		return false;
2560 }
2561 
2562 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2563 {
2564 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2565 		uint16_t *dst_addr;
2566 
2567 		dst_addr = (uint16_t *)
2568 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2569 
2570 		/*
2571 		 * Check the first 16 bits of the IPv6 destination address;
2572 		 * a value of 0xFF00 indicates an IPv6 mcast packet.
2573 		 */
2574 		if (*dst_addr ==
2575 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2576 			return true;
2577 		else
2578 			return false;
2579 	} else
2580 		return false;
2581 }
2582 
2583 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2584 {
2585 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2586 		uint8_t pkt_type;
2587 
2588 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2589 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2590 
2591 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2592 			return true;
2593 		else
2594 			return false;
2595 	} else
2596 		return false;
2597 }
2598 
2599 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2600 
2601 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2602 {
2603 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2604 		uint8_t pkt_type;
2605 
2606 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2607 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2608 
2609 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2610 			return true;
2611 		else
2612 			return false;
2613 	} else
2614 		return false;
2615 }
2616 
2617 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2618 
2619 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2620 {
2621 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2622 		uint8_t pkt_type;
2623 
2624 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2625 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2626 
2627 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2628 			return true;
2629 		else
2630 			return false;
2631 	} else
2632 		return false;
2633 }
2634 
2635 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2636 {
2637 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2638 		uint8_t pkt_type;
2639 
2640 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2641 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2642 
2643 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2644 			return true;
2645 		else
2646 			return false;
2647 	} else
2648 		return false;
2649 }
2650 
2651 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2652 {
2653 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2654 		uint8_t pkt_type;
2655 
2656 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2657 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2658 
2659 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2660 			return true;
2661 		else
2662 			return false;
2663 	} else
2664 		return false;
2665 }
2666 
2667 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2668 {
2669 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2670 		uint8_t pkt_type;
2671 
2672 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2673 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2674 
2675 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2676 			return true;
2677 		else
2678 			return false;
2679 	} else
2680 		return false;
2681 }
2682 
2683 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2684 {
2685 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2686 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2687 }
2688 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2689 
2690 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2691 {
2692 	struct sk_buff *skb = (struct sk_buff *)nbuf;
2693 	struct ethhdr *eth = eth_hdr(skb);
2694 
2695 	if (qdf_likely(skb->pkt_type != PACKET_MULTICAST))
2696 		return false;
2697 
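	/*
	 * A multicast frame whose source MAC equals this device's own
	 * address is our own transmission echoed back to us, i.e. a replay.
	 */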
2698 	if (qdf_unlikely(ether_addr_equal(eth->h_source, skb->dev->dev_addr)))
2699 		return true;
2700 
2701 	return false;
2702 }
2703 
2704 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
2705 {
2706 	struct arphdr *arp;
2707 	struct in_ifaddr **ifap = NULL;
2708 	struct in_ifaddr *ifa = NULL;
2709 	struct in_device *in_dev;
2710 	unsigned char *arp_ptr;
2711 	__be32 tip;
2712 
2713 	arp = (struct arphdr *)skb->data;
2714 	if (arp->ar_op == htons(ARPOP_REQUEST)) {
2715 		/* if we fail to acquire the rtnl lock, assume it's a local ARP */
2716 		if (!rtnl_trylock())
2717 			return true;
2718 
2719 		in_dev = __in_dev_get_rtnl(skb->dev);
2720 		if (in_dev) {
2721 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
2722 				ifap = &ifa->ifa_next) {
2723 				if (!strcmp(skb->dev->name, ifa->ifa_label))
2724 					break;
2725 			}
2726 		}
2727 
2728 		if (ifa && ifa->ifa_local) {
2729 			arp_ptr = (unsigned char *)(arp + 1);
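			/*
			 * Skip the sender HW address, the 4-byte sender IP
			 * and the target HW address to reach the target IP.
			 */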
2730 			arp_ptr += (skb->dev->addr_len + 4 +
2731 					skb->dev->addr_len);
2732 			memcpy(&tip, arp_ptr, 4);
2733 			qdf_debug("ARP packet: local IP: %x dest IP: %x",
2734 				  ifa->ifa_local, tip);
2735 			if (ifa->ifa_local == tip) {
2736 				rtnl_unlock();
2737 				return true;
2738 			}
2739 		}
2740 		rtnl_unlock();
2741 	}
2742 
2743 	return false;
2744 }
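
/*
 * Illustrative summary (not from the original source): the helper above
 * returns true when an ARP request targets an IPv4 address configured on
 * the receiving netdev, and also errs on the side of "local" when the
 * rtnl lock cannot be taken, so callers can treat a true result as
 * "this ARP concerns the local host".
 */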
2745 
2746 /**
2747  * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
2748  * @data: pointer to data of network buffer
2749  * @tcp_hdr_len_offset: byte offset of the TCP header-length field in the frame
2750  *
2751  * Return: TCP header length in unit of byte
2752  */
2753 static inline
2754 uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
2755 					uint8_t tcp_hdr_len_offset)
2756 {
2757 	uint8_t tcp_hdr_len;
2758 
2759 	tcp_hdr_len =
2760 		*((uint8_t *)(data + tcp_hdr_len_offset));
2761 
2762 	tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
2763 		       QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
2764 		       QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
2765 
2766 	return tcp_hdr_len;
2767 }
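
/*
 * Worked example (illustrative, assuming the usual encoding of the TCP
 * data-offset field): the header length sits in the upper nibble of the
 * byte at @tcp_hdr_len_offset and is expressed in 32-bit words. For a
 * TCP header with no options the nibble is 5, so
 *
 *	((0x50 & MASK) >> LSB) * UNIT  =  5 * 4  =  20 bytes
 *
 * with MASK = 0xf0, LSB = 4 and UNIT = 4 as the QDF_NBUF_PKT_TCP_HDR_LEN_*
 * macros are expected to be defined.
 */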
2768 
2769 bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
2770 {
2771 	bool is_tcp_ack = false;
2772 	uint8_t op_code, tcp_hdr_len;
2773 	uint16_t ip_payload_len;
2774 	uint8_t *data = skb->data;
2775 
2776 	/*
2777 	 * If the packet length exceeds the TCP ACK max length, or the skb
2778 	 * is non-linear, it cannot be a pure TCP ACK.
2779 	 */
2780 	if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
2781 	    qdf_nbuf_is_nonlinear(skb))
2782 		return false;
2783 
2784 	if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
2785 		ip_payload_len =
2786 			QDF_SWAP_U16(*((uint16_t *)(data +
2787 				     QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
2788 					- QDF_NBUF_TRAC_IPV4_HEADER_SIZE;
2789 
2790 		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
2791 					data,
2792 					QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);
2793 
2794 		op_code = (uint8_t)(*(uint8_t *)(data +
2795 				QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));
2796 
2797 		if (ip_payload_len == tcp_hdr_len &&
2798 		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
2799 			is_tcp_ack = true;
2800 
2801 	} else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
2802 		ip_payload_len =
2803 			QDF_SWAP_U16(*((uint16_t *)(data +
2804 				QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));
2805 
2806 		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
2807 					data,
2808 					QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
2809 		op_code = (uint8_t)(*(uint8_t *)(data +
2810 				QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));
2811 
2812 		if (ip_payload_len == tcp_hdr_len &&
2813 		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
2814 			is_tcp_ack = true;
2815 	}
2816 
2817 	return is_tcp_ack;
2818 }
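
/*
 * Worked example (illustrative only): a bare IPv4 ACK with no TCP options
 * has an IP total length of 40. Subtracting the 20-byte IPv4 header gives
 * an IP payload of 20 bytes, which equals the 20-byte TCP header length,
 * and the flags byte holds only ACK, so the frame is classified as a pure
 * TCP ACK. Any payload or extra option bytes break the equality and the
 * check fails.
 */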
2819 
2820 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2821 bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
2822 {
2823 	return nbuf->fast_xmit;
2824 }
2825 
2826 qdf_export_symbol(qdf_nbuf_fast_xmit);
2827 
2828 void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
2829 {
2830 	nbuf->fast_xmit = value;
2831 }
2832 
2833 qdf_export_symbol(qdf_nbuf_set_fast_xmit);
2834 #else
2835 bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
2836 {
2837 	return false;
2838 }
2839 
2840 qdf_export_symbol(qdf_nbuf_fast_xmit);
2841 
2842 void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
2843 {
2844 }
2845 
2846 qdf_export_symbol(qdf_nbuf_set_fast_xmit);
2847 #endif
2848 
2849 #ifdef NBUF_MEMORY_DEBUG
2850 
2851 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2852 
2853 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2854 static struct kmem_cache *nbuf_tracking_cache;
2855 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2856 static spinlock_t qdf_net_buf_track_free_list_lock;
2857 static uint32_t qdf_net_buf_track_free_list_count;
2858 static uint32_t qdf_net_buf_track_used_list_count;
2859 static uint32_t qdf_net_buf_track_max_used;
2860 static uint32_t qdf_net_buf_track_max_free;
2861 static uint32_t qdf_net_buf_track_max_allocated;
2862 static uint32_t qdf_net_buf_track_fail_count;
2863 
2864 /**
2865  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2866  *
2867  * tracks the max number of network buffers that the wlan driver was tracking
2868  * at any one time.
2869  *
2870  * Return: none
2871  */
2872 static inline void update_max_used(void)
2873 {
2874 	int sum;
2875 
2876 	if (qdf_net_buf_track_max_used <
2877 	    qdf_net_buf_track_used_list_count)
2878 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2879 	sum = qdf_net_buf_track_free_list_count +
2880 		qdf_net_buf_track_used_list_count;
2881 	if (qdf_net_buf_track_max_allocated < sum)
2882 		qdf_net_buf_track_max_allocated = sum;
2883 }
2884 
2885 /**
2886  * update_max_free() - update qdf_net_buf_track_max_free tracking variable
2887  *
2888  * tracks the max number of tracking buffers kept in the freelist.
2889  *
2890  * Return: none
2891  */
2892 static inline void update_max_free(void)
2893 {
2894 	if (qdf_net_buf_track_max_free <
2895 	    qdf_net_buf_track_free_list_count)
2896 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2897 }
2898 
2899 /**
2900  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2901  *
2902  * This function pulls from a freelist if possible, otherwise it uses
2903  * kmem_cache_alloc. It also adds flexibility to adjust the allocation
2904  * and freelist schemes.
2905  *
2906  * Return: a pointer to an unused QDF_NBUF_TRACK structure; may not be zeroed.
2907  */
2908 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2909 {
2910 	int flags = GFP_KERNEL;
2911 	unsigned long irq_flag;
2912 	QDF_NBUF_TRACK *new_node = NULL;
2913 
2914 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2915 	qdf_net_buf_track_used_list_count++;
2916 	if (qdf_net_buf_track_free_list) {
2917 		new_node = qdf_net_buf_track_free_list;
2918 		qdf_net_buf_track_free_list =
2919 			qdf_net_buf_track_free_list->p_next;
2920 		qdf_net_buf_track_free_list_count--;
2921 	}
2922 	update_max_used();
2923 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2924 
2925 	if (new_node)
2926 		return new_node;
2927 
2928 	if (in_interrupt() || irqs_disabled() || in_atomic())
2929 		flags = GFP_ATOMIC;
2930 
2931 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2932 }
2933 
2934 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2935 #define FREEQ_POOLSIZE 2048
2936 
2937 /**
2938  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2939  * @node: nbuf tracking node
2940  *
2941  * Matches calls to qdf_nbuf_track_alloc.
2942  * Either frees the tracking cookie back to the kernel or returns it to
2943  * an internal freelist, based on the current freelist size.
2944  *
2945  * Return: none
2946  */
2947 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2948 {
2949 	unsigned long irq_flag;
2950 
2951 	if (!node)
2952 		return;
2953 
2954 	/* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE, and
2955 	 * only shrink the freelist if it is bigger than twice the number of
2956 	 * nbufs in use. If the driver is stalling in a consistent bursty
2957 	 * fashion, this will keep 3/4 of the allocations from the free list
2958 	 * while also allowing the system to recover memory as less frantic
2959 	 * traffic occurs.
2960 	 */
2961 
2962 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2963 
2964 	qdf_net_buf_track_used_list_count--;
2965 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2966 	   (qdf_net_buf_track_free_list_count >
2967 	    qdf_net_buf_track_used_list_count << 1)) {
2968 		kmem_cache_free(nbuf_tracking_cache, node);
2969 	} else {
2970 		node->p_next = qdf_net_buf_track_free_list;
2971 		qdf_net_buf_track_free_list = node;
2972 		qdf_net_buf_track_free_list_count++;
2973 	}
2974 	update_max_free();
2975 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2976 }
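
/*
 * Illustrative example of the shrink policy above: with FREEQ_POOLSIZE
 * of 2048, a freelist holding 5000 nodes while only 1000 nbufs are in
 * use satisfies both conditions (5000 > 2048 and 5000 > 2 * 1000), so
 * freed cookies go back to the kmem_cache; if 3000 nbufs were in use,
 * the second condition fails and the cookie stays on the freelist.
 */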
2977 
2978 /**
2979  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2980  *
2981  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2982  * the freelist first makes it performant for the first iperf udp burst
2983  * as well as steady state.
2984  *
2985  * Return: None
2986  */
2987 static void qdf_nbuf_track_prefill(void)
2988 {
2989 	int i;
2990 	QDF_NBUF_TRACK *node, *head;
2991 
2992 	/* prepopulate the freelist */
2993 	head = NULL;
2994 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2995 		node = qdf_nbuf_track_alloc();
2996 		if (!node)
2997 			continue;
2998 		node->p_next = head;
2999 		head = node;
3000 	}
3001 	while (head) {
3002 		node = head->p_next;
3003 		qdf_nbuf_track_free(head);
3004 		head = node;
3005 	}
3006 
3007 	/* prefilled buffers should not count as used */
3008 	qdf_net_buf_track_max_used = 0;
3009 }
3010 
3011 /**
3012  * qdf_nbuf_track_memory_manager_create() - create manager for nbuf tracking cookies
3013  *
3014  * This initializes the memory manager for the nbuf tracking cookies.  Because
3015  * these cookies are all the same size and only used in this feature, we can
3016  * use a kmem_cache to provide tracking as well as to speed up allocations.
3017  * To avoid the overhead of allocating and freeing the buffers (including SLUB
3018  * features) a freelist is prepopulated here.
3019  *
3020  * Return: None
3021  */
3022 static void qdf_nbuf_track_memory_manager_create(void)
3023 {
3024 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
3025 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
3026 						sizeof(QDF_NBUF_TRACK),
3027 						0, 0, NULL);
3028 
3029 	qdf_nbuf_track_prefill();
3030 }
3031 
3032 /**
3033  * qdf_nbuf_track_memory_manager_destroy() - destroy manager for nbuf tracking cookies
3034  *
3035  * Empty the freelist and print out usage statistics when it is no longer
3036  * needed. The kmem_cache is also destroyed here so that it can warn if
3037  * any nbuf tracking cookies were leaked.
3038  *
3039  * Return: None
3040  */
3041 static void qdf_nbuf_track_memory_manager_destroy(void)
3042 {
3043 	QDF_NBUF_TRACK *node, *tmp;
3044 	unsigned long irq_flag;
3045 
3046 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
3047 	node = qdf_net_buf_track_free_list;
3048 
3049 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
3050 		qdf_print("%s: unexpectedly large max_used count %d",
3051 			  __func__, qdf_net_buf_track_max_used);
3052 
3053 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
3054 		qdf_print("%s: %d unused trackers were allocated",
3055 			  __func__,
3056 			  qdf_net_buf_track_max_allocated -
3057 			  qdf_net_buf_track_max_used);
3058 
3059 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
3060 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
3061 		qdf_print("%s: check freelist shrinking functionality",
3062 			  __func__);
3063 
3064 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3065 		  "%s: %d residual freelist size",
3066 		  __func__, qdf_net_buf_track_free_list_count);
3067 
3068 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3069 		  "%s: %d max freelist size observed",
3070 		  __func__, qdf_net_buf_track_max_free);
3071 
3072 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3073 		  "%s: %d max buffers used observed",
3074 		  __func__, qdf_net_buf_track_max_used);
3075 
3076 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3077 		  "%s: %d max buffers allocated observed",
3078 		  __func__, qdf_net_buf_track_max_allocated);
3079 
3080 	while (node) {
3081 		tmp = node;
3082 		node = node->p_next;
3083 		kmem_cache_free(nbuf_tracking_cache, tmp);
3084 		qdf_net_buf_track_free_list_count--;
3085 	}
3086 
3087 	if (qdf_net_buf_track_free_list_count != 0)
3088 		qdf_info("%d unfreed tracking memory lost in freelist",
3089 			 qdf_net_buf_track_free_list_count);
3090 
3091 	if (qdf_net_buf_track_used_list_count != 0)
3092 		qdf_info("%d unfreed tracking memory still in use",
3093 			 qdf_net_buf_track_used_list_count);
3094 
3095 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3096 	kmem_cache_destroy(nbuf_tracking_cache);
3097 	qdf_net_buf_track_free_list = NULL;
3098 }
3099 
3100 void qdf_net_buf_debug_init(void)
3101 {
3102 	uint32_t i;
3103 
3104 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
3105 
3106 	if (is_initial_mem_debug_disabled)
3107 		return;
3108 
3109 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
3110 
3111 	qdf_nbuf_map_tracking_init();
3112 	qdf_nbuf_smmu_map_tracking_init();
3113 	qdf_nbuf_track_memory_manager_create();
3114 
3115 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3116 		gp_qdf_net_buf_track_tbl[i] = NULL;
3117 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
3118 	}
3119 }
3120 qdf_export_symbol(qdf_net_buf_debug_init);
3121 
3122 void qdf_net_buf_debug_exit(void)
3123 {
3124 	uint32_t i;
3125 	uint32_t count = 0;
3126 	unsigned long irq_flag;
3127 	QDF_NBUF_TRACK *p_node;
3128 	QDF_NBUF_TRACK *p_prev;
3129 
3130 	if (is_initial_mem_debug_disabled)
3131 		return;
3132 
3133 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3134 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3135 		p_node = gp_qdf_net_buf_track_tbl[i];
3136 		while (p_node) {
3137 			p_prev = p_node;
3138 			p_node = p_node->p_next;
3139 			count++;
3140 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
3141 				 p_prev->func_name, p_prev->line_num,
3142 				 p_prev->size, p_prev->net_buf);
3143 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
3144 				 p_prev->map_func_name,
3145 				 p_prev->map_line_num,
3146 				 p_prev->unmap_func_name,
3147 				 p_prev->unmap_line_num,
3148 				 p_prev->is_nbuf_mapped);
3149 			qdf_nbuf_track_free(p_prev);
3150 		}
3151 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3152 	}
3153 
3154 	qdf_nbuf_track_memory_manager_destroy();
3155 	qdf_nbuf_map_tracking_deinit();
3156 	qdf_nbuf_smmu_map_tracking_deinit();
3157 
3158 #ifdef CONFIG_HALT_KMEMLEAK
3159 	if (count) {
3160 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
3161 		QDF_BUG(0);
3162 	}
3163 #endif
3164 }
3165 qdf_export_symbol(qdf_net_buf_debug_exit);
3166 
3167 /**
3168  * qdf_net_buf_debug_hash() - hash network buffer pointer
3169  * @net_buf: network buffer
3170  *
3171  * Return: hash value
3172  */
3173 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
3174 {
3175 	uint32_t i;
3176 
3177 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
3178 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
3179 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
3180 
3181 	return i;
3182 }
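
/*
 * Illustrative note: the hash folds two shifted copies of the pointer so
 * that allocations differing only in their low-order (alignment) bits or
 * only in their high-order (slab/region) bits still spread across the
 * table. QDF_NET_BUF_TRACK_MAX_SIZE is assumed to be a power of two,
 * which is what makes the final "& (SIZE - 1)" a valid modulo.
 */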
3183 
3184 /**
3185  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
3186  * @net_buf: network buffer
3187  *
3188  * Return: If skb is found in hash table then return pointer to network buffer
3189  *	else return %NULL
3190  */
3191 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
3192 {
3193 	uint32_t i;
3194 	QDF_NBUF_TRACK *p_node;
3195 
3196 	i = qdf_net_buf_debug_hash(net_buf);
3197 	p_node = gp_qdf_net_buf_track_tbl[i];
3198 
3199 	while (p_node) {
3200 		if (p_node->net_buf == net_buf)
3201 			return p_node;
3202 		p_node = p_node->p_next;
3203 	}
3204 
3205 	return NULL;
3206 }
3207 
3208 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
3209 				const char *func_name, uint32_t line_num)
3210 {
3211 	uint32_t i;
3212 	unsigned long irq_flag;
3213 	QDF_NBUF_TRACK *p_node;
3214 	QDF_NBUF_TRACK *new_node;
3215 
3216 	if (is_initial_mem_debug_disabled)
3217 		return;
3218 
3219 	new_node = qdf_nbuf_track_alloc();
3220 
3221 	i = qdf_net_buf_debug_hash(net_buf);
3222 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3223 
3224 	p_node = qdf_net_buf_debug_look_up(net_buf);
3225 
3226 	if (p_node) {
3227 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
3228 			  p_node->net_buf, p_node->func_name, p_node->line_num,
3229 			  net_buf, func_name, line_num);
3230 		qdf_nbuf_track_free(new_node);
3231 	} else {
3232 		p_node = new_node;
3233 		if (p_node) {
3234 			p_node->net_buf = net_buf;
3235 			qdf_str_lcopy(p_node->func_name, func_name,
3236 				      QDF_MEM_FUNC_NAME_SIZE);
3237 			p_node->line_num = line_num;
3238 			p_node->is_nbuf_mapped = false;
3239 			p_node->map_line_num = 0;
3240 			p_node->unmap_line_num = 0;
3241 			p_node->map_func_name[0] = '\0';
3242 			p_node->unmap_func_name[0] = '\0';
3243 			p_node->size = size;
3244 			p_node->time = qdf_get_log_timestamp();
3245 			qdf_net_buf_update_smmu_params(p_node);
3246 			qdf_mem_skb_inc(size);
3247 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
3248 			gp_qdf_net_buf_track_tbl[i] = p_node;
3249 		} else {
3250 			qdf_net_buf_track_fail_count++;
3251 			qdf_print(
3252 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
3253 				  func_name, line_num, size);
3254 		}
3255 	}
3256 
3257 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3258 }
3259 qdf_export_symbol(qdf_net_buf_debug_add_node);
3260 
3261 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
3262 				   uint32_t line_num)
3263 {
3264 	uint32_t i;
3265 	unsigned long irq_flag;
3266 	QDF_NBUF_TRACK *p_node;
3267 
3268 	if (is_initial_mem_debug_disabled)
3269 		return;
3270 
3271 	i = qdf_net_buf_debug_hash(net_buf);
3272 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3273 
3274 	p_node = qdf_net_buf_debug_look_up(net_buf);
3275 
3276 	if (p_node) {
3277 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
3278 			      QDF_MEM_FUNC_NAME_SIZE);
3279 		p_node->line_num = line_num;
3280 	}
3281 
3282 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3283 }
3284 
3285 qdf_export_symbol(qdf_net_buf_debug_update_node);
3286 
3287 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
3288 				       const char *func_name,
3289 				       uint32_t line_num)
3290 {
3291 	uint32_t i;
3292 	unsigned long irq_flag;
3293 	QDF_NBUF_TRACK *p_node;
3294 
3295 	if (is_initial_mem_debug_disabled)
3296 		return;
3297 
3298 	i = qdf_net_buf_debug_hash(net_buf);
3299 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3300 
3301 	p_node = qdf_net_buf_debug_look_up(net_buf);
3302 
3303 	if (p_node) {
3304 		qdf_str_lcopy(p_node->map_func_name, func_name,
3305 			      QDF_MEM_FUNC_NAME_SIZE);
3306 		p_node->map_line_num = line_num;
3307 		p_node->is_nbuf_mapped = true;
3308 	}
3309 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3310 }
3311 
3312 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
3313 void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
3314 					    unsigned long iova,
3315 					    unsigned long pa,
3316 					    const char *func,
3317 					    uint32_t line)
3318 {
3319 	uint32_t i;
3320 	unsigned long irq_flag;
3321 	QDF_NBUF_TRACK *p_node;
3322 
3323 	if (is_initial_mem_debug_disabled)
3324 		return;
3325 
3326 	i = qdf_net_buf_debug_hash(nbuf);
3327 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3328 
3329 	p_node = qdf_net_buf_debug_look_up(nbuf);
3330 
3331 	if (p_node) {
3332 		qdf_str_lcopy(p_node->smmu_map_func_name, func,
3333 			      QDF_MEM_FUNC_NAME_SIZE);
3334 		p_node->smmu_map_line_num = line;
3335 		p_node->is_nbuf_smmu_mapped = true;
3336 		p_node->smmu_map_iova_addr = iova;
3337 		p_node->smmu_map_pa_addr = pa;
3338 	}
3339 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3340 }
3341 
3342 void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
3343 					      unsigned long iova,
3344 					      unsigned long pa,
3345 					      const char *func,
3346 					      uint32_t line)
3347 {
3348 	uint32_t i;
3349 	unsigned long irq_flag;
3350 	QDF_NBUF_TRACK *p_node;
3351 
3352 	if (is_initial_mem_debug_disabled)
3353 		return;
3354 
3355 	i = qdf_net_buf_debug_hash(nbuf);
3356 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3357 
3358 	p_node = qdf_net_buf_debug_look_up(nbuf);
3359 
3360 	if (p_node) {
3361 		qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
3362 			      QDF_MEM_FUNC_NAME_SIZE);
3363 		p_node->smmu_unmap_line_num = line;
3364 		p_node->is_nbuf_smmu_mapped = false;
3365 		p_node->smmu_unmap_iova_addr = iova;
3366 		p_node->smmu_unmap_pa_addr = pa;
3367 	}
3368 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3369 }
3370 #endif
3371 
3372 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3373 					 const char *func_name,
3374 					 uint32_t line_num)
3375 {
3376 	uint32_t i;
3377 	unsigned long irq_flag;
3378 	QDF_NBUF_TRACK *p_node;
3379 
3380 	if (is_initial_mem_debug_disabled)
3381 		return;
3382 
3383 	i = qdf_net_buf_debug_hash(net_buf);
3384 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3385 
3386 	p_node = qdf_net_buf_debug_look_up(net_buf);
3387 
3388 	if (p_node) {
3389 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
3390 			      QDF_MEM_FUNC_NAME_SIZE);
3391 		p_node->unmap_line_num = line_num;
3392 		p_node->is_nbuf_mapped = false;
3393 	}
3394 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3395 }
3396 
3397 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3398 {
3399 	uint32_t i;
3400 	QDF_NBUF_TRACK *p_head;
3401 	QDF_NBUF_TRACK *p_node = NULL;
3402 	unsigned long irq_flag;
3403 	QDF_NBUF_TRACK *p_prev;
3404 
3405 	if (is_initial_mem_debug_disabled)
3406 		return;
3407 
3408 	i = qdf_net_buf_debug_hash(net_buf);
3409 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3410 
3411 	p_head = gp_qdf_net_buf_track_tbl[i];
3412 
3413 	/* Unallocated SKB */
3414 	if (!p_head)
3415 		goto done;
3416 
3417 	p_node = p_head;
3418 	/* Found at head of the table */
3419 	if (p_head->net_buf == net_buf) {
3420 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3421 		goto done;
3422 	}
3423 
3424 	/* Search in collision list */
3425 	while (p_node) {
3426 		p_prev = p_node;
3427 		p_node = p_node->p_next;
3428 		if ((p_node) && (p_node->net_buf == net_buf)) {
3429 			p_prev->p_next = p_node->p_next;
3430 			break;
3431 		}
3432 	}
3433 
3434 done:
3435 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3436 
3437 	if (p_node) {
3438 		qdf_mem_skb_dec(p_node->size);
3439 		qdf_nbuf_track_free(p_node);
3440 	} else {
3441 		if (qdf_net_buf_track_fail_count) {
3442 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3443 				  net_buf, qdf_net_buf_track_fail_count);
3444 		} else
3445 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3446 					   net_buf);
3447 	}
3448 }
3449 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3450 
3451 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3452 				   const char *func_name, uint32_t line_num)
3453 {
3454 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3455 
3456 	if (is_initial_mem_debug_disabled)
3457 		return;
3458 
3459 	while (ext_list) {
3460 		/*
3461 		 * Also add tracking nodes for jumbo packets that are
3462 		 * chained using frag_list
3463 		 */
3464 		qdf_nbuf_t next;
3465 
3466 		next = qdf_nbuf_queue_next(ext_list);
3467 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3468 		ext_list = next;
3469 	}
3470 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3471 }
3472 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3473 
3474 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3475 {
3476 	qdf_nbuf_t ext_list;
3477 
3478 	if (is_initial_mem_debug_disabled)
3479 		return;
3480 
3481 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3482 	while (ext_list) {
3483 		/*
3484 		 * Also free tracking nodes for jumbo packets that are
3485 		 * chained using frag_list
3486 		 */
3487 		qdf_nbuf_t next;
3488 
3489 		next = qdf_nbuf_queue_next(ext_list);
3490 
3491 		if (qdf_nbuf_get_users(ext_list) > 1) {
3492 			ext_list = next;
3493 			continue;
3494 		}
3495 
3496 		qdf_net_buf_debug_delete_node(ext_list);
3497 		ext_list = next;
3498 	}
3499 
3500 	if (qdf_nbuf_get_users(net_buf) > 1)
3501 		return;
3502 
3503 	qdf_net_buf_debug_delete_node(net_buf);
3504 }
3505 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3506 
3507 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3508 				int reserve, int align, int prio,
3509 				const char *func, uint32_t line)
3510 {
3511 	qdf_nbuf_t nbuf;
3512 
3513 	if (is_initial_mem_debug_disabled)
3514 		return __qdf_nbuf_alloc(osdev, size,
3515 					reserve, align,
3516 					prio, func, line);
3517 
3518 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3519 
3520 	/* Store SKB in internal QDF tracking table */
3521 	if (qdf_likely(nbuf)) {
3522 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3523 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3524 	} else {
3525 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3526 	}
3527 
3528 	return nbuf;
3529 }
3530 qdf_export_symbol(qdf_nbuf_alloc_debug);
3531 
3532 qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3533 				     int reserve, int align, int prio,
3534 				     const char *func, uint32_t line)
3535 {
3536 	qdf_nbuf_t nbuf;
3537 
3538 	if (is_initial_mem_debug_disabled)
3539 		return __qdf_nbuf_frag_alloc(osdev, size,
3540 					reserve, align,
3541 					prio, func, line);
3542 
3543 	nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
3544 				     func, line);
3545 
3546 	/* Store SKB in internal QDF tracking table */
3547 	if (qdf_likely(nbuf)) {
3548 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3549 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3550 	} else {
3551 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3552 	}
3553 
3554 	return nbuf;
3555 }
3556 
3557 qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
3558 
3559 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3560 					    const char *func, uint32_t line)
3561 {
3562 	qdf_nbuf_t nbuf;
3563 
3564 	if (is_initial_mem_debug_disabled)
3565 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3566 						    line);
3567 
3568 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3569 
3570 	/* Store SKB in internal QDF tracking table */
3571 	if (qdf_likely(nbuf)) {
3572 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3573 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3574 	} else {
3575 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3576 	}
3577 
3578 	return nbuf;
3579 }
3580 
3581 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3582 
3583 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3584 {
3585 	qdf_nbuf_t ext_list;
3586 	qdf_frag_t p_frag;
3587 	uint32_t num_nr_frags;
3588 	uint32_t idx = 0;
3589 
3590 	if (qdf_unlikely(!nbuf))
3591 		return;
3592 
3593 	if (is_initial_mem_debug_disabled)
3594 		goto free_buf;
3595 
3596 	if (qdf_nbuf_get_users(nbuf) > 1)
3597 		goto free_buf;
3598 
3599 	/* Remove SKB from internal QDF tracking table */
3600 	qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
3601 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3602 	qdf_net_buf_debug_delete_node(nbuf);
3603 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3604 
3605 	/* Take care to delete the debug entries for frags */
3606 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3607 
3608 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3609 
3610 	while (idx < num_nr_frags) {
3611 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3612 		if (qdf_likely(p_frag))
3613 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3614 		idx++;
3615 	}
3616 
3617 	/*
3618 	 * Take care to update the debug entries for frag_list and also
3619 	 * for the frags attached to frag_list
3620 	 */
3621 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3622 	while (ext_list) {
3623 		if (qdf_nbuf_get_users(ext_list) == 1) {
3624 			qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
3625 							      line);
3626 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3627 			idx = 0;
3628 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3629 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3630 			while (idx < num_nr_frags) {
3631 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3632 				if (qdf_likely(p_frag))
3633 					qdf_frag_debug_refcount_dec(p_frag,
3634 								    func, line);
3635 				idx++;
3636 			}
3637 			qdf_net_buf_debug_delete_node(ext_list);
3638 		}
3639 
3640 		ext_list = qdf_nbuf_queue_next(ext_list);
3641 	}
3642 
3643 free_buf:
3644 	__qdf_nbuf_free(nbuf);
3645 }
3646 qdf_export_symbol(qdf_nbuf_free_debug);
3647 
3648 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3649 					const char *func, uint32_t line)
3650 {
3651 	struct sk_buff *skb;
3652 	int flags = GFP_KERNEL;
3653 
3654 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3655 		flags = GFP_ATOMIC;
3656 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3657 		/*
3658 		 * Observed that kcompactd burns out CPU to make order-3 pages.
3659 		 * __netdev_alloc_skb has a 4k page fallback option in case the
3660 		 * high-order page allocation fails, so we don't need to push
3661 		 * hard. Let kcompactd rest in peace.
3662 		 */
3663 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3664 #endif
3665 	}
3666 
3667 	skb = __netdev_alloc_skb(NULL, size, flags);
3668 
3669 
3670 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3671 		if (qdf_likely(skb))
3672 			qdf_nbuf_count_inc(skb);
3673 	} else {
3674 		if (qdf_likely(skb)) {
3675 			qdf_nbuf_count_inc(skb);
3676 			qdf_net_buf_debug_add_node(skb, size, func, line);
3677 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
3678 		} else {
3679 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
3680 		}
3681 	}
3682 
3683 
3684 	return skb;
3685 }
3686 
3687 qdf_export_symbol(__qdf_nbuf_alloc_simple);
3688 
3689 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
3690 				uint32_t line)
3691 {
3692 	if (qdf_likely(nbuf)) {
3693 		if (is_initial_mem_debug_disabled) {
3694 			dev_kfree_skb_any(nbuf);
3695 		} else {
3696 			qdf_nbuf_free_debug(nbuf, func, line);
3697 		}
3698 	}
3699 }
3700 
3701 qdf_export_symbol(qdf_nbuf_free_debug_simple);
3702 
3703 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3704 {
3705 	uint32_t num_nr_frags;
3706 	uint32_t idx = 0;
3707 	qdf_nbuf_t ext_list;
3708 	qdf_frag_t p_frag;
3709 
3710 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3711 
3712 	if (is_initial_mem_debug_disabled)
3713 		return cloned_buf;
3714 
3715 	if (qdf_unlikely(!cloned_buf))
3716 		return NULL;
3717 
3718 	/* Take care to update the debug entries for frags */
3719 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3720 
3721 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3722 
3723 	while (idx < num_nr_frags) {
3724 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3725 		if (qdf_likely(p_frag))
3726 			qdf_frag_debug_refcount_inc(p_frag, func, line);
3727 		idx++;
3728 	}
3729 
3730 	/* Take care to update debug entries for frags attached to frag_list */
3731 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3732 	while (ext_list) {
3733 		idx = 0;
3734 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3735 
3736 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3737 
3738 		while (idx < num_nr_frags) {
3739 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3740 			if (qdf_likely(p_frag))
3741 				qdf_frag_debug_refcount_inc(p_frag, func, line);
3742 			idx++;
3743 		}
3744 		ext_list = qdf_nbuf_queue_next(ext_list);
3745 	}
3746 
3747 	/* Store SKB in internal QDF tracking table */
3748 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3749 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3750 
3751 	return cloned_buf;
3752 }
3753 qdf_export_symbol(qdf_nbuf_clone_debug);
3754 
3755 qdf_nbuf_t
3756 qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
3757 			       int align, __qdf_frag_cache_t *pf_cache,
3758 			       const char *func, uint32_t line)
3759 {
3760 	qdf_nbuf_t nbuf;
3761 
3762 	if (is_initial_mem_debug_disabled)
3763 		return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3764 						  pf_cache, func, line);
3765 
3766 	nbuf = __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3767 					  pf_cache, func, line);
3768 
3769 	/* Store SKB in internal QDF tracking table */
3770 	if (qdf_likely(nbuf)) {
3771 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3772 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3773 	} else {
3774 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3775 	}
3776 
3777 	return nbuf;
3778 }
3779 
3780 qdf_export_symbol(qdf_nbuf_page_frag_alloc_debug);
3781 
3782 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3783 {
3784 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3785 
3786 	if (is_initial_mem_debug_disabled)
3787 		return copied_buf;
3788 
3789 	if (qdf_unlikely(!copied_buf))
3790 		return NULL;
3791 
3792 	/* Store SKB in internal QDF tracking table */
3793 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3794 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3795 
3796 	return copied_buf;
3797 }
3798 qdf_export_symbol(qdf_nbuf_copy_debug);
3799 
3800 qdf_nbuf_t
3801 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3802 			   const char *func, uint32_t line)
3803 {
3804 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3805 
3806 	if (qdf_unlikely(!copied_buf))
3807 		return NULL;
3808 
3809 	if (is_initial_mem_debug_disabled)
3810 		return copied_buf;
3811 
3812 	/* Store SKB in internal QDF tracking table */
3813 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3814 	qdf_nbuf_history_add(copied_buf, func, line,
3815 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3816 
3817 	return copied_buf;
3818 }
3819 
3820 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3821 
3822 qdf_nbuf_t
3823 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3824 		       uint32_t line_num)
3825 {
3826 	qdf_nbuf_t unshared_buf;
3827 	qdf_frag_t p_frag;
3828 	uint32_t num_nr_frags;
3829 	uint32_t idx = 0;
3830 	qdf_nbuf_t ext_list, next;
3831 
3832 	if (is_initial_mem_debug_disabled)
3833 		return __qdf_nbuf_unshare(buf);
3834 
3835 	/* Not a shared buffer, nothing to do */
3836 	if (!qdf_nbuf_is_cloned(buf))
3837 		return buf;
3838 
3839 	if (qdf_nbuf_get_users(buf) > 1)
3840 		goto unshare_buf;
3841 
3842 	/* Take care to delete the debug entries for frags */
3843 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3844 
3845 	while (idx < num_nr_frags) {
3846 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3847 		if (qdf_likely(p_frag))
3848 			qdf_frag_debug_refcount_dec(p_frag, func_name,
3849 						    line_num);
3850 		idx++;
3851 	}
3852 
3853 	qdf_net_buf_debug_delete_node(buf);
3854 
3855 	/* Take care of jumbo packets chained using frag_list and their frags */
3856 	ext_list = qdf_nbuf_get_ext_list(buf);
3857 	while (ext_list) {
3858 		idx = 0;
3859 		next = qdf_nbuf_queue_next(ext_list);
3860 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3861 
3862 		if (qdf_nbuf_get_users(ext_list) > 1) {
3863 			ext_list = next;
3864 			continue;
3865 		}
3866 
3867 		while (idx < num_nr_frags) {
3868 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3869 			if (qdf_likely(p_frag))
3870 				qdf_frag_debug_refcount_dec(p_frag, func_name,
3871 							    line_num);
3872 			idx++;
3873 		}
3874 
3875 		qdf_net_buf_debug_delete_node(ext_list);
3876 		ext_list = next;
3877 	}
3878 
3879 unshare_buf:
3880 	unshared_buf = __qdf_nbuf_unshare(buf);
3881 
3882 	if (qdf_likely(unshared_buf))
3883 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3884 					   line_num);
3885 
3886 	return unshared_buf;
3887 }
3888 
3889 qdf_export_symbol(qdf_nbuf_unshare_debug);
3890 
3891 void
3892 qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
3893 			      const char *func, uint32_t line)
3894 {
3895 	qdf_nbuf_t  buf;
3896 
3897 	if (qdf_nbuf_queue_empty(nbuf_queue_head))
3898 		return;
3899 
3900 	if (is_initial_mem_debug_disabled)
3901 		return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
3902 
3903 	while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
3904 		qdf_nbuf_free_debug(buf, func, line);
3905 }
3906 
3907 qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
3908 #endif /* NBUF_MEMORY_DEBUG */
3909 
3910 #if defined(QCA_DP_NBUF_FAST_PPEDS)
3911 #if defined(NBUF_MEMORY_DEBUG)
3912 struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3913 					const char *func, uint32_t line)
3914 {
3915 	struct sk_buff *skb;
3916 	int flags = GFP_KERNEL;
3917 
3918 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3919 		flags = GFP_ATOMIC;
3920 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3921 		/*
3922 		 * Observed that kcompactd burns CPU cycles trying to build
3923 		 * order-3 pages. __netdev_alloc_skb() already falls back to
3924 		 * a 4K page when a high-order allocation fails, so there is
3925 		 * no need to push hard here.
3926 		 * Let kcompactd rest in peace.
3927 		 */
3928 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3929 #endif
3930 	}
3931 	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3932 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3933 		if (qdf_likely(skb))
3934 			qdf_nbuf_count_inc(skb);
3935 	} else {
3936 		if (qdf_likely(skb)) {
3937 			qdf_nbuf_count_inc(skb);
3938 			qdf_net_buf_debug_add_node(skb, size, func, line);
3939 			qdf_nbuf_history_add(skb, func, line,
3940 					     QDF_NBUF_ALLOC);
3941 		} else {
3942 			qdf_nbuf_history_add(skb, func, line,
3943 					     QDF_NBUF_ALLOC_FAILURE);
3944 		}
3945 	}
3946 	return skb;
3947 }
3948 #else
3949 struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3950 					const char *func, uint32_t line)
3951 {
3952 	struct sk_buff *skb;
3953 	int flags = GFP_KERNEL;
3954 
3955 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3956 		flags = GFP_ATOMIC;
3957 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3958 		/*
3959 		 * Observed that kcompactd burns out CPU to make order-3
3960 		 * page.__netdev_alloc_skb has 4k page fallback option
3961 		 * just in case of
3962 		 * failing high order page allocation so we don't need
3963 		 * to be hard. Make kcompactd rest in piece.
3964 		 */
3965 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3966 #endif
3967 	}
3968 	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3969 	if (qdf_likely(skb))
3970 		qdf_nbuf_count_inc(skb);
3971 
3972 	return skb;
3973 }
3974 #endif
3975 qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
3976 #endif
3977 
3978 #if defined(FEATURE_TSO)
3979 
3980 /**
3981  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3982  *
3983  * @ethproto: ethernet type of the msdu
3984  * @ip_tcp_hdr_len: ip + tcp length for the msdu
3985  * @l2_len: L2 length for the msdu
3986  * @eit_hdr: pointer to EIT header
3987  * @eit_hdr_len: EIT header length for the msdu
3988  * @eit_hdr_dma_map_addr: dma addr for EIT header
3989  * @tcphdr: pointer to tcp header
3990  * @ipv4_csum_en: ipv4 checksum enable
3991  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3992  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3993  * @ip_id: IP id
3994  * @tcp_seq_num: TCP sequence number
3995  *
3996  * This structure holds the TSO common info that is common
3997  * across all the TCP segments of the jumbo packet.
3998  */
3999 struct qdf_tso_cmn_seg_info_t {
4000 	uint16_t ethproto;
4001 	uint16_t ip_tcp_hdr_len;
4002 	uint16_t l2_len;
4003 	uint8_t *eit_hdr;
4004 	uint32_t eit_hdr_len;
4005 	qdf_dma_addr_t eit_hdr_dma_map_addr;
4006 	struct tcphdr *tcphdr;
4007 	uint16_t ipv4_csum_en;
4008 	uint16_t tcp_ipv4_csum_en;
4009 	uint16_t tcp_ipv6_csum_en;
4010 	uint16_t ip_id;
4011 	uint32_t tcp_seq_num;
4012 };
4013 
4014 /**
4015  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
4016  * @skb: network buffer
4017  *
4018  * Return: byte offset by which the header was shifted so it ends 8-byte
4019  * aligned, or 0 if no adjustment was made
4019  */
4020 #ifdef FIX_TXDMA_LIMITATION
4021 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4022 {
4023 	uint32_t eit_hdr_len;
4024 	uint8_t *eit_hdr;
4025 	uint8_t byte_8_align_offset;
4026 
4027 	eit_hdr = skb->data;
4028 	eit_hdr_len = (skb_transport_header(skb)
4029 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4030 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
4031 	if (qdf_unlikely(byte_8_align_offset)) {
4032 		TSO_DEBUG("%pK,Len %d %d",
4033 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
4034 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
4035 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
4036 				  __LINE__, skb->head, skb->data,
4037 				 byte_8_align_offset);
4038 			return 0;
4039 		}
4040 		qdf_nbuf_push_head(skb, byte_8_align_offset);
4041 		qdf_mem_move(skb->data,
4042 			     skb->data + byte_8_align_offset,
4043 			     eit_hdr_len);
4044 		skb->len -= byte_8_align_offset;
4045 		skb->mac_header -= byte_8_align_offset;
4046 		skb->network_header -= byte_8_align_offset;
4047 		skb->transport_header -= byte_8_align_offset;
4048 	}
4049 	return byte_8_align_offset;
4050 }
4051 #else
4052 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4053 {
4054 	return 0;
4055 }
4056 #endif
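
/*
 * Worked example for the FIX_TXDMA_LIMITATION adjustment above (purely
 * illustrative addresses): with the EIT header starting at 0x1001 and
 * eit_hdr_len = 54 (14-byte Ethernet + 20-byte IPv4 + 20-byte TCP),
 * (0x1001 + 54) & 0x7 = 0x1037 & 0x7 = 7. Provided the skb has at least
 * 7 bytes of headroom, the header is pushed and moved down by 7 bytes so
 * it now ends at 0x1030, an 8-byte boundary, and 7 is returned so the
 * caller can recompute the payload fragment address.
 */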
4057 
4058 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
4059 void qdf_record_nbuf_nbytes(
4060 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
4061 {
4062 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
4063 }
4064 
4065 qdf_export_symbol(qdf_record_nbuf_nbytes);
4066 
4067 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
4068 
4069 /**
4070  * qdf_nbuf_tso_map_frag() - Map TSO segment
4071  * @osdev: qdf device handle
4072  * @tso_frag_vaddr: addr of tso fragment
4073  * @nbytes: number of bytes
4074  * @dir: direction
4075  *
4076  * Map a TSO fragment for DMA and, for MCL, record the number of bytes mapped
4077  *
4078  * Return: DMA address of the mapped TSO fragment on success,
4079  * 0 in case of DMA mapping failure
4080  */
4081 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
4082 	qdf_device_t osdev, void *tso_frag_vaddr,
4083 	uint32_t nbytes, qdf_dma_dir_t dir)
4084 {
4085 	qdf_dma_addr_t tso_frag_paddr = 0;
4086 
4087 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
4088 					nbytes, __qdf_dma_dir_to_os(dir));
4089 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
4090 		qdf_err("DMA mapping error!");
4091 		qdf_assert_always(0);
4092 		return 0;
4093 	}
4094 	qdf_record_nbuf_nbytes(nbytes, dir, true);
4095 	return tso_frag_paddr;
4096 }
4097 
4098 /**
4099  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
4100  * @osdev: qdf device handle
4101  * @tso_frag_paddr: DMA addr of tso fragment
4102  * @dir: direction
4103  * @nbytes: number of bytes
4104  *
4105  * Unmap a TSO fragment and, for MCL, record the number of bytes unmapped
4106  *
4107  * Return: None
4108  */
4109 static inline void qdf_nbuf_tso_unmap_frag(
4110 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
4111 	uint32_t nbytes, qdf_dma_dir_t dir)
4112 {
4113 	qdf_record_nbuf_nbytes(nbytes, dir, false);
4114 	dma_unmap_single(osdev->dev, tso_frag_paddr,
4115 			 nbytes, __qdf_dma_dir_to_os(dir));
4116 }
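
/*
 * Usage sketch for the two helpers above (hypothetical caller, for
 * illustration only; everything except the helpers themselves is made up):
 *
 *	qdf_dma_addr_t paddr;
 *
 *	paddr = qdf_nbuf_tso_map_frag(osdev, frag_vaddr, frag_len,
 *				      QDF_DMA_TO_DEVICE);
 *	if (qdf_unlikely(!paddr))
 *		return 0;
 *	... program paddr and frag_len into the TX descriptor ...
 *	qdf_nbuf_tso_unmap_frag(osdev, paddr, frag_len, QDF_DMA_TO_DEVICE);
 *
 * Mapping and unmapping the same byte count keeps the
 * CONFIG_WLAN_SYSFS_MEM_STATS byte accounting balanced.
 */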
4117 
4118 /**
4119  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
4120  * information
4121  * @osdev: qdf device handle
4122  * @skb: skb buffer
4123  * @tso_info: Parameters common to all segments
4124  *
4125  * Get the TSO information that is common across all the TCP
4126  * segments of the jumbo packet
4127  *
4128  * Return: 0 - success 1 - failure
4129  */
4130 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
4131 			struct sk_buff *skb,
4132 			struct qdf_tso_cmn_seg_info_t *tso_info)
4133 {
4134 	/* Get ethernet type and ethernet header length */
4135 	tso_info->ethproto = vlan_get_protocol(skb);
4136 
4137 	/* Determine whether this is an IPv4 or IPv6 packet */
4138 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
4139 		/* for IPv4, get the IP ID and enable TCP and IP csum */
4140 		struct iphdr *ipv4_hdr = ip_hdr(skb);
4141 
4142 		tso_info->ip_id = ntohs(ipv4_hdr->id);
4143 		tso_info->ipv4_csum_en = 1;
4144 		tso_info->tcp_ipv4_csum_en = 1;
4145 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
4146 			qdf_err("TSO IPV4 proto 0x%x not TCP",
4147 				ipv4_hdr->protocol);
4148 			return 1;
4149 		}
4150 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
4151 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
4152 		tso_info->tcp_ipv6_csum_en = 1;
4153 	} else {
4154 		qdf_err("TSO: ethertype 0x%x is not supported!",
4155 			tso_info->ethproto);
4156 		return 1;
4157 	}
4158 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
4159 	tso_info->tcphdr = tcp_hdr(skb);
4160 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
4161 	/* get pointer to the ethernet + IP + TCP header and their length */
4162 	tso_info->eit_hdr = skb->data;
4163 	tso_info->eit_hdr_len = (skb_transport_header(skb)
4164 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4165 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
4166 						osdev, tso_info->eit_hdr,
4167 						tso_info->eit_hdr_len,
4168 						QDF_DMA_TO_DEVICE);
4169 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
4170 		return 1;
4171 
4172 	if (tso_info->ethproto == htons(ETH_P_IP)) {
4173 		/* include IPv4 header length for IPV4 (total length) */
4174 		tso_info->ip_tcp_hdr_len =
4175 			tso_info->eit_hdr_len - tso_info->l2_len;
4176 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
4177 		/* exclude IPv6 header length for IPv6 (payload length) */
4178 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
4179 	}
4180 	/*
4181 	 * The length of the payload (application layer data) is added to
4182 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
4183 	 * descriptor.
4184 	 */
4185 
4186 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
4187 		tso_info->tcp_seq_num,
4188 		tso_info->eit_hdr_len,
4189 		tso_info->l2_len,
4190 		skb->len);
4191 	return 0;
4192 }
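
/*
 * Example of the header-length bookkeeping above (illustrative values):
 * for an IPv4 TCP skb with a 14-byte Ethernet header, a 20-byte IP header
 * and a 20-byte TCP header, eit_hdr_len = 54, l2_len = 14 and
 * ip_tcp_hdr_len = 54 - 14 = 40 (the IPv4 total-length field covers
 * IP + TCP). For the equivalent IPv6 skb, ip_tcp_hdr_len = tcp_hdrlen(skb)
 * = 20, since the IPv6 payload-length field excludes the IPv6 header.
 */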
4193 
4194 
4195 /**
4196  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
4197  *
4198  * @curr_seg: Segment whose contents are initialized
4199  * @tso_cmn_info: Parameters common to all segments
4200  *
4201  * Return: None
4202  */
4203 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
4204 				struct qdf_tso_seg_elem_t *curr_seg,
4205 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
4206 {
4207 	/* Initialize the flags to 0 */
4208 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
4209 
4210 	/*
4211 	 * The following fields remain the same across all segments of
4212 	 * a jumbo packet
4213 	 */
4214 	curr_seg->seg.tso_flags.tso_enable = 1;
4215 	curr_seg->seg.tso_flags.ipv4_checksum_en =
4216 		tso_cmn_info->ipv4_csum_en;
4217 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
4218 		tso_cmn_info->tcp_ipv6_csum_en;
4219 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
4220 		tso_cmn_info->tcp_ipv4_csum_en;
4221 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
4222 
4223 	/* The following fields change for the segments */
4224 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
4225 	tso_cmn_info->ip_id++;
4226 
4227 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
4228 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
4229 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
4230 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
4231 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
4232 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
4233 
4234 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
4235 
4236 	/*
4237 	 * First fragment for each segment always contains the ethernet,
4238 	 * IP and TCP header
4239 	 */
4240 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
4241 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
4242 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
4243 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
4244 
4245 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
4246 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
4247 		   tso_cmn_info->eit_hdr_len,
4248 		   curr_seg->seg.tso_flags.tcp_seq_num,
4249 		   curr_seg->seg.total_len);
4250 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
4251 }
4252 
4253 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
4254 		struct qdf_tso_info_t *tso_info)
4255 {
4256 	/* common across all segments */
4257 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
4258 	/* segment specific */
4259 	void *tso_frag_vaddr;
4260 	qdf_dma_addr_t tso_frag_paddr = 0;
4261 	uint32_t num_seg = 0;
4262 	struct qdf_tso_seg_elem_t *curr_seg;
4263 	struct qdf_tso_num_seg_elem_t *total_num_seg;
4264 	skb_frag_t *frag = NULL;
4265 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
4266 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
4267 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
4268 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4269 	int j = 0; /* skb fragment index */
4270 	uint8_t byte_8_align_offset;
4271 
4272 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
4273 	total_num_seg = tso_info->tso_num_seg_list;
4274 	curr_seg = tso_info->tso_seg_list;
4275 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
4276 
4277 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
4278 
4279 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
4280 						skb, &tso_cmn_info))) {
4281 		qdf_warn("TSO: error getting common segment info");
4282 		return 0;
4283 	}
4284 
4285 	/* length of the first chunk of data in the skb */
4286 	skb_frag_len = skb_headlen(skb);
4287 
4288 	/* the 0th tso segment's 0th fragment always contains the EIT header */
4289 	/* update the remaining skb fragment length and TSO segment length */
4290 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
4291 	skb_proc -= tso_cmn_info.eit_hdr_len;
4292 
4293 	/* get the address to the next tso fragment */
4294 	tso_frag_vaddr = skb->data +
4295 			 tso_cmn_info.eit_hdr_len +
4296 			 byte_8_align_offset;
4297 	/* get the length of the next tso fragment */
4298 	tso_frag_len = min(skb_frag_len, tso_seg_size);
4299 
4300 	if (tso_frag_len != 0) {
4301 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
4302 					osdev, tso_frag_vaddr, tso_frag_len,
4303 					QDF_DMA_TO_DEVICE);
4304 		if (qdf_unlikely(!tso_frag_paddr))
4305 			return 0;
4306 	}
4307 
4308 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
4309 		__LINE__, skb_frag_len, tso_frag_len);
4310 	num_seg = tso_info->num_segs;
4311 	tso_info->num_segs = 0;
4312 	tso_info->is_tso = 1;
4313 
4314 	while (num_seg && curr_seg) {
4315 		int i = 1; /* tso fragment index */
4316 		uint8_t more_tso_frags = 1;
4317 
4318 		curr_seg->seg.num_frags = 0;
4319 		tso_info->num_segs++;
4320 		total_num_seg->num_seg.tso_cmn_num_seg++;
4321 
4322 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
4323 						 &tso_cmn_info);
4324 
4325 		/* If TCP PSH flag is set, set it in the last or only segment */
4326 		if (num_seg == 1)
4327 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
4328 
4329 		if (unlikely(skb_proc == 0))
4330 			return tso_info->num_segs;
4331 
4332 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
4333 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
4334 		/* frag len is added to ip_len in the while loop below */
4335 
4336 		curr_seg->seg.num_frags++;
4337 
4338 		while (more_tso_frags) {
4339 			if (tso_frag_len != 0) {
4340 				curr_seg->seg.tso_frags[i].vaddr =
4341 					tso_frag_vaddr;
4342 				curr_seg->seg.tso_frags[i].length =
4343 					tso_frag_len;
4344 				curr_seg->seg.total_len += tso_frag_len;
4345 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
4346 				curr_seg->seg.num_frags++;
4347 				skb_proc = skb_proc - tso_frag_len;
4348 
4349 				/* increment the TCP sequence number */
4350 
4351 				tso_cmn_info.tcp_seq_num += tso_frag_len;
4352 				curr_seg->seg.tso_frags[i].paddr =
4353 					tso_frag_paddr;
4354 
4355 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
4356 			}
4357 
4358 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
4359 					__func__, __LINE__,
4360 					i,
4361 					tso_frag_len,
4362 					curr_seg->seg.total_len,
4363 					curr_seg->seg.tso_frags[i].vaddr);
4364 
4365 			/* if there is no more data left in the skb */
4366 			if (!skb_proc)
4367 				return tso_info->num_segs;
4368 
4369 			/* get the next payload fragment information */
4370 			/* check if there are more fragments in this segment */
4371 			if (tso_frag_len < tso_seg_size) {
4372 				more_tso_frags = 1;
4373 				if (tso_frag_len != 0) {
4374 					tso_seg_size = tso_seg_size -
4375 						tso_frag_len;
4376 					i++;
4377 					if (curr_seg->seg.num_frags ==
4378 								FRAG_NUM_MAX) {
4379 						more_tso_frags = 0;
4380 						/*
4381 						 * reset i and the tso
4382 						 * payload size
4383 						 */
4384 						i = 1;
4385 						tso_seg_size =
4386 							skb_shinfo(skb)->
4387 								gso_size;
4388 					}
4389 				}
4390 			} else {
4391 				more_tso_frags = 0;
4392 				/* reset i and the tso payload size */
4393 				i = 1;
4394 				tso_seg_size = skb_shinfo(skb)->gso_size;
4395 			}
4396 
4397 			/* if the next fragment is contiguous */
4398 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
4399 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4400 				skb_frag_len = skb_frag_len - tso_frag_len;
4401 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4402 
4403 			} else { /* the next fragment is not contiguous */
4404 				if (skb_shinfo(skb)->nr_frags == 0) {
4405 					qdf_info("TSO: nr_frags == 0!");
4406 					qdf_assert(0);
4407 					return 0;
4408 				}
4409 				if (j >= skb_shinfo(skb)->nr_frags) {
4410 					qdf_info("TSO: nr_frags %d j %d",
4411 						 skb_shinfo(skb)->nr_frags, j);
4412 					qdf_assert(0);
4413 					return 0;
4414 				}
4415 				frag = &skb_shinfo(skb)->frags[j];
4416 				skb_frag_len = skb_frag_size(frag);
4417 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4418 				tso_frag_vaddr = skb_frag_address_safe(frag);
4419 				j++;
4420 			}
4421 
4422 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4423 				__func__, __LINE__, skb_frag_len, tso_frag_len,
4424 				tso_seg_size);
4425 
4426 			if (!(tso_frag_vaddr)) {
4427 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
4428 						__func__);
4429 				return 0;
4430 			}
4431 
4432 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
4433 						osdev, tso_frag_vaddr,
4434 						tso_frag_len,
4435 						QDF_DMA_TO_DEVICE);
4436 			if (qdf_unlikely(!tso_frag_paddr))
4437 				return 0;
4438 		}
4439 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4440 				curr_seg->seg.tso_flags.tcp_seq_num);
4441 		num_seg--;
4442 		/* if TCP FIN flag was set, set it in the last segment */
4443 		if (!num_seg)
4444 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4445 
4446 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4447 		curr_seg = curr_seg->next;
4448 	}
4449 	return tso_info->num_segs;
4450 }
4451 qdf_export_symbol(__qdf_nbuf_get_tso_info);
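
/*
 * Caller sketch for __qdf_nbuf_get_tso_info() (illustrative only, not a
 * definitive contract): the segment lists are expected to be allocated and
 * linked by the caller, and tso_info->num_segs primed with the number of
 * available segment elements before the call, e.g.
 *
 *	struct qdf_tso_info_t tso_info = {0};
 *
 *	tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *	tso_info.tso_seg_list = <num_segs linked qdf_tso_seg_elem_t>;
 *	tso_info.tso_num_seg_list = <one qdf_tso_num_seg_elem_t>;
 *	if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *		<unmap any already-mapped segments and drop the frame>;
 *
 * On success the return value (and tso_info.num_segs) is the number of
 * segments actually filled in.
 */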
4452 
4453 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
4454 			  struct qdf_tso_seg_elem_t *tso_seg,
4455 			  bool is_last_seg)
4456 {
4457 	uint32_t num_frags = 0;
4458 
4459 	if (tso_seg->seg.num_frags > 0)
4460 		num_frags = tso_seg->seg.num_frags - 1;
4461 
4462 	/* Number of frags in a TSO seg cannot be less than 2 */
4463 	if (num_frags < 1) {
4464 		/*
4465 		 * If Num of frags is 1 in a tso seg but is_last_seg true,
4466 		 * this may happen when qdf_nbuf_get_tso_info failed,
4467 		 * do dma unmap for the 0th frag in this seg.
4468 		 */
4469 		if (is_last_seg && tso_seg->seg.num_frags == 1)
4470 			goto last_seg_free_first_frag;
4471 
4472 		qdf_assert(0);
4473 		qdf_err("ERROR: num of frags in a tso segment is %d",
4474 			(num_frags + 1));
4475 		return;
4476 	}
4477 
4478 	while (num_frags) {
4479 		/*Do dma unmap the tso seg except the 0th frag */
4480 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
4481 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4482 				num_frags);
4483 			qdf_assert(0);
4484 			return;
4485 		}
4486 		qdf_nbuf_tso_unmap_frag(
4487 			osdev,
4488 			tso_seg->seg.tso_frags[num_frags].paddr,
4489 			tso_seg->seg.tso_frags[num_frags].length,
4490 			QDF_DMA_TO_DEVICE);
4491 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
4492 		num_frags--;
4493 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4494 	}
4495 
4496 last_seg_free_first_frag:
4497 	if (is_last_seg) {
4498 		/*Do dma unmap for the tso seg 0th frag */
4499 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
4500 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4501 			qdf_assert(0);
4502 			return;
4503 		}
4504 		qdf_nbuf_tso_unmap_frag(osdev,
4505 					tso_seg->seg.tso_frags[0].paddr,
4506 					tso_seg->seg.tso_frags[0].length,
4507 					QDF_DMA_TO_DEVICE);
4508 		tso_seg->seg.tso_frags[0].paddr = 0;
4509 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4510 	}
4511 }
4512 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
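
/*
 * Note on the unmap order above: tso_frags[0] of every segment points at
 * the shared EIT header mapping created while building the common segment
 * info, so it is unmapped only once, when is_last_seg is true, while the
 * per-segment payload frags (index 1 and up) are unmapped for every segment.
 */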
4513 
4514 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4515 {
4516 	size_t packet_len;
4517 
4518 	packet_len = skb->len -
4519 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4520 		 tcp_hdrlen(skb));
4521 
4522 	return packet_len;
4523 }
4524 
4525 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
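
/*
 * Example (illustrative numbers): for a 1514-byte Ethernet frame carrying
 * IPv4 TCP with no options, the EIT header is 14 + 20 + 20 = 54 bytes, so
 * __qdf_nbuf_get_tcp_payload_len() returns 1514 - 54 = 1460 bytes of TCP
 * payload.
 */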
4526 
4527 #ifndef BUILD_X86
4528 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4529 {
4530 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4531 	uint32_t remainder, num_segs = 0;
4532 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4533 	uint8_t frags_per_tso = 0;
4534 	uint32_t skb_frag_len = 0;
4535 	uint32_t eit_hdr_len = (skb_transport_header(skb)
4536 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4537 	skb_frag_t *frag = NULL;
4538 	int j = 0;
4539 	uint32_t temp_num_seg = 0;
4540 
4541 	/* length of the first chunk of data in the skb minus eit header*/
4542 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4543 
4544 	/* Calculate num of segs for skb's first chunk of data*/
4545 	remainder = skb_frag_len % tso_seg_size;
4546 	num_segs = skb_frag_len / tso_seg_size;
4547 	/*
4548 	 * Remainder non-zero and nr_frags zero implies end of skb data.
4549 	 * In that case, one more tso seg is required to accommodate
4550 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
4551 	 * then remaining data will be accommodated while doing the calculation
4552 	 * for nr_frags data. Hence, frags_per_tso++.
4553 	 */
4554 	if (remainder) {
4555 		if (!skb_nr_frags)
4556 			num_segs++;
4557 		else
4558 			frags_per_tso++;
4559 	}
4560 
4561 	while (skb_nr_frags) {
4562 		if (j >= skb_shinfo(skb)->nr_frags) {
4563 			qdf_info("TSO: nr_frags %d j %d",
4564 				 skb_shinfo(skb)->nr_frags, j);
4565 			qdf_assert(0);
4566 			return 0;
4567 		}
4568 		/*
4569 		 * Calculate the number of TSO segs for the nr_frags data:
4570 		 * get the length of each frag in skb_frag_len and add it to
4571 		 * the remainder. Get the number of segments by dividing by
4572 		 * tso_seg_size and compute the new remainder.
4573 		 * Decrement the nr_frags value and keep looping over all
4574 		 * the skb fragments.
4575 		 */
4576 		frag = &skb_shinfo(skb)->frags[j];
4577 		skb_frag_len = skb_frag_size(frag);
4578 		temp_num_seg = num_segs;
4579 		remainder += skb_frag_len;
4580 		num_segs += remainder / tso_seg_size;
4581 		remainder = remainder % tso_seg_size;
4582 		skb_nr_frags--;
4583 		if (remainder) {
4584 			if (num_segs > temp_num_seg)
4585 				frags_per_tso = 0;
4586 			/*
4587 			 * Increment frags_per_tso whenever the remainder is
4588 			 * positive. If frags_per_tso reaches (max - 1)
4589 			 * [the first frag always carries the EIT header,
4590 			 * hence max - 1], increment num_segs as no more data
4591 			 * can fit in the current TSO seg. Reset the remainder
4592 			 * and frags_per_tso and keep looping.
4593 			 */
4594 			frags_per_tso++;
4595 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4596 				num_segs++;
4597 				frags_per_tso = 0;
4598 				remainder = 0;
4599 			}
4600 			/*
4601 			 * If this is the last skb frag and still remainder is
4602 			 * non-zero(frags_per_tso is not reached to the max-1)
4603 			 * then increment the num_segs to take care of the
4604 			 * remaining length.
4605 			 */
4606 			if (!skb_nr_frags && remainder) {
4607 				num_segs++;
4608 				frags_per_tso = 0;
4609 			}
4610 		} else {
4611 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4612 			frags_per_tso = 0;
4613 		}
4614 		j++;
4615 	}
4616 
4617 	return num_segs;
4618 }
4619 #elif !defined(QCA_WIFI_QCN9000)
4620 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4621 {
4622 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4623 	skb_frag_t *frag = NULL;
4624 
4625 	/*
4626 	 * Check if the head SKB or any of the frags are allocated in the
4627 	 * < 0x50000000 region, which cannot be accessed by the target
4628 	 */
4629 	if (virt_to_phys(skb->data) < 0x50000040) {
4630 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4631 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4632 				virt_to_phys(skb->data));
4633 		goto fail;
4634 
4635 	}
4636 
4637 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4638 		frag = &skb_shinfo(skb)->frags[i];
4639 
4640 		if (!frag)
4641 			goto fail;
4642 
4643 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4644 			goto fail;
4645 	}
4646 
4647 
4648 	gso_size = skb_shinfo(skb)->gso_size;
4649 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4650 			+ tcp_hdrlen(skb));
4651 	while (tmp_len) {
4652 		num_segs++;
4653 		if (tmp_len > gso_size)
4654 			tmp_len -= gso_size;
4655 		else
4656 			break;
4657 	}
4658 
4659 	return num_segs;
4660 
4661 	/*
4662 	 * Do not free this frame, just do socket level accounting
4663 	 * so that this is not reused.
4664 	 */
4665 fail:
4666 	if (skb->sk)
4667 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4668 
4669 	return 0;
4670 }
4671 #else
4672 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4673 {
4674 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4675 	skb_frag_t *frag = NULL;
4676 
4677 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4678 		frag = &skb_shinfo(skb)->frags[i];
4679 
4680 		if (!frag)
4681 			goto fail;
4682 	}
4683 
4684 	gso_size = skb_shinfo(skb)->gso_size;
4685 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4686 			+ tcp_hdrlen(skb));
4687 	while (tmp_len) {
4688 		num_segs++;
4689 		if (tmp_len > gso_size)
4690 			tmp_len -= gso_size;
4691 		else
4692 			break;
4693 	}
4694 
4695 	return num_segs;
4696 
4697 	/*
4698 	 * Do not free this frame, just do socket level accounting
4699 	 * so that this is not reused.
4700 	 */
4701 fail:
4702 	if (skb->sk)
4703 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4704 
4705 	return 0;
4706 }
4707 #endif
4708 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
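
/*
 * Example of the segment-count math above (illustrative): with
 * gso_size = 1460 and 5840 bytes of TCP payload following the EIT header
 * in the linear area, 5840 / 1460 gives 4 segments with zero remainder;
 * with 6000 bytes instead, the 160-byte remainder at the end of the skb
 * data adds one more segment, giving 5.
 */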
4709 
4710 #endif /* FEATURE_TSO */
4711 
4712 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4713 			  uint32_t *lo, uint32_t *hi)
4714 {
4715 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4716 		*lo = lower_32_bits(dmaaddr);
4717 		*hi = upper_32_bits(dmaaddr);
4718 	} else {
4719 		*lo = dmaaddr;
4720 		*hi = 0;
4721 	}
4722 }
4723 
4724 qdf_export_symbol(__qdf_dmaaddr_to_32s);
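
/*
 * Example (illustrative): with a 64-bit qdf_dma_addr_t, the address
 * 0x123456780 is split into *lo = 0x23456780 and *hi = 0x1; with a 32-bit
 * qdf_dma_addr_t the address is returned unchanged in *lo and *hi is 0.
 */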
4725 
4726 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4727 {
4728 	qdf_nbuf_users_inc(&skb->users);
4729 	return skb;
4730 }
4731 qdf_export_symbol(__qdf_nbuf_inc_users);
4732 
4733 int __qdf_nbuf_get_users(struct sk_buff *skb)
4734 {
4735 	return qdf_nbuf_users_read(&skb->users);
4736 }
4737 qdf_export_symbol(__qdf_nbuf_get_users);
4738 
4739 void __qdf_nbuf_ref(struct sk_buff *skb)
4740 {
4741 	skb_get(skb);
4742 }
4743 qdf_export_symbol(__qdf_nbuf_ref);
4744 
4745 int __qdf_nbuf_shared(struct sk_buff *skb)
4746 {
4747 	return skb_shared(skb);
4748 }
4749 qdf_export_symbol(__qdf_nbuf_shared);
4750 
4751 QDF_STATUS
4752 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4753 {
4754 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4755 	/*
4756 	 * The driver can advertise its SG capability; that must be handled.
4757 	 * Use bounce buffers if they are available.
4758 	 */
4759 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4760 	if (!(*dmap))
4761 		error = QDF_STATUS_E_NOMEM;
4762 
4763 	return error;
4764 }
4765 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4766 
4767 void
4768 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4769 {
4770 	kfree(dmap);
4771 }
4772 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
4773 
4774 #ifdef QDF_OS_DEBUG
4775 QDF_STATUS
4776 __qdf_nbuf_map_nbytes(
4777 	qdf_device_t osdev,
4778 	struct sk_buff *skb,
4779 	qdf_dma_dir_t dir,
4780 	int nbytes)
4781 {
4782 	struct skb_shared_info  *sh = skb_shinfo(skb);
4783 
4784 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4785 
4786 	/*
4787 	 * Assume there's only a single fragment.
4788 	 * To support multiple fragments, it would be necessary to change
4789 	 * adf_nbuf_t to be a separate object that stores meta-info
4790 	 * (including the bus address for each fragment) and a pointer
4791 	 * to the underlying sk_buff.
4792 	 */
4793 	qdf_assert(sh->nr_frags == 0);
4794 
4795 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4796 }
4797 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4798 #else
4799 QDF_STATUS
4800 __qdf_nbuf_map_nbytes(
4801 	qdf_device_t osdev,
4802 	struct sk_buff *skb,
4803 	qdf_dma_dir_t dir,
4804 	int nbytes)
4805 {
4806 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4807 }
4808 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4809 #endif
4810 void
4811 __qdf_nbuf_unmap_nbytes(
4812 	qdf_device_t osdev,
4813 	struct sk_buff *skb,
4814 	qdf_dma_dir_t dir,
4815 	int nbytes)
4816 {
4817 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4818 
4819 	/*
4820 	 * Assume there's a single fragment.
4821 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4822 	 */
4823 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4824 }
4825 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4826 
4827 void
4828 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4829 {
4830 	qdf_assert(bmap->mapped);
4831 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4832 
4833 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4834 			sizeof(struct __qdf_segment));
4835 	sg->nsegs = bmap->nsegs;
4836 }
4837 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4838 
4839 #if defined(__QDF_SUPPORT_FRAG_MEM)
4840 void
4841 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4842 {
4843 	struct skb_shared_info *sh = skb_shinfo(skb);
4844 	int i;
4845 
4846 	qdf_assert(skb);
4847 	sg->sg_segs[0].vaddr = skb->data;
4848 	sg->sg_segs[0].len   = skb->len;
4849 	sg->nsegs            = 1;
4850 
4851 	for (i = 1; i <= sh->nr_frags; i++) {
4852 		skb_frag_t *f = &sh->frags[i - 1];
4853 		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
4854 						   f->page_offset);
4855 		sg->sg_segs[i].len = f->size;
4856 		qdf_assert(i < QDF_MAX_SGLIST);
4857 	}
4858 	sg->nsegs += sh->nr_frags;
4859 }
4860 qdf_export_symbol(__qdf_nbuf_frag_info);
4861 #else
4862 #ifdef QDF_OS_DEBUG
4863 void
4864 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4865 {
4866 
4867 	struct skb_shared_info  *sh = skb_shinfo(skb);
4868 
4869 	qdf_assert(skb);
4870 	sg->sg_segs[0].vaddr = skb->data;
4871 	sg->sg_segs[0].len   = skb->len;
4872 	sg->nsegs            = 1;
4873 
4874 	qdf_assert(sh->nr_frags == 0);
4875 }
4876 qdf_export_symbol(__qdf_nbuf_frag_info);
4877 #else
4878 void
4879 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4880 {
4881 	sg->sg_segs[0].vaddr = skb->data;
4882 	sg->sg_segs[0].len   = skb->len;
4883 	sg->nsegs            = 1;
4884 }
4885 qdf_export_symbol(__qdf_nbuf_frag_info);
4886 #endif
4887 #endif
4888 uint32_t
4889 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4890 {
4891 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4892 	const skb_frag_t *frag = sh->frags + cur_frag;
4893 
4894 	return skb_frag_size(frag);
4895 }
4896 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4897 
4898 #ifdef A_SIMOS_DEVHOST
4899 QDF_STATUS __qdf_nbuf_frag_map(
4900 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4901 	int offset, qdf_dma_dir_t dir, int cur_frag)
4902 {
4903 	int32_t paddr, frag_len;
4904 
4905 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4906 	return QDF_STATUS_SUCCESS;
4907 }
4908 qdf_export_symbol(__qdf_nbuf_frag_map);
4909 #else
4910 QDF_STATUS __qdf_nbuf_frag_map(
4911 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4912 	int offset, qdf_dma_dir_t dir, int cur_frag)
4913 {
4914 	dma_addr_t paddr, frag_len;
4915 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4916 	const skb_frag_t *frag = sh->frags + cur_frag;
4917 
4918 	frag_len = skb_frag_size(frag);
4919 
4920 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4921 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4922 					__qdf_dma_dir_to_os(dir));
4923 	return dma_mapping_error(osdev->dev, paddr) ?
4924 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4925 }
4926 qdf_export_symbol(__qdf_nbuf_frag_map);
4927 #endif
4928 void
4929 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4930 {
4931 	return;
4932 }
4933 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4934 
4935 /**
4936  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4937  * @osdev: os device
4938  * @buf: sk buff
4939  * @dir: direction
4940  *
4941  * Return: none
4942  */
4943 #if defined(A_SIMOS_DEVHOST)
4944 static void __qdf_nbuf_sync_single_for_cpu(
4945 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4946 {
4947 	return;
4948 }
4949 #else
4950 static void __qdf_nbuf_sync_single_for_cpu(
4951 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4952 {
4953 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
4954 		qdf_err("ERROR: NBUF mapped physical address is NULL");
4955 		return;
4956 	}
4957 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4958 		skb_end_offset(buf) - skb_headroom(buf),
4959 		__qdf_dma_dir_to_os(dir));
4960 }
4961 #endif
4962 
4963 void
4964 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4965 	struct sk_buff *skb, qdf_dma_dir_t dir)
4966 {
4967 	qdf_assert(
4968 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4969 
4970 	/*
4971 	 * Assume there's a single fragment.
4972 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4973 	 */
4974 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4975 }
4976 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4977 
4978 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4979 /**
4980  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4981  * @rx_status: Pointer to rx_status.
4982  * @rtap_buf: Buf to which VHT info has to be updated.
4983  * @rtap_len: Current length of radiotap buffer
4984  *
4985  * Return: Length of radiotap after VHT flags updated.
4986  */
4987 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4988 					struct mon_rx_status *rx_status,
4989 					int8_t *rtap_buf,
4990 					uint32_t rtap_len)
4991 {
4992 	uint16_t vht_flags = 0;
4993 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4994 
4995 	rtap_len = qdf_align(rtap_len, 2);
4996 
4997 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4998 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4999 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
5000 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
5001 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
5002 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
5003 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
5004 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
5005 	rtap_len += 2;
5006 
5007 	rtap_buf[rtap_len] |=
5008 		(rx_status->is_stbc ?
5009 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
5010 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
5011 		(rx_status->ldpc ?
5012 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
5013 		(rx_status->beamformed ?
5014 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
5015 	rtap_len += 1;
5016 
5017 	if (!rx_user_status) {
5018 		switch (rx_status->vht_flag_values2) {
5019 		case IEEE80211_RADIOTAP_VHT_BW_20:
5020 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5021 			break;
5022 		case IEEE80211_RADIOTAP_VHT_BW_40:
5023 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5024 			break;
5025 		case IEEE80211_RADIOTAP_VHT_BW_80:
5026 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5027 			break;
5028 		case IEEE80211_RADIOTAP_VHT_BW_160:
5029 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5030 			break;
5031 		}
5032 		rtap_len += 1;
5033 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
5034 		rtap_len += 1;
5035 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
5036 		rtap_len += 1;
5037 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
5038 		rtap_len += 1;
5039 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
5040 		rtap_len += 1;
5041 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
5042 		rtap_len += 1;
5043 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
5044 		rtap_len += 1;
5045 		put_unaligned_le16(rx_status->vht_flag_values6,
5046 				   &rtap_buf[rtap_len]);
5047 		rtap_len += 2;
5048 	} else {
5049 		switch (rx_user_status->vht_flag_values2) {
5050 		case IEEE80211_RADIOTAP_VHT_BW_20:
5051 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5052 			break;
5053 		case IEEE80211_RADIOTAP_VHT_BW_40:
5054 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5055 			break;
5056 		case IEEE80211_RADIOTAP_VHT_BW_80:
5057 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5058 			break;
5059 		case IEEE80211_RADIOTAP_VHT_BW_160:
5060 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5061 			break;
5062 		}
5063 		rtap_len += 1;
5064 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
5065 		rtap_len += 1;
5066 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
5067 		rtap_len += 1;
5068 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
5069 		rtap_len += 1;
5070 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
5071 		rtap_len += 1;
5072 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
5073 		rtap_len += 1;
5074 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
5075 		rtap_len += 1;
5076 		put_unaligned_le16(rx_user_status->vht_flag_values6,
5077 				   &rtap_buf[rtap_len]);
5078 		rtap_len += 2;
5079 	}
5080 
5081 	return rtap_len;
5082 }
5083 
5084 /**
5085  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
5086  * @rx_status: Pointer to rx_status.
5087  * @rtap_buf: buffer to which radiotap has to be updated
5088  * @rtap_len: radiotap length
5089  *
5090  * Update high-efficiency (HE, 11ax) fields in the radiotap header
5091  *
5092  * Return: length of rtap_len updated.
5093  */
5094 static unsigned int
5095 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5096 				     int8_t *rtap_buf, uint32_t rtap_len)
5097 {
5098 	/*
5099 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
5100 	 * Enable all "known" HE radiotap flags for now
5101 	 */
5102 
5103 	rtap_len = qdf_align(rtap_len, 2);
5104 
5105 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5106 	rtap_len += 2;
5107 
5108 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5109 	rtap_len += 2;
5110 
5111 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5112 	rtap_len += 2;
5113 
5114 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5115 	rtap_len += 2;
5116 
5117 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5118 	rtap_len += 2;
5119 
5120 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5121 	rtap_len += 2;
5122 
5123 	return rtap_len;
5124 }
5125 
5126 
5127 /**
5128  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
5129  * @rx_status: Pointer to rx_status.
5130  * @rtap_buf: buffer to which radiotap has to be updated
5131  * @rtap_len: radiotap length
5132  *
5133  * Update HE-MU fields in the radiotap header
5134  *
5135  * Return: length of rtap_len updated.
5136  */
5137 static unsigned int
5138 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
5139 				     int8_t *rtap_buf, uint32_t rtap_len)
5140 {
5141 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5142 
5143 	rtap_len = qdf_align(rtap_len, 2);
5144 
5145 	/*
5146 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
5147 	 * Enable all "known" he-mu radiotap flags for now
5148 	 */
5149 
5150 	if (!rx_user_status) {
5151 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5152 		rtap_len += 2;
5153 
5154 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5155 		rtap_len += 2;
5156 
5157 		rtap_buf[rtap_len] = rx_status->he_RU[0];
5158 		rtap_len += 1;
5159 
5160 		rtap_buf[rtap_len] = rx_status->he_RU[1];
5161 		rtap_len += 1;
5162 
5163 		rtap_buf[rtap_len] = rx_status->he_RU[2];
5164 		rtap_len += 1;
5165 
5166 		rtap_buf[rtap_len] = rx_status->he_RU[3];
5167 		rtap_len += 1;
5168 	} else {
5169 		put_unaligned_le16(rx_user_status->he_flags1,
5170 				   &rtap_buf[rtap_len]);
5171 		rtap_len += 2;
5172 
5173 		put_unaligned_le16(rx_user_status->he_flags2,
5174 				   &rtap_buf[rtap_len]);
5175 		rtap_len += 2;
5176 
5177 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
5178 		rtap_len += 1;
5179 
5180 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
5181 		rtap_len += 1;
5182 
5183 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
5184 		rtap_len += 1;
5185 
5186 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
5187 		rtap_len += 1;
5188 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5189 			  rx_user_status->he_flags1,
5190 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
5191 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
5192 			  rx_user_status->he_RU[3]);
5193 	}
5194 
5195 	return rtap_len;
5196 }
5197 
5198 /**
5199  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
5200  * @rx_status: Pointer to rx_status.
5201  * @rtap_buf: buffer to which radiotap has to be updated
5202  * @rtap_len: radiotap length
5203  *
5204  * Update HE-MU-OTHER fields in the radiotap header
5205  *
5206  * Return: length of rtap_len updated.
5207  */
5208 static unsigned int
5209 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
5210 				     int8_t *rtap_buf, uint32_t rtap_len)
5211 {
5212 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5213 
5214 	rtap_len = qdf_align(rtap_len, 2);
5215 
5216 	/*
5217 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
5218 	 * Enable all "known" he-mu-other radiotap flags for now
5219 	 */
5220 	if (!rx_user_status) {
5221 		put_unaligned_le16(rx_status->he_per_user_1,
5222 				   &rtap_buf[rtap_len]);
5223 		rtap_len += 2;
5224 
5225 		put_unaligned_le16(rx_status->he_per_user_2,
5226 				   &rtap_buf[rtap_len]);
5227 		rtap_len += 2;
5228 
5229 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
5230 		rtap_len += 1;
5231 
5232 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
5233 		rtap_len += 1;
5234 	} else {
5235 		put_unaligned_le16(rx_user_status->he_per_user_1,
5236 				   &rtap_buf[rtap_len]);
5237 		rtap_len += 2;
5238 
5239 		put_unaligned_le16(rx_user_status->he_per_user_2,
5240 				   &rtap_buf[rtap_len]);
5241 		rtap_len += 2;
5242 
5243 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5244 		rtap_len += 1;
5245 
5246 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5247 		rtap_len += 1;
5248 	}
5249 
5250 	return rtap_len;
5251 }
5252 
5253 /**
5254  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5255  *						from rx_status
5256  * @rx_status: Pointer to rx_status.
5257  * @rtap_buf: buffer to which radiotap has to be updated
5258  * @rtap_len: radiotap length
5259  *
5260  * Update U-SIG (11be) fields in the radiotap header
5261  *
5262  * Return: length of rtap_len updated.
5263  */
5264 static unsigned int
5265 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5266 				    int8_t *rtap_buf, uint32_t rtap_len)
5267 {
5268 	/*
5269 	 * IEEE80211_RADIOTAP_USIG:
5270 	 *		u32, u32, u32
5271 	 */
5272 	rtap_len = qdf_align(rtap_len, 4);
5273 
5274 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5275 	rtap_len += 4;
5276 
5277 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5278 	rtap_len += 4;
5279 
5280 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5281 	rtap_len += 4;
5282 
5283 	qdf_rl_debug("U-SIG data %x %x %x",
5284 		     rx_status->usig_common, rx_status->usig_value,
5285 		     rx_status->usig_mask);
5286 
5287 	return rtap_len;
5288 }
5289 
5290 /**
5291  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
5292  *					from rx_status
5293  * @rx_status: Pointer to rx_status.
5294  * @rtap_buf: buffer to which radiotap has to be updated
5295  * @rtap_len: radiotap length
5296  *
5297  * Update Extremely High Throughput (EHT, 11be) fields in the radiotap header
5298  *
5299  * Return: length of rtap_len updated.
5300  */
5301 static unsigned int
5302 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5303 				   int8_t *rtap_buf, uint32_t rtap_len)
5304 {
5305 	uint32_t user;
5306 
5307 	/*
5308 	 * IEEE80211_RADIOTAP_EHT:
5309 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5310 	 */
5311 	rtap_len = qdf_align(rtap_len, 4);
5312 
5313 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5314 	rtap_len += 4;
5315 
5316 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5317 	rtap_len += 4;
5318 
5319 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5320 	rtap_len += 4;
5321 
5322 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5323 	rtap_len += 4;
5324 
5325 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5326 	rtap_len += 4;
5327 
5328 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5329 	rtap_len += 4;
5330 
5331 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5332 	rtap_len += 4;
5333 
5334 	for (user = 0; user < EHT_USER_INFO_LEN &&
5335 	     rx_status->num_eht_user_info_valid &&
5336 	     user < rx_status->num_eht_user_info_valid; user++) {
5337 		put_unaligned_le32(rx_status->eht_user_info[user],
5338 				   &rtap_buf[rtap_len]);
5339 		rtap_len += 4;
5340 	}
5341 
5342 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5343 		     rx_status->eht_known, rx_status->eht_data[0],
5344 		     rx_status->eht_data[1], rx_status->eht_data[2],
5345 		     rx_status->eht_data[3], rx_status->eht_data[4],
5346 		     rx_status->eht_data[5]);
5347 
5348 	return rtap_len;
5349 }
5350 
5351 #define IEEE80211_RADIOTAP_TX_STATUS 0
5352 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5353 #define IEEE80211_RADIOTAP_EXTENSION2 2
5354 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5355 
5356 /**
5357  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5358  * @rx_status: Pointer to rx_status.
5359  * @rtap_buf: Buf to which AMPDU info has to be updated.
5360  * @rtap_len: Current length of radiotap buffer
5361  *
5362  * Return: Length of radiotap after AMPDU flags updated.
5363  */
5364 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5365 					struct mon_rx_status *rx_status,
5366 					uint8_t *rtap_buf,
5367 					uint32_t rtap_len)
5368 {
5369 	/*
5370 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5371 	 * The first 32 bits of the AMPDU status carry the reference number
5372 	 */
5373 
5374 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5375 	uint16_t ampdu_flags = 0;
5376 	uint16_t ampdu_reserved_flags = 0;
5377 
5378 	rtap_len = qdf_align(rtap_len, 4);
5379 
5380 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5381 	rtap_len += 4;
5382 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5383 	rtap_len += 2;
5384 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5385 	rtap_len += 2;
5386 
5387 	return rtap_len;
5388 }
5389 
5390 #ifdef DP_MON_RSSI_IN_DBM
5391 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5392 (rx_status->rssi_comb)
5393 #else
5394 #ifdef QCA_RSSI_DB2DBM
5395 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5396 (((rx_status)->rssi_dbm_conv_support) ? \
5397 ((rx_status)->rssi_comb + (rx_status)->rssi_offset) :\
5398 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5399 #else
5400 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5401 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5402 #endif
5403 #endif
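
/*
 * Example (illustrative values): with the fallback definition above
 * (neither DP_MON_RSSI_IN_DBM nor QCA_RSSI_DB2DBM defined), an rssi_comb
 * of 58 dB above a chan_noise_floor of -96 dBm is reported as
 * 58 + (-96) = -38 dBm in the radiotap DBM_ANTSIGNAL field.
 */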
5404 
5405 /**
5406  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5407  * @rx_status: Pointer to rx_status.
5408  * @rtap_buf: Buf to which tx info has to be updated.
5409  * @rtap_len: Current length of radiotap buffer
5410  *
5411  * Return: Length of radiotap after tx flags updated.
5412  */
5413 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5414 						struct mon_rx_status *rx_status,
5415 						uint8_t *rtap_buf,
5416 						uint32_t rtap_len)
5417 {
5418 	/*
5419 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5420 	 */
5421 
5422 	uint16_t tx_flags = 0;
5423 
5424 	rtap_len = qdf_align(rtap_len, 2);
5425 
5426 	switch (rx_status->tx_status) {
5427 	case RADIOTAP_TX_STATUS_FAIL:
5428 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5429 		break;
5430 	case RADIOTAP_TX_STATUS_NOACK:
5431 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5432 		break;
5433 	}
5434 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5435 	rtap_len += 2;
5436 
5437 	return rtap_len;
5438 }
5439 
5440 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5441 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5442 {
5443 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5444 	struct ieee80211_radiotap_header *rthdr =
5445 		(struct ieee80211_radiotap_header *)rtap_buf;
5446 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5447 	uint32_t rtap_len = rtap_hdr_len;
5448 	uint8_t length = rtap_len;
5449 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5450 	struct qdf_radiotap_ext2 *rtap_ext2;
5451 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5452 
5453 	/* per user info */
5454 	qdf_le32_t *it_present;
5455 	uint32_t it_present_val;
5456 	bool radiotap_ext1_hdr_present = false;
5457 
5458 	it_present = &rthdr->it_present;
5459 
5460 	/* Adding Extended Header space */
5461 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5462 	    rx_status->usig_flags || rx_status->eht_flags) {
5463 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5464 		rtap_len = rtap_hdr_len;
5465 		radiotap_ext1_hdr_present = true;
5466 	}
5467 
5468 	length = rtap_len;
5469 
5470 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5471 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5472 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5473 	rtap_len += 8;
5474 
5475 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5476 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5477 
5478 	if (rx_status->rs_fcs_err)
5479 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5480 
5481 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5482 	rtap_len += 1;
5483 
5484 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5485 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5486 	    !rx_status->he_flags && !rx_status->eht_flags) {
5487 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5488 		rtap_buf[rtap_len] = rx_status->rate;
5489 	} else
5490 		rtap_buf[rtap_len] = 0;
5491 	rtap_len += 1;
5492 
5493 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5494 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5495 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5496 	rtap_len += 2;
5497 	/* Channel flags. */
5498 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5499 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5500 	else
5501 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5502 	if (rx_status->cck_flag)
5503 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5504 	if (rx_status->ofdm_flag)
5505 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5506 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5507 	rtap_len += 2;
5508 
5509 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5510 	 *					(dBm)
5511 	 */
5512 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5513 	/*
5514 	 * rssi_comb is in dB relative to the noise floor; convert it to dBm
5515 	 * by normalizing against the noise floor of -96 dBm
5516 	 */
5517 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5518 	rtap_len += 1;
5519 
5520 	/* RX signal noise floor */
5521 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5522 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5523 	rtap_len += 1;
5524 
5525 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5526 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5527 	rtap_buf[rtap_len] = rx_status->nr_ant;
5528 	rtap_len += 1;
5529 
5530 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5531 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5532 		return 0;
5533 	}
5534 
5535 	/* update tx flags for pkt capture*/
5536 	if (rx_status->add_rtap_ext) {
5537 		it_present_val |=
5538 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5539 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5540 							     rtap_buf,
5541 							     rtap_len);
5542 
5543 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5544 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5545 			return 0;
5546 		}
5547 	}
5548 
5549 	if (rx_status->ht_flags) {
5550 		length = rtap_len;
5551 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
5552 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5553 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5554 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5555 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5556 		rtap_len += 1;
5557 
5558 		if (rx_status->sgi)
5559 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5560 		if (rx_status->bw)
5561 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5562 		else
5563 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5564 		rtap_len += 1;
5565 
5566 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5567 		rtap_len += 1;
5568 
5569 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5570 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5571 			return 0;
5572 		}
5573 	}
5574 
5575 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5576 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5577 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5578 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5579 								rtap_buf,
5580 								rtap_len);
5581 	}
5582 
5583 	if (rx_status->vht_flags) {
5584 		length = rtap_len;
5585 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5586 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5587 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5588 								rtap_buf,
5589 								rtap_len);
5590 
5591 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5592 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5593 			return 0;
5594 		}
5595 	}
5596 
5597 	if (rx_status->he_flags) {
5598 		length = rtap_len;
5599 		/* IEEE80211_RADIOTAP_HE */
5600 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5601 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5602 								rtap_buf,
5603 								rtap_len);
5604 
5605 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5606 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5607 			return 0;
5608 		}
5609 	}
5610 
5611 	if (rx_status->he_mu_flags) {
5612 		length = rtap_len;
5613 		/* IEEE80211_RADIOTAP_HE-MU */
5614 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5615 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5616 								rtap_buf,
5617 								rtap_len);
5618 
5619 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5620 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5621 			return 0;
5622 		}
5623 	}
5624 
5625 	if (rx_status->he_mu_other_flags) {
5626 		length = rtap_len;
5627 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5628 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5629 		rtap_len =
5630 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5631 								rtap_buf,
5632 								rtap_len);
5633 
5634 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5635 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5636 			return 0;
5637 		}
5638 	}
5639 
5640 	rtap_len = qdf_align(rtap_len, 2);
5641 	/*
5642 	 * Radiotap Vendor Namespace
5643 	 */
5644 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5645 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5646 					(rtap_buf + rtap_len);
5647 	/*
5648 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5649 	 */
5650 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5651 	/*
5652 	 * Namespace selector = 0
5653 	 * We will only have one namespace for now
5654 	 */
5655 	radiotap_vendor_ns_ath->hdr.selector = 0;
5656 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5657 					sizeof(*radiotap_vendor_ns_ath) -
5658 					sizeof(radiotap_vendor_ns_ath->hdr));
5659 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5660 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5661 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5662 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5663 				cpu_to_le32(rx_status->ppdu_timestamp);
5664 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5665 
5666 	/* Move to the next it_present word when ext1 fields follow */
5667 	if (radiotap_ext1_hdr_present) {
5668 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5669 		put_unaligned_le32(it_present_val, it_present);
5670 		it_present_val = 0;
5671 		it_present++;
5672 	}
5673 
5674 	/* Add Extension to Radiotap Header & corresponding data */
5675 	if (rx_status->add_rtap_ext) {
5676 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5677 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5678 
5679 		rtap_buf[rtap_len] = rx_status->tx_status;
5680 		rtap_len += 1;
5681 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5682 		rtap_len += 1;
5683 	}
5684 
5685 	/* Add Extension2 to Radiotap Header */
5686 	if (rx_status->add_rtap_ext2) {
5687 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5688 
5689 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5690 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5691 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5692 		if (!rx_user_status) {
5693 			rtap_ext2->tid = rx_status->tid;
5694 			rtap_ext2->start_seq = rx_status->start_seq;
5695 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5696 				     rx_status->ba_bitmap,
5697 				     8 * (sizeof(uint32_t)));
5698 		} else {
5699 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5700 
5701 			/* set default bitmap sz if not set */
5702 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5703 			rtap_ext2->tid = rx_user_status->tid;
5704 			rtap_ext2->start_seq = rx_user_status->start_seq;
5705 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5706 				     rx_user_status->ba_bitmap,
5707 				     ba_bitmap_sz * (sizeof(uint32_t)));
5708 		}
5709 
5710 		rtap_len += sizeof(*rtap_ext2);
5711 	}
5712 
5713 	if (rx_status->usig_flags) {
5714 		length = rtap_len;
5715 		/* IEEE80211_RADIOTAP_USIG */
5716 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5717 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5718 							       rtap_buf,
5719 							       rtap_len);
5720 
5721 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5722 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5723 			return 0;
5724 		}
5725 	}
5726 
5727 	if (rx_status->eht_flags) {
5728 		length = rtap_len;
5729 		/* IEEE80211_RADIOTAP_EHT */
5730 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5731 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5732 							      rtap_buf,
5733 							      rtap_len);
5734 
5735 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5736 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5737 			return 0;
5738 		}
5739 	}
5740 
5741 	put_unaligned_le32(it_present_val, it_present);
5742 	rthdr->it_len = cpu_to_le16(rtap_len);
5743 
5744 	if (headroom_sz < rtap_len) {
5745 		qdf_debug("DEBUG: Not enough space to update radiotap");
5746 		return 0;
5747 	}
5748 
5749 	qdf_nbuf_push_head(nbuf, rtap_len);
5750 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5751 	return rtap_len;
5752 }
5753 #else
5754 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5755 					struct mon_rx_status *rx_status,
5756 					int8_t *rtap_buf,
5757 					uint32_t rtap_len)
5758 {
5759 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5760 	return 0;
5761 }
5762 
5763 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5764 				      int8_t *rtap_buf, uint32_t rtap_len)
5765 {
5766 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5767 	return 0;
5768 }
5769 
5770 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5771 					struct mon_rx_status *rx_status,
5772 					uint8_t *rtap_buf,
5773 					uint32_t rtap_len)
5774 {
5775 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5776 	return 0;
5777 }
5778 
5779 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5780 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5781 {
5782 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5783 	return 0;
5784 }
5785 #endif
5786 qdf_export_symbol(qdf_nbuf_update_radiotap);
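
/*
 * Illustrative usage sketch for qdf_nbuf_update_radiotap() (editorial
 * example, not driver code): a monitor-mode completion path fills a
 * struct mon_rx_status and prepends the radiotap header before handing
 * the frame up. mon_status and mon_nbuf are hypothetical names; the
 * return value is the header length actually pushed, or 0 on failure.
 *
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&mon_status, mon_nbuf,
 *					    qdf_nbuf_headroom(mon_nbuf));
 *	if (!rtap_len) {
 *		// headroom too small or a radiotap field overflowed
 *		qdf_nbuf_free(mon_nbuf);
 *		return;
 *	}
 */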
5787 
5788 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5789 {
5790 	nbuf_free_cb = cb_func_ptr;
5791 }
5792 
5793 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
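
/*
 * Illustrative registration sketch (editorial example): a component that
 * needs to observe nbuf frees can install a callback once at init time.
 * The callback name is hypothetical and assumes qdf_nbuf_free_t receives
 * the nbuf being freed.
 *
 *	static void my_nbuf_free_notify(qdf_nbuf_t nbuf)
 *	{
 *		// account for the buffer before it is released
 *	}
 *
 *	__qdf_nbuf_reg_free_cb(my_nbuf_free_notify);
 */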
5794 
5795 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5796 {
5797 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5798 
5799 	/* check whether the destination MAC address is broadcast/multicast */
5800 	if (is_broadcast_ether_addr((uint8_t *)eh))
5801 		QDF_NBUF_CB_SET_BCAST(skb);
5802 	else if (is_multicast_ether_addr((uint8_t *)eh))
5803 		QDF_NBUF_CB_SET_MCAST(skb);
5804 
5805 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5806 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5807 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5808 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5809 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5810 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5811 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5812 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5813 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5814 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5815 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5816 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5817 }
5818 qdf_export_symbol(qdf_nbuf_classify_pkt);
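
/*
 * Illustrative usage sketch (editorial example): a tx path typically runs
 * qdf_nbuf_classify_pkt() once on a freshly filled skb and then branches
 * on the cached packet type instead of re-parsing the headers.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL) {
 *		// e.g. queue on a high-priority ring
 *	}
 */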
5819 
5820 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5821 {
5822 	qdf_nbuf_users_set(&nbuf->users, 1);
5823 	nbuf->data = nbuf->head + NET_SKB_PAD;
5824 	skb_reset_tail_pointer(nbuf);
5825 }
5826 qdf_export_symbol(__qdf_nbuf_init);
5827 
5828 #ifdef WLAN_FEATURE_FASTPATH
5829 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5830 {
5831 	qdf_nbuf_users_set(&nbuf->users, 1);
5832 	skb_reset_tail_pointer(nbuf);
5833 }
5834 qdf_export_symbol(qdf_nbuf_init_fast);
5835 #endif /* WLAN_FEATURE_FASTPATH */
5836 
5837 
5838 #ifdef QDF_NBUF_GLOBAL_COUNT
5839 void __qdf_nbuf_mod_init(void)
5840 {
5841 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5842 	qdf_atomic_init(&nbuf_count);
5843 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5844 }
5845 
5846 void __qdf_nbuf_mod_exit(void)
5847 {
5848 }
5849 #endif
5850 
5851 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5852 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5853 					    int offset)
5854 {
5855 	unsigned int frag_offset;
5856 	skb_frag_t *frag;
5857 
5858 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5859 		return QDF_STATUS_E_FAILURE;
5860 
5861 	frag = &skb_shinfo(nbuf)->frags[idx];
5862 	frag_offset = skb_frag_off(frag);
5863 
5864 	frag_offset += offset;
5865 	skb_frag_off_set(frag, frag_offset);
5866 
5867 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5868 
5869 	return QDF_STATUS_SUCCESS;
5870 }
5871 
5872 #else
5873 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5874 					    int offset)
5875 {
5876 	uint16_t frag_offset;
5877 	skb_frag_t *frag;
5878 
5879 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5880 		return QDF_STATUS_E_FAILURE;
5881 
5882 	frag = &skb_shinfo(nbuf)->frags[idx];
5883 	frag_offset = frag->page_offset;
5884 
5885 	frag_offset += offset;
5886 	frag->page_offset = frag_offset;
5887 
5888 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5889 
5890 	return QDF_STATUS_SUCCESS;
5891 }
5892 #endif
5893 
5894 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
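
/*
 * Illustrative usage sketch (editorial example): advancing the page offset
 * of frag 0 by hdr_len (a hypothetical header length) strips hdr_len bytes
 * from the front of that frag; the frag size is trimmed by the same amount.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, hdr_len) !=
 *	    QDF_STATUS_SUCCESS) {
 *		// frag index was out of range
 *	}
 */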
5895 
5896 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
5897 			    uint16_t idx,
5898 			    uint16_t truesize)
5899 {
5900 	struct page *page;
5901 	uint16_t frag_len;
5902 
5903 	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
5904 
5905 	if (qdf_unlikely(!page))
5906 		return;
5907 
5908 	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
5909 	put_page(page);
5910 	nbuf->len -= frag_len;
5911 	nbuf->data_len -= frag_len;
5912 	nbuf->truesize -= truesize;
5913 	skb_shinfo(nbuf)->nr_frags--;
5914 }
5915 
5916 qdf_export_symbol(__qdf_nbuf_remove_frag);
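
/*
 * Illustrative usage sketch (editorial example): dropping the last attached
 * frag and releasing its page reference. truesize should match the value
 * used when the frag was attached; the names below are hypothetical.
 *
 *	uint16_t last = qdf_nbuf_get_nr_frags(nbuf) - 1;
 *
 *	__qdf_nbuf_remove_frag(nbuf, last, rx_buf_truesize);
 */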
5917 
5918 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5919 			    int offset, int frag_len,
5920 			    unsigned int truesize, bool take_frag_ref)
5921 {
5922 	struct page *page;
5923 	int frag_offset;
5924 	uint8_t nr_frag;
5925 
5926 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5927 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5928 
5929 	page = virt_to_head_page(buf);
5930 	frag_offset = buf - page_address(page);
5931 
5932 	skb_add_rx_frag(nbuf, nr_frag, page,
5933 			(frag_offset + offset),
5934 			frag_len, truesize);
5935 
5936 	if (unlikely(take_frag_ref)) {
5937 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5938 		skb_frag_ref(nbuf, nr_frag);
5939 	}
5940 }
5941 
5942 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
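
/*
 * Illustrative usage sketch (editorial example): attaching a received DMA
 * buffer (rx_buf, a hypothetical virtual address inside a page) as a new
 * frag of a monitor nbuf without copying. take_frag_ref is false here
 * because ownership of the existing page reference moves to the nbuf.
 *
 *	__qdf_nbuf_add_rx_frag(rx_buf, mon_nbuf, 0, payload_len,
 *			       rx_buf_truesize, false);
 */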
5943 
5944 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
5945 {
5946 	struct page *page;
5947 	skb_frag_t frag = {0};
5948 
5949 	page = virt_to_head_page(buf);
5950 	__skb_frag_set_page(&frag, page);
5951 
5952 	/*
5953 	 * Since __skb_frag_ref() only uses the page to take the reference,
5954 	 * populating the page field of the local frag is sufficient here
5955 	 */
5956 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5957 	__skb_frag_ref(&frag);
5958 }
5959 
5960 qdf_export_symbol(__qdf_nbuf_ref_frag);
5961 
5962 #ifdef NBUF_FRAG_MEMORY_DEBUG
5963 
5964 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5965 						int offset, const char *func,
5966 						uint32_t line)
5967 {
5968 	QDF_STATUS result;
5969 	qdf_frag_t p_fragp, n_fragp;
5970 
5971 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5972 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5973 
5974 	if (qdf_likely(is_initial_mem_debug_disabled))
5975 		return result;
5976 
5977 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5978 
5979 	/*
5980 	 * Update frag address in frag debug tracker
5981 	 * when frag offset is successfully changed in skb
5982 	 */
5983 	if (result == QDF_STATUS_SUCCESS)
5984 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
5985 
5986 	return result;
5987 }
5988 
5989 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
5990 
5991 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
5992 				int offset, int frag_len,
5993 				unsigned int truesize, bool take_frag_ref,
5994 				const char *func, uint32_t line)
5995 {
5996 	qdf_frag_t fragp;
5997 	uint32_t num_nr_frags;
5998 
5999 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
6000 			       frag_len, truesize, take_frag_ref);
6001 
6002 	if (qdf_likely(is_initial_mem_debug_disabled))
6003 		return;
6004 
6005 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
6006 
6007 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6008 
6009 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
6010 
6011 	/* Update frag address in frag debug tracking table */
6012 	if (fragp != buf && !take_frag_ref)
6013 		qdf_frag_debug_update_addr(buf, fragp, func, line);
6014 
6015 	/* Update frag refcount in frag debug tracking table */
6016 	qdf_frag_debug_refcount_inc(fragp, func, line);
6017 }
6018 
6019 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
6020 
6021 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
6022 {
6023 	__qdf_nbuf_ref_frag(buf);
6024 
6025 	if (qdf_likely(is_initial_mem_debug_disabled))
6026 		return;
6027 
6028 	/* Update frag refcount in frag debug tracking table */
6029 	qdf_frag_debug_refcount_inc(buf, func, line);
6030 }
6031 
6032 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
6033 
6034 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
6035 				    uint32_t line)
6036 {
6037 	uint32_t num_nr_frags;
6038 	uint32_t idx = 0;
6039 	qdf_nbuf_t ext_list;
6040 	qdf_frag_t p_frag;
6041 
6042 	if (qdf_likely(is_initial_mem_debug_disabled))
6043 		return;
6044 
6045 	if (qdf_unlikely(!buf))
6046 		return;
6047 
6048 	/* Take care to update the refcount in the debug entries for frags */
6049 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6050 
6051 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6052 
6053 	while (idx < num_nr_frags) {
6054 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6055 		if (qdf_likely(p_frag))
6056 			qdf_frag_debug_refcount_inc(p_frag, func, line);
6057 		idx++;
6058 	}
6059 
6060 	/*
6061 	 * Take care to update the refcount in the debug entries for the
6062 	 * frags attached to frag_list
6063 	 */
6064 	ext_list = qdf_nbuf_get_ext_list(buf);
6065 	while (ext_list) {
6066 		idx = 0;
6067 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6068 
6069 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6070 
6071 		while (idx < num_nr_frags) {
6072 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6073 			if (qdf_likely(p_frag))
6074 				qdf_frag_debug_refcount_inc(p_frag, func, line);
6075 			idx++;
6076 		}
6077 		ext_list = qdf_nbuf_queue_next(ext_list);
6078 	}
6079 }
6080 
6081 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
6082 
6083 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
6084 				    uint32_t line)
6085 {
6086 	uint32_t num_nr_frags;
6087 	qdf_nbuf_t ext_list;
6088 	uint32_t idx = 0;
6089 	qdf_frag_t p_frag;
6090 
6091 	if (qdf_likely(is_initial_mem_debug_disabled))
6092 		return;
6093 
6094 	if (qdf_unlikely(!buf))
6095 		return;
6096 
6097 	/*
6098 	 * Decrement the refcount for frag debug nodes only when the last
6099 	 * user of the nbuf calls this API, so that the refcount is not
6100 	 * decremented on every call except the last one when the nbuf
6101 	 * has multiple users
6102 	 */
6103 	if (qdf_nbuf_get_users(buf) > 1)
6104 		return;
6105 
6106 	/* Take care to update the refcount in the debug entries for frags */
6107 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6108 
6109 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6110 
6111 	while (idx < num_nr_frags) {
6112 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6113 		if (qdf_likely(p_frag))
6114 			qdf_frag_debug_refcount_dec(p_frag, func, line);
6115 		idx++;
6116 	}
6117 
6118 	/* Take care to update debug entries for frags attached to frag_list */
6119 	ext_list = qdf_nbuf_get_ext_list(buf);
6120 	while (ext_list) {
6121 		if (qdf_nbuf_get_users(ext_list) == 1) {
6122 			idx = 0;
6123 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6124 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6125 			while (idx < num_nr_frags) {
6126 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6127 				if (qdf_likely(p_frag))
6128 					qdf_frag_debug_refcount_dec(p_frag,
6129 								    func, line);
6130 				idx++;
6131 			}
6132 		}
6133 		ext_list = qdf_nbuf_queue_next(ext_list);
6134 	}
6135 }
6136 
6137 qdf_export_symbol(qdf_net_buf_debug_release_frag);
6138 
6139 QDF_STATUS
6140 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
6141 			   uint16_t idx,
6142 			   uint16_t truesize,
6143 			   const char *func,
6144 			   uint32_t line)
6145 {
6146 	uint16_t num_frags;
6147 	qdf_frag_t frag;
6148 
6149 	if (qdf_unlikely(!nbuf))
6150 		return QDF_STATUS_E_INVAL;
6151 
6152 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6153 	if (idx >= num_frags)
6154 		return QDF_STATUS_E_INVAL;
6155 
6156 	if (qdf_likely(is_initial_mem_debug_disabled)) {
6157 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6158 		return QDF_STATUS_SUCCESS;
6159 	}
6160 
6161 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
6162 	if (qdf_likely(frag))
6163 		qdf_frag_debug_refcount_dec(frag, func, line);
6164 
6165 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6166 
6167 	return QDF_STATUS_SUCCESS;
6168 }
6169 
6170 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6171 
6172 #endif /* NBUF_FRAG_MEMORY_DEBUG */
6173 
6174 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
6175 {
6176 	qdf_nbuf_t last_nbuf;
6177 	uint32_t num_frags;
6178 
6179 	if (qdf_unlikely(!nbuf))
6180 		return NULL;
6181 
6182 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6183 
6184 	/* Check whether nbuf has a free slot to store another frag */
6185 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6186 		return nbuf;
6187 
6188 	if (!__qdf_nbuf_has_fraglist(nbuf))
6189 		return NULL;
6190 
6191 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
6192 	if (qdf_unlikely(!last_nbuf))
6193 		return NULL;
6194 
6195 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
6196 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6197 		return last_nbuf;
6198 
6199 	return NULL;
6200 }
6201 
6202 qdf_export_symbol(qdf_get_nbuf_valid_frag);
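
/*
 * Illustrative usage sketch (editorial example): before attaching another
 * frag, pick the nbuf in the chain that still has a free frag slot, as
 * qdf_nbuf_add_frag_debug() below does. parent is a hypothetical name.
 *
 *	qdf_nbuf_t target;
 *
 *	target = qdf_get_nbuf_valid_frag(parent);
 *	if (!target) {
 *		// parent and its frag list are full: allocate a fresh nbuf
 *		// and append it with qdf_nbuf_append_ext_list()
 *	}
 */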
6203 
6204 QDF_STATUS
6205 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6206 			qdf_nbuf_t nbuf, int offset,
6207 			int frag_len, unsigned int truesize,
6208 			bool take_frag_ref, unsigned int minsize,
6209 			const char *func, uint32_t line)
6210 {
6211 	qdf_nbuf_t cur_nbuf;
6212 	qdf_nbuf_t this_nbuf;
6213 
6214 	cur_nbuf = nbuf;
6215 	this_nbuf = nbuf;
6216 
6217 	if (qdf_unlikely(!frag_len || !buf)) {
6218 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6219 			     func, line,
6220 			     buf, frag_len);
6221 		return QDF_STATUS_E_INVAL;
6222 	}
6223 
6224 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6225 
6226 	if (this_nbuf) {
6227 		cur_nbuf = this_nbuf;
6228 	} else {
6229 		/* allocate a dummy mpdu buffer with minsize bytes of headroom */
6230 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6231 		if (qdf_unlikely(!this_nbuf)) {
6232 			qdf_nofl_err("%s : %d no memory to allocate\n",
6233 				     func, line);
6234 			return QDF_STATUS_E_NOMEM;
6235 		}
6236 	}
6237 
6238 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6239 			     take_frag_ref);
6240 
6241 	if (this_nbuf != cur_nbuf) {
6242 		/* add new skb to frag list */
6243 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6244 					 qdf_nbuf_len(this_nbuf));
6245 	}
6246 
6247 	return QDF_STATUS_SUCCESS;
6248 }
6249 
6250 qdf_export_symbol(qdf_nbuf_add_frag_debug);
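
/*
 * Illustrative usage sketch (editorial example): attaching a frag through
 * the debug-aware entry point, asking for 64 bytes of headroom should a
 * fresh nbuf need to be allocated. rx_buf, parent_nbuf, payload_len and
 * rx_buf_truesize are hypothetical caller names.
 *
 *	status = qdf_nbuf_add_frag_debug(osdev, rx_buf, parent_nbuf, 0,
 *					 payload_len, rx_buf_truesize,
 *					 false, 64, __func__, __LINE__);
 */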
6251 
6252 #ifdef MEMORY_DEBUG
6253 void qdf_nbuf_acquire_track_lock(uint32_t index,
6254 				 unsigned long irq_flag)
6255 {
6256 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6257 			  irq_flag);
6258 }
6259 
6260 void qdf_nbuf_release_track_lock(uint32_t index,
6261 				 unsigned long irq_flag)
6262 {
6263 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6264 			       irq_flag);
6265 }
6266 
6267 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6268 {
6269 	return gp_qdf_net_buf_track_tbl[index];
6270 }
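
/*
 * Illustrative usage sketch (editorial example): a debug consumer walking
 * one bucket of the nbuf tracking table holds the per-bucket lock across
 * the walk. p_node, its p_next link and the bucket index i are assumptions
 * about the tracking structures, not guaranteed field names.
 *
 *	unsigned long irq_flag;
 *	QDF_NBUF_TRACK *p_node;
 *
 *	qdf_nbuf_acquire_track_lock(i, irq_flag);
 *	p_node = qdf_nbuf_get_track_tbl(i);
 *	while (p_node) {
 *		// inspect the tracked nbuf entry
 *		p_node = p_node->p_next;
 *	}
 *	qdf_nbuf_release_track_lock(i, irq_flag);
 */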
6271 #endif /* MEMORY_DEBUG */
6272 
6273 #ifdef ENHANCED_OS_ABSTRACTION
6274 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6275 {
6276 	__qdf_nbuf_set_timestamp(buf);
6277 }
6278 
6279 qdf_export_symbol(qdf_nbuf_set_timestamp);
6280 
6281 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6282 {
6283 	return __qdf_nbuf_get_timestamp(buf);
6284 }
6285 
6286 qdf_export_symbol(qdf_nbuf_get_timestamp);
6287 
6288 uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
6289 {
6290 	return __qdf_nbuf_get_timestamp_us(buf);
6291 }
6292 
6293 qdf_export_symbol(qdf_nbuf_get_timestamp_us);
6294 
6295 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6296 {
6297 	return __qdf_nbuf_get_timedelta_us(buf);
6298 }
6299 
6300 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6301 
6302 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6303 {
6304 	return __qdf_nbuf_get_timedelta_ms(buf);
6305 }
6306 
6307 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6308 
6309 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6310 {
6311 	return __qdf_nbuf_net_timedelta(t);
6312 }
6313 
6314 qdf_export_symbol(qdf_nbuf_net_timedelta);
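
/*
 * Illustrative usage sketch (editorial example): a latency probe stamps
 * the nbuf when it is queued and reads the elapsed time when it completes.
 *
 *	qdf_nbuf_set_timestamp(nbuf);			// at enqueue
 *	...
 *	delay_ms = qdf_nbuf_get_timedelta_ms(nbuf);	// at completion
 */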
6315 #endif
6316