1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_nbuf.c
22  * QCA driver framework(QDF) network buffer management APIs
23  */
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <linux/inetdevice.h>
31 #include <qdf_atomic.h>
32 #include <qdf_debugfs.h>
33 #include <qdf_lock.h>
34 #include <qdf_mem.h>
35 #include <qdf_module.h>
36 #include <qdf_nbuf.h>
37 #include <qdf_status.h>
38 #include "qdf_str.h"
39 #include <qdf_trace.h>
40 #include "qdf_tracker.h"
41 #include <qdf_types.h>
42 #include <net/ieee80211_radiotap.h>
43 #include <pld_common.h>
44 #include <qdf_crypto.h>
45 #include <linux/igmp.h>
46 #include <net/mld.h>
47 
48 #if defined(FEATURE_TSO)
49 #include <net/ipv6.h>
50 #include <linux/ipv6.h>
51 #include <linux/tcp.h>
52 #include <linux/if_vlan.h>
53 #include <linux/ip.h>
54 #endif /* FEATURE_TSO */
55 
56 #ifdef IPA_OFFLOAD
57 #include <i_qdf_ipa_wdi3.h>
58 #endif /* IPA_OFFLOAD */
59 
60 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
61 
62 #define qdf_nbuf_users_inc atomic_inc
63 #define qdf_nbuf_users_dec atomic_dec
64 #define qdf_nbuf_users_set atomic_set
65 #define qdf_nbuf_users_read atomic_read
66 #else
67 #define qdf_nbuf_users_inc refcount_inc
68 #define qdf_nbuf_users_dec refcount_dec
69 #define qdf_nbuf_users_set refcount_set
70 #define qdf_nbuf_users_read refcount_read
71 #endif /* KERNEL_VERSION(4, 13, 0) */
72 
73 #define IEEE80211_RADIOTAP_VHT_BW_20	0
74 #define IEEE80211_RADIOTAP_VHT_BW_40	1
75 #define IEEE80211_RADIOTAP_VHT_BW_80	2
76 #define IEEE80211_RADIOTAP_VHT_BW_160	3
77 
78 #define RADIOTAP_VHT_BW_20	0
79 #define RADIOTAP_VHT_BW_40	1
80 #define RADIOTAP_VHT_BW_80	4
81 #define RADIOTAP_VHT_BW_160	11
82 
83 /* tx status */
84 #define RADIOTAP_TX_STATUS_FAIL		1
85 #define RADIOTAP_TX_STATUS_NOACK	2
86 
87 /* channel number to freq conversion */
88 #define CHANNEL_NUM_14 14
89 #define CHANNEL_NUM_15 15
90 #define CHANNEL_NUM_27 27
91 #define CHANNEL_NUM_35 35
92 #define CHANNEL_NUM_182 182
93 #define CHANNEL_NUM_197 197
94 #define CHANNEL_FREQ_2484 2484
95 #define CHANNEL_FREQ_2407 2407
96 #define CHANNEL_FREQ_2512 2512
97 #define CHANNEL_FREQ_5000 5000
98 #define CHANNEL_FREQ_4000 4000
99 #define CHANNEL_FREQ_5150 5150
100 #define FREQ_MULTIPLIER_CONST_5MHZ 5
101 #define FREQ_MULTIPLIER_CONST_20MHZ 20
102 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
103 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
104 #define RADIOTAP_CCK_CHANNEL 0x0020
105 #define RADIOTAP_OFDM_CHANNEL 0x0040
106 
107 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
108 #include <qdf_mc_timer.h>
109 
110 struct qdf_track_timer {
111 	qdf_mc_timer_t track_timer;
112 	qdf_atomic_t alloc_fail_cnt;
113 };
114 
115 static struct qdf_track_timer alloc_track_timer;
116 
117 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
118 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
119 #endif
120 
121 #ifdef NBUF_MEMORY_DEBUG
122 /* SMMU crash indication */
123 static qdf_atomic_t smmu_crashed;
124 /* Number of nbufs not added to history */
125 unsigned long g_histroy_add_drop;
126 #endif
127 
128 /* Packet Counter */
129 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
130 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
131 #ifdef QDF_NBUF_GLOBAL_COUNT
132 #define NBUF_DEBUGFS_NAME      "nbuf_counters"
133 static qdf_atomic_t nbuf_count;
134 #endif
135 
136 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
137 static bool is_initial_mem_debug_disabled;
138 #endif
139 
140 /**
141  * __qdf_nbuf_get_ip_offset() - Get IPv4/IPv6 header offset
142  * @data: Pointer to network data buffer
143  *
144  * Get the IP header offset, taking into account any 802.1Q or
145  * 802.1AD VLAN tag present in the L2 header.
146  *
147  * Return: IP header offset
148  */
149 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
150 {
151 	uint16_t ether_type;
152 
153 	ether_type = *(uint16_t *)(data +
154 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
155 
156 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
157 		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
158 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
159 		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
160 
161 	return QDF_NBUF_TRAC_IP_OFFSET;
162 }
163 
164 /**
165  * __qdf_nbuf_get_ether_type() - Get the ether type
166  * @data: Pointer to network data buffer
167  *
168  * Get the ether type, taking into account any 802.1Q or 802.1AD tag
169  * present in the L2 header. The value is returned in network byte
170  * order, e.g. 0x0008 for IPv4 (ether type 0x0800) on little-endian hosts.
171  *
172  * Return: ether type
173  */
174 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
175 {
176 	uint16_t ether_type;
177 
178 	ether_type = *(uint16_t *)(data +
179 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
180 
181 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
182 		ether_type = *(uint16_t *)(data +
183 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
184 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
185 		ether_type = *(uint16_t *)(data +
186 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
187 
188 	return ether_type;
189 }
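
/*
 * Example (illustrative sketch, not part of the build): classifiers in this
 * file typically pair the two helpers above to reach the IPv4 header behind
 * an optional VLAN tag, e.g.:
 *
 *	if (__qdf_nbuf_get_ether_type(data) ==
 *	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
 *		struct iphdr *iph;
 *
 *		iph = (struct iphdr *)(data + __qdf_nbuf_get_ip_offset(data));
 *		...inspect iph->protocol, iph->frag_off, etc...
 *	}
 */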
190 
191 /**
192  * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
193  *
194  * Return: none
195  */
196 void qdf_nbuf_tx_desc_count_display(void)
197 {
198 	qdf_debug("Current Snapshot of the Driver:");
199 	qdf_debug("Data Packets:");
200 	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
201 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
202 		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
203 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
204 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
205 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
206 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
207 		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
208 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
209 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
210 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
211 	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
212 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
213 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
214 		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
215 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
216 		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
217 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
218 		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
219 	qdf_debug("Mgmt Packets:");
220 	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
221 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
222 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
223 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
224 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
225 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
226 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
227 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
228 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
229 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
230 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
231 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
232 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
233 		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
234 }
235 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
236 
237 /**
238  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
239  * @packet_type: packet type, either mgmt or data
240  * @current_state: layer at which the packet is currently present
241  *
242  * Return: none
243  */
244 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
245 			uint8_t current_state)
246 {
247 	switch (packet_type) {
248 	case QDF_NBUF_TX_PKT_MGMT_TRACK:
249 		nbuf_tx_mgmt[current_state]++;
250 		break;
251 	case QDF_NBUF_TX_PKT_DATA_TRACK:
252 		nbuf_tx_data[current_state]++;
253 		break;
254 	default:
255 		break;
256 	}
257 }
258 
259 /**
260  * qdf_nbuf_tx_desc_count_clear() - Clear the data and mgmt packet counters
261  *
262  * Return: none
263  */
264 void qdf_nbuf_tx_desc_count_clear(void)
265 {
266 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
267 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
268 }
269 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
270 
271 /**
272  * qdf_nbuf_set_state() - Updates the packet state
273  * @nbuf:            network buffer
274  * @current_state :  layer at which the packet currently is
275  *
276  * This function updates the packet state to the layer at which the packet
277  * currently is
278  *
279  * Return: none
280  */
281 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
282 {
283 	/*
284 	 * Only Mgmt, Data Packets are tracked. WMI messages
285 	 * such as scan commands are not tracked
286 	 */
287 	uint8_t packet_type;
288 
289 	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
290 
291 	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
292 		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
293 		return;
294 	}
295 	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
296 	qdf_nbuf_tx_desc_count_update(packet_type,
297 					current_state);
298 }
299 qdf_export_symbol(qdf_nbuf_set_state);
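
/*
 * Example (illustrative sketch): a TX layer that takes ownership of the
 * buffer stamps it as it passes, which feeds the counters shown by
 * qdf_nbuf_tx_desc_count_display(). The packet must first be marked as a
 * tracked data or mgmt packet:
 *
 *	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HDD);
 */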
300 
301 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
302 /**
303  * __qdf_nbuf_start_replenish_timer - Start alloc fail replenish timer
304  *
305  * This function starts the alloc fail replenish timer.
306  *
307  * Return: void
308  */
309 static inline void __qdf_nbuf_start_replenish_timer(void)
310 {
311 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
312 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
313 	    QDF_TIMER_STATE_RUNNING)
314 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
315 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
316 }
317 
318 /**
319  * __qdf_nbuf_stop_replenish_timer - Stop alloc fail replenish timer
320  *
321  * This function stops the alloc fail replenish timer.
322  *
323  * Return: void
324  */
325 static inline void __qdf_nbuf_stop_replenish_timer(void)
326 {
327 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
328 		return;
329 
330 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
331 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
332 	    QDF_TIMER_STATE_RUNNING)
333 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
334 }
335 
336 /**
337  * qdf_replenish_expire_handler - Replenish expire handler
338  *
339  * This function triggers when the alloc fail replenish timer expires.
340  *
341  * Return: void
342  */
343 static void qdf_replenish_expire_handler(void *arg)
344 {
345 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
346 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
347 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
348 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
349 
350 		/* Error handling here */
351 	}
352 }
353 
354 /**
355  * __qdf_nbuf_init_replenish_timer - Initialize the alloc replenish timer
356  *
357  * This function initializes the nbuf alloc fail replenish timer.
358  *
359  * Return: void
360  */
361 void __qdf_nbuf_init_replenish_timer(void)
362 {
363 	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
364 			  qdf_replenish_expire_handler, NULL);
365 }
366 
367 /**
368  * __qdf_nbuf_deinit_replenish_timer - Deinitialize the alloc replenish timer
369  *
370  * This function deinitializes the nbuf alloc fail replenish timer.
371  *
372  * Return: void
373  */
374 void __qdf_nbuf_deinit_replenish_timer(void)
375 {
376 	__qdf_nbuf_stop_replenish_timer();
377 	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
378 }
379 
380 void qdf_nbuf_stop_replenish_timer(void)
381 {
382 	__qdf_nbuf_stop_replenish_timer();
383 }
384 #else
385 
386 static inline void __qdf_nbuf_start_replenish_timer(void) {}
387 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
388 void qdf_nbuf_stop_replenish_timer(void)
389 {
390 }
391 #endif
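
/*
 * Example (illustrative sketch): when FEATURE_NBUFF_REPLENISH_TIMER is
 * enabled, the replenish timer is typically initialized once at driver
 * attach and torn down at detach:
 *
 *	__qdf_nbuf_init_replenish_timer();
 *	...driver lifetime; alloc failures arm the timer internally...
 *	__qdf_nbuf_deinit_replenish_timer();
 */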
392 
393 /* globals do not need to be initialized to NULL/0 */
394 qdf_nbuf_trace_update_t qdf_trace_update_cb;
395 qdf_nbuf_free_t nbuf_free_cb;
396 
397 #ifdef QDF_NBUF_GLOBAL_COUNT
398 
399 /**
400  * __qdf_nbuf_count_get() - get nbuf global count
401  *
402  * Return: nbuf global count
403  */
404 int __qdf_nbuf_count_get(void)
405 {
406 	return qdf_atomic_read(&nbuf_count);
407 }
408 qdf_export_symbol(__qdf_nbuf_count_get);
409 
410 /**
411  * __qdf_nbuf_count_inc() - increment nbuf global count
412  *
413  * @nbuf: sk buff
414  *
415  * Return: void
416  */
417 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
418 {
419 	int num_nbuf = 1;
420 	qdf_nbuf_t ext_list;
421 
422 	if (qdf_likely(is_initial_mem_debug_disabled))
423 		return;
424 
425 	ext_list = qdf_nbuf_get_ext_list(nbuf);
426 
427 	/* Take care to account for frag_list */
428 	while (ext_list) {
429 		++num_nbuf;
430 		ext_list = qdf_nbuf_queue_next(ext_list);
431 	}
432 
433 	qdf_atomic_add(num_nbuf, &nbuf_count);
434 }
435 qdf_export_symbol(__qdf_nbuf_count_inc);
436 
437 /**
438  * __qdf_nbuf_count_dec() - decrement nbuf global count
439  *
440  * @nbuf: sk buff
441  *
442  * Return: void
443  */
444 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
445 {
446 	qdf_nbuf_t ext_list;
447 	int num_nbuf;
448 
449 	if (qdf_likely(is_initial_mem_debug_disabled))
450 		return;
451 
452 	if (qdf_nbuf_get_users(nbuf) > 1)
453 		return;
454 
455 	num_nbuf = 1;
456 
457 	/* Take care to account for frag_list */
458 	ext_list = qdf_nbuf_get_ext_list(nbuf);
459 	while (ext_list) {
460 		if (qdf_nbuf_get_users(ext_list) == 1)
461 			++num_nbuf;
462 		ext_list = qdf_nbuf_queue_next(ext_list);
463 	}
464 
465 	qdf_atomic_sub(num_nbuf, &nbuf_count);
466 }
467 qdf_export_symbol(__qdf_nbuf_count_dec);
468 #endif
469 
470 #ifdef NBUF_FRAG_MEMORY_DEBUG
471 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
472 {
473 	qdf_nbuf_t ext_list;
474 	uint32_t num_nr_frags;
475 	uint32_t total_num_nr_frags;
476 
477 	if (qdf_likely(is_initial_mem_debug_disabled))
478 		return;
479 
480 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
481 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
482 
483 	total_num_nr_frags = num_nr_frags;
484 
485 	/* Take into account the frags attached to frag_list */
486 	ext_list = qdf_nbuf_get_ext_list(nbuf);
487 	while (ext_list) {
488 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
489 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
490 		total_num_nr_frags += num_nr_frags;
491 		ext_list = qdf_nbuf_queue_next(ext_list);
492 	}
493 
494 	qdf_frag_count_inc(total_num_nr_frags);
495 }
496 
497 qdf_export_symbol(qdf_nbuf_frag_count_inc);
498 
499 void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
500 {
501 	qdf_nbuf_t ext_list;
502 	uint32_t num_nr_frags;
503 	uint32_t total_num_nr_frags;
504 
505 	if (qdf_likely(is_initial_mem_debug_disabled))
506 		return;
507 
508 	if (qdf_nbuf_get_users(nbuf) > 1)
509 		return;
510 
511 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
512 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
513 
514 	total_num_nr_frags = num_nr_frags;
515 
516 	/* Take into account the frags attached to frag_list */
517 	ext_list = qdf_nbuf_get_ext_list(nbuf);
518 	while (ext_list) {
519 		if (qdf_nbuf_get_users(ext_list) == 1) {
520 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
521 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
522 			total_num_nr_frags += num_nr_frags;
523 		}
524 		ext_list = qdf_nbuf_queue_next(ext_list);
525 	}
526 
527 	qdf_frag_count_dec(total_num_nr_frags);
528 }
529 
530 qdf_export_symbol(qdf_nbuf_frag_count_dec);
531 
532 #endif
533 
534 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
535 	!defined(QCA_WIFI_QCN9000)
536 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
537 				 int align, int prio, const char *func,
538 				 uint32_t line)
539 {
540 	struct sk_buff *skb;
541 	unsigned long offset;
542 	uint32_t lowmem_alloc_tries = 0;
543 
544 	if (align)
545 		size += (align - 1);
546 
547 realloc:
548 	skb = dev_alloc_skb(size);
549 
550 	if (skb)
551 		goto skb_alloc;
552 
553 	skb = pld_nbuf_pre_alloc(size);
554 
555 	if (!skb) {
556 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
557 				size, func, line);
558 		return NULL;
559 	}
560 
561 skb_alloc:
562 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
563 	 * Though we are trying to reserve low memory upfront to prevent this,
564 	 * we sometimes see SKBs allocated from low memory.
565 	 */
566 	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
567 		lowmem_alloc_tries++;
568 		if (lowmem_alloc_tries > 100) {
569 			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
570 				     size, func, line);
571 			return NULL;
572 		} else {
573 			/* Not freeing to make sure it
574 			 * will not get allocated again
575 			 */
576 			goto realloc;
577 		}
578 	}
579 	memset(skb->cb, 0x0, sizeof(skb->cb));
580 
581 	/*
582 	 * The default is for netbuf fragments to be interpreted
583 	 * as wordstreams rather than bytestreams.
584 	 */
585 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
586 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
587 
588 	/*
589 	 * XXX:how about we reserve first then align
590 	 * Align & make sure that the tail & data are adjusted properly
591 	 */
592 
593 	if (align) {
594 		offset = ((unsigned long)skb->data) % align;
595 		if (offset)
596 			skb_reserve(skb, align - offset);
597 	}
598 
599 	/*
600 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
601 	 * pointer
602 	 */
603 	skb_reserve(skb, reserve);
604 	qdf_nbuf_count_inc(skb);
605 
606 	return skb;
607 }
608 #else
609 
610 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
611 				 int align, int prio, const char *func,
612 				 uint32_t line)
613 {
614 	struct sk_buff *skb;
615 	unsigned long offset;
616 	int flags = GFP_KERNEL;
617 
618 	if (align)
619 		size += (align - 1);
620 
621 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
622 		flags = GFP_ATOMIC;
623 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
624 		/*
625 		 * Observed that kcompactd burns out CPU to make order-3 pages.
626 		 * __netdev_alloc_skb() has a 4k page fallback in case the
627 		 * high-order page allocation fails, so we don't need to push
628 		 * hard. Let kcompactd rest in peace.
629 		 */
630 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
631 #endif
632 	}
633 
634 	skb = __netdev_alloc_skb(NULL, size, flags);
635 
636 	if (skb)
637 		goto skb_alloc;
638 
639 	skb = pld_nbuf_pre_alloc(size);
640 
641 	if (!skb) {
642 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
643 				size, func, line);
644 		__qdf_nbuf_start_replenish_timer();
645 		return NULL;
646 	} else {
647 		__qdf_nbuf_stop_replenish_timer();
648 	}
649 
650 skb_alloc:
651 	memset(skb->cb, 0x0, sizeof(skb->cb));
652 
653 	/*
654 	 * The default is for netbuf fragments to be interpreted
655 	 * as wordstreams rather than bytestreams.
656 	 */
657 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
658 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
659 
660 	/*
661 	 * XXX:how about we reserve first then align
662 	 * Align & make sure that the tail & data are adjusted properly
663 	 */
664 
665 	if (align) {
666 		offset = ((unsigned long)skb->data) % align;
667 		if (offset)
668 			skb_reserve(skb, align - offset);
669 	}
670 
671 	/*
672 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
673 	 * pointer
674 	 */
675 	skb_reserve(skb, reserve);
676 	qdf_nbuf_count_inc(skb);
677 
678 	return skb;
679 }
680 #endif
681 qdf_export_symbol(__qdf_nbuf_alloc);
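
/*
 * Example (illustrative sketch, assuming the usual qdf_nbuf.h wrappers):
 * callers normally allocate through qdf_nbuf_alloc() rather than calling
 * __qdf_nbuf_alloc() directly, and release the buffer with qdf_nbuf_free():
 *
 *	nbuf = qdf_nbuf_alloc(osdev, len, 0, 4, false);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 *	...fill the buffer and hand it to the TX path...
 *	qdf_nbuf_free(nbuf);
 */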
682 
683 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
684 					  const char *func, uint32_t line)
685 {
686 	qdf_nbuf_t nbuf;
687 	unsigned long offset;
688 
689 	if (align)
690 		size += (align - 1);
691 
692 	nbuf = alloc_skb(size, GFP_ATOMIC);
693 	if (!nbuf)
694 		goto ret_nbuf;
695 
696 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
697 
698 	skb_reserve(nbuf, reserve);
699 
700 	if (align) {
701 		offset = ((unsigned long)nbuf->data) % align;
702 		if (offset)
703 			skb_reserve(nbuf, align - offset);
704 	}
705 
706 	qdf_nbuf_count_inc(nbuf);
707 
708 ret_nbuf:
709 	return nbuf;
710 }
711 
712 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
713 
714 /**
715  * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
716  * @skb: Pointer to network buffer
717  *
718  * Return: none
719  */
720 
721 void __qdf_nbuf_free(struct sk_buff *skb)
722 {
723 	if (pld_nbuf_pre_alloc_free(skb))
724 		return;
725 
726 	qdf_nbuf_frag_count_dec(skb);
727 
728 	qdf_nbuf_count_dec(skb);
729 	if (nbuf_free_cb)
730 		nbuf_free_cb(skb);
731 	else
732 		dev_kfree_skb_any(skb);
733 }
734 
735 qdf_export_symbol(__qdf_nbuf_free);
736 
737 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
738 {
739 	qdf_nbuf_t skb_new = NULL;
740 
741 	skb_new = skb_clone(skb, GFP_ATOMIC);
742 	if (skb_new) {
743 		qdf_nbuf_frag_count_inc(skb_new);
744 		qdf_nbuf_count_inc(skb_new);
745 	}
746 	return skb_new;
747 }
748 
749 qdf_export_symbol(__qdf_nbuf_clone);
750 
751 #ifdef QCA_DP_TX_NBUF_LIST_FREE
752 void
753 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
754 {
755 	dev_kfree_skb_list_fast(nbuf_queue_head);
756 }
757 #else
758 void
759 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
760 {
761 }
762 #endif
763 
764 qdf_export_symbol(__qdf_nbuf_dev_kfree_list);
765 
766 #ifdef NBUF_MEMORY_DEBUG
767 struct qdf_nbuf_event {
768 	qdf_nbuf_t nbuf;
769 	char func[QDF_MEM_FUNC_NAME_SIZE];
770 	uint32_t line;
771 	enum qdf_nbuf_event_type type;
772 	uint64_t timestamp;
773 	qdf_dma_addr_t iova;
774 };
775 
776 #ifndef QDF_NBUF_HISTORY_SIZE
777 #define QDF_NBUF_HISTORY_SIZE 4096
778 #endif
779 static qdf_atomic_t qdf_nbuf_history_index;
780 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
781 
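/*
 * The history index only grows; once the increment returns exactly @size it
 * is pulled back down by @size so the atomic cannot overflow, while callers
 * always receive the folded value "next % size".
 */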
782 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
783 {
784 	int32_t next = qdf_atomic_inc_return(index);
785 
786 	if (next == size)
787 		qdf_atomic_sub(size, index);
788 
789 	return next % size;
790 }
791 
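/*
 * Nbuf lifecycle events (map, unmap, SMMU map/unmap and others) are appended
 * to the ring above; once QDF_NBUF_HISTORY_SIZE entries have been written,
 * the oldest entries are overwritten.
 */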
792 void
793 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
794 		     enum qdf_nbuf_event_type type)
795 {
796 	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
797 						   QDF_NBUF_HISTORY_SIZE);
798 	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
799 
800 	if (qdf_atomic_read(&smmu_crashed)) {
801 		g_histroy_add_drop++;
802 		return;
803 	}
804 
805 	event->nbuf = nbuf;
806 	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
807 	event->line = line;
808 	event->type = type;
809 	event->timestamp = qdf_get_log_timestamp();
810 	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
811 	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
812 		event->iova = QDF_NBUF_CB_PADDR(nbuf);
813 	else
814 		event->iova = 0;
815 }
816 
817 void qdf_set_smmu_fault_state(bool smmu_fault_state)
818 {
819 	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
820 	if (!smmu_fault_state)
821 		g_histroy_add_drop = 0;
822 }
823 qdf_export_symbol(qdf_set_smmu_fault_state);
824 #endif /* NBUF_MEMORY_DEBUG */
825 
826 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
827 #define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
828 qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
829 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
830 
831 static void qdf_nbuf_smmu_map_tracking_init(void)
832 {
833 	qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
834 }
835 
836 static void qdf_nbuf_smmu_map_tracking_deinit(void)
837 {
838 	qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
839 }
840 
841 static QDF_STATUS
842 qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
843 {
844 	if (is_initial_mem_debug_disabled)
845 		return QDF_STATUS_SUCCESS;
846 
847 	return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
848 }
849 
850 static void
851 qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
852 {
853 	if (is_initial_mem_debug_disabled)
854 		return;
855 
856 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
857 	qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
858 }
859 
860 void qdf_nbuf_map_check_for_smmu_leaks(void)
861 {
862 	qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
863 }
864 
865 #ifdef IPA_OFFLOAD
866 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
867 				   uint8_t hdl,
868 				   uint8_t num_buffers,
869 				   qdf_mem_info_t *info,
870 				   const char *func,
871 				   uint32_t line)
872 {
873 	QDF_STATUS status;
874 
875 	status = qdf_nbuf_track_smmu_map(nbuf, func, line);
876 	if (QDF_IS_STATUS_ERROR(status))
877 		return status;
878 
879 	status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
880 
881 	if (QDF_IS_STATUS_ERROR(status)) {
882 		qdf_nbuf_untrack_smmu_map(nbuf, func, line);
883 	} else {
884 		if (!is_initial_mem_debug_disabled)
885 			qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
886 		qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
887 						       info->pa, func, line);
888 	}
889 
890 	return status;
891 }
892 
893 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
894 
895 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
896 				     uint8_t hdl,
897 				     uint8_t num_buffers,
898 				     qdf_mem_info_t *info,
899 				     const char *func,
900 				     uint32_t line)
901 {
902 	QDF_STATUS status;
903 
904 	qdf_nbuf_untrack_smmu_map(nbuf, func, line);
905 	status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
906 	qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
907 						 info->pa, func, line);
908 	return status;
909 }
910 
911 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
912 #endif /* IPA_OFFLOAD */
913 
914 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
915 						  const char *func,
916 						  uint32_t line)
917 {
918 	char map_func[QDF_TRACKER_FUNC_SIZE];
919 	uint32_t map_line;
920 
921 	if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
922 				&map_func, &map_line))
923 		return;
924 
925 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
926 			   func, line, map_func, map_line);
927 }
928 
929 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
930 {
931 	p_node->smmu_unmap_line_num = 0;
932 	p_node->is_nbuf_smmu_mapped = false;
933 	p_node->smmu_map_line_num = 0;
934 	p_node->smmu_map_func_name[0] = '\0';
935 	p_node->smmu_unmap_func_name[0] = '\0';
936 	p_node->smmu_unmap_iova_addr = 0;
937 	p_node->smmu_unmap_pa_addr = 0;
938 	p_node->smmu_map_iova_addr = 0;
939 	p_node->smmu_map_pa_addr = 0;
940 }
941 #else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
942 #ifdef NBUF_MEMORY_DEBUG
943 static void qdf_nbuf_smmu_map_tracking_init(void)
944 {
945 }
946 
947 static void qdf_nbuf_smmu_map_tracking_deinit(void)
948 {
949 }
950 
951 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
952 						  const char *func,
953 						  uint32_t line)
954 {
955 }
956 
957 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
958 {
959 }
960 #endif /* NBUF_MEMORY_DEBUG */
961 
962 #ifdef IPA_OFFLOAD
963 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
964 				   uint8_t hdl,
965 				   uint8_t num_buffers,
966 				   qdf_mem_info_t *info,
967 				   const char *func,
968 				   uint32_t line)
969 {
970 	return  __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
971 }
972 
973 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
974 
975 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
976 				     uint8_t hdl,
977 				     uint8_t num_buffers,
978 				     qdf_mem_info_t *info,
979 				     const char *func,
980 				     uint32_t line)
981 {
982 	return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
983 }
984 
985 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
986 #endif /* IPA_OFFLOAD */
987 #endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */
988 
989 #ifdef NBUF_MAP_UNMAP_DEBUG
990 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
991 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
992 		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
993 
994 static void qdf_nbuf_map_tracking_init(void)
995 {
996 	qdf_tracker_init(&qdf_nbuf_map_tracker);
997 }
998 
999 static void qdf_nbuf_map_tracking_deinit(void)
1000 {
1001 	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
1002 }
1003 
1004 static QDF_STATUS
1005 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1006 {
1007 	if (is_initial_mem_debug_disabled)
1008 		return QDF_STATUS_SUCCESS;
1009 
1010 	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
1011 }
1012 
1013 static void
1014 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1015 {
1016 	if (is_initial_mem_debug_disabled)
1017 		return;
1018 
1019 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
1020 	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
1021 }
1022 
1023 void qdf_nbuf_map_check_for_leaks(void)
1024 {
1025 	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
1026 }
1027 
1028 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
1029 			      qdf_nbuf_t buf,
1030 			      qdf_dma_dir_t dir,
1031 			      const char *func,
1032 			      uint32_t line)
1033 {
1034 	QDF_STATUS status;
1035 
1036 	status = qdf_nbuf_track_map(buf, func, line);
1037 	if (QDF_IS_STATUS_ERROR(status))
1038 		return status;
1039 
1040 	status = __qdf_nbuf_map(osdev, buf, dir);
1041 	if (QDF_IS_STATUS_ERROR(status)) {
1042 		qdf_nbuf_untrack_map(buf, func, line);
1043 	} else {
1044 		if (!is_initial_mem_debug_disabled)
1045 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1046 		qdf_net_buf_debug_update_map_node(buf, func, line);
1047 	}
1048 
1049 	return status;
1050 }
1051 
1052 qdf_export_symbol(qdf_nbuf_map_debug);
1053 
1054 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
1055 			  qdf_nbuf_t buf,
1056 			  qdf_dma_dir_t dir,
1057 			  const char *func,
1058 			  uint32_t line)
1059 {
1060 	qdf_nbuf_untrack_map(buf, func, line);
1061 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1062 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1063 }
1064 
1065 qdf_export_symbol(qdf_nbuf_unmap_debug);
1066 
1067 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
1068 				     qdf_nbuf_t buf,
1069 				     qdf_dma_dir_t dir,
1070 				     const char *func,
1071 				     uint32_t line)
1072 {
1073 	QDF_STATUS status;
1074 
1075 	status = qdf_nbuf_track_map(buf, func, line);
1076 	if (QDF_IS_STATUS_ERROR(status))
1077 		return status;
1078 
1079 	status = __qdf_nbuf_map_single(osdev, buf, dir);
1080 	if (QDF_IS_STATUS_ERROR(status)) {
1081 		qdf_nbuf_untrack_map(buf, func, line);
1082 	} else {
1083 		if (!is_initial_mem_debug_disabled)
1084 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1085 		qdf_net_buf_debug_update_map_node(buf, func, line);
1086 	}
1087 
1088 	return status;
1089 }
1090 
1091 qdf_export_symbol(qdf_nbuf_map_single_debug);
1092 
1093 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
1094 				 qdf_nbuf_t buf,
1095 				 qdf_dma_dir_t dir,
1096 				 const char *func,
1097 				 uint32_t line)
1098 {
1099 	qdf_nbuf_untrack_map(buf, func, line);
1100 	__qdf_nbuf_unmap_single(osdev, buf, dir);
1101 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1102 }
1103 
1104 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
1105 
1106 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
1107 				     qdf_nbuf_t buf,
1108 				     qdf_dma_dir_t dir,
1109 				     int nbytes,
1110 				     const char *func,
1111 				     uint32_t line)
1112 {
1113 	QDF_STATUS status;
1114 
1115 	status = qdf_nbuf_track_map(buf, func, line);
1116 	if (QDF_IS_STATUS_ERROR(status))
1117 		return status;
1118 
1119 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
1120 	if (QDF_IS_STATUS_ERROR(status)) {
1121 		qdf_nbuf_untrack_map(buf, func, line);
1122 	} else {
1123 		if (!is_initial_mem_debug_disabled)
1124 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1125 		qdf_net_buf_debug_update_map_node(buf, func, line);
1126 	}
1127 
1128 	return status;
1129 }
1130 
1131 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
1132 
1133 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
1134 				 qdf_nbuf_t buf,
1135 				 qdf_dma_dir_t dir,
1136 				 int nbytes,
1137 				 const char *func,
1138 				 uint32_t line)
1139 {
1140 	qdf_nbuf_untrack_map(buf, func, line);
1141 	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
1142 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1143 }
1144 
1145 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1146 
1147 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1148 					    qdf_nbuf_t buf,
1149 					    qdf_dma_dir_t dir,
1150 					    int nbytes,
1151 					    const char *func,
1152 					    uint32_t line)
1153 {
1154 	QDF_STATUS status;
1155 
1156 	status = qdf_nbuf_track_map(buf, func, line);
1157 	if (QDF_IS_STATUS_ERROR(status))
1158 		return status;
1159 
1160 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1161 	if (QDF_IS_STATUS_ERROR(status)) {
1162 		qdf_nbuf_untrack_map(buf, func, line);
1163 	} else {
1164 		if (!is_initial_mem_debug_disabled)
1165 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1166 		qdf_net_buf_debug_update_map_node(buf, func, line);
1167 	}
1168 
1169 	return status;
1170 }
1171 
1172 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1173 
1174 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
1175 					qdf_nbuf_t buf,
1176 					qdf_dma_dir_t dir,
1177 					int nbytes,
1178 					const char *func,
1179 					uint32_t line)
1180 {
1181 	qdf_nbuf_untrack_map(buf, func, line);
1182 	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
1183 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1184 }
1185 
1186 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1187 
1188 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1189 					      qdf_nbuf_t buf,
1190 					      qdf_dma_addr_t phy_addr,
1191 					      qdf_dma_dir_t dir, int nbytes,
1192 					      const char *func, uint32_t line)
1193 {
1194 	qdf_nbuf_untrack_map(buf, func, line);
1195 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1196 	qdf_net_buf_debug_update_unmap_node(buf, func, line);
1197 }
1198 
1199 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1200 
1201 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1202 					     const char *func,
1203 					     uint32_t line)
1204 {
1205 	char map_func[QDF_TRACKER_FUNC_SIZE];
1206 	uint32_t map_line;
1207 
1208 	if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1209 				&map_func, &map_line))
1210 		return;
1211 
1212 	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1213 			   func, line, map_func, map_line);
1214 }
1215 #else
1216 static inline void qdf_nbuf_map_tracking_init(void)
1217 {
1218 }
1219 
1220 static inline void qdf_nbuf_map_tracking_deinit(void)
1221 {
1222 }
1223 
1224 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1225 						    const char *func,
1226 						    uint32_t line)
1227 {
1228 }
1229 #endif /* NBUF_MAP_UNMAP_DEBUG */
1230 
1231 /**
1232  * __qdf_nbuf_map() - map a buffer to local bus address space
1233  * @osdev: OS device
1235  * @skb: Pointer to network buffer
1236  * @dir: Direction
1237  *
1238  * Return: QDF_STATUS
1239  */
1240 #ifdef QDF_OS_DEBUG
1241 QDF_STATUS
1242 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1243 {
1244 	struct skb_shared_info *sh = skb_shinfo(skb);
1245 
1246 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1247 			|| (dir == QDF_DMA_FROM_DEVICE));
1248 
1249 	/*
1250 	 * Assume there's only a single fragment.
1251 	 * To support multiple fragments, it would be necessary to change
1252 	 * qdf_nbuf_t to be a separate object that stores meta-info
1253 	 * (including the bus address for each fragment) and a pointer
1254 	 * to the underlying sk_buff.
1255 	 */
1256 	qdf_assert(sh->nr_frags == 0);
1257 
1258 	return __qdf_nbuf_map_single(osdev, skb, dir);
1259 }
1260 qdf_export_symbol(__qdf_nbuf_map);
1261 
1262 #else
1263 QDF_STATUS
1264 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1265 {
1266 	return __qdf_nbuf_map_single(osdev, skb, dir);
1267 }
1268 qdf_export_symbol(__qdf_nbuf_map);
1269 #endif
1270 /**
1271  * __qdf_nbuf_unmap() - to unmap a previously mapped buf
1272  * @osdev: OS device
1273  * @skb: Pointer to network buffer
1274  * @dir: dma direction
1275  *
1276  * Return: none
1277  */
1278 void
1279 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1280 			qdf_dma_dir_t dir)
1281 {
1282 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1283 		   || (dir == QDF_DMA_FROM_DEVICE));
1284 
1285 	/*
1286 	 * Assume there's a single fragment.
1287 	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1288 	 */
1289 	__qdf_nbuf_unmap_single(osdev, skb, dir);
1290 }
1291 qdf_export_symbol(__qdf_nbuf_unmap);
1292 
1293 /**
1294  * __qdf_nbuf_map_single() - map a single buffer to local bus address space
1295  * @osdev: OS device
1296  * @skb: Pointer to network buffer
1297  * @dir: Direction
1298  *
1299  * Return: QDF_STATUS
1300  */
1301 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1302 QDF_STATUS
1303 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1304 {
1305 	qdf_dma_addr_t paddr;
1306 
1307 	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1308 	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1309 	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1310 	return QDF_STATUS_SUCCESS;
1311 }
1312 qdf_export_symbol(__qdf_nbuf_map_single);
1313 #else
1314 QDF_STATUS
1315 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1316 {
1317 	qdf_dma_addr_t paddr;
1318 
1319 	/* assume that the OS only provides a single fragment */
1320 	QDF_NBUF_CB_PADDR(buf) = paddr =
1321 		dma_map_single(osdev->dev, buf->data,
1322 				skb_end_pointer(buf) - buf->data,
1323 				__qdf_dma_dir_to_os(dir));
1324 	__qdf_record_nbuf_nbytes(
1325 		__qdf_nbuf_get_end_offset(buf), dir, true);
1326 	return dma_mapping_error(osdev->dev, paddr)
1327 		? QDF_STATUS_E_FAILURE
1328 		: QDF_STATUS_SUCCESS;
1329 }
1330 qdf_export_symbol(__qdf_nbuf_map_single);
1331 #endif
1332 /**
1333  * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
1334  * @osdev: OS device
1335  * @skb: Pointer to network buffer
1336  * @dir: Direction
1337  *
1338  * Return: none
1339  */
1340 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1341 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1342 				qdf_dma_dir_t dir)
1343 {
1344 }
1345 #else
1346 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1347 					qdf_dma_dir_t dir)
1348 {
1349 	if (QDF_NBUF_CB_PADDR(buf)) {
1350 		__qdf_record_nbuf_nbytes(
1351 			__qdf_nbuf_get_end_offset(buf), dir, false);
1352 		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1353 			skb_end_pointer(buf) - buf->data,
1354 			__qdf_dma_dir_to_os(dir));
1355 	}
1356 }
1357 #endif
1358 qdf_export_symbol(__qdf_nbuf_unmap_single);
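
/*
 * Example (illustrative sketch): a TX path would typically map the buffer
 * before posting it to hardware and unmap it from the completion handler,
 * checking the mapping status first:
 *
 *	if (qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...program QDF_NBUF_CB_PADDR(nbuf) into the TX descriptor...
 *	qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */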
1359 
1360 /**
1361  * __qdf_nbuf_set_rx_cksum() - set rx checksum
1362  * @skb: Pointer to network buffer
1363  * @cksum: Pointer to checksum value
1364  *
1365  * Return: QDF_STATUS
1366  */
1367 QDF_STATUS
1368 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1369 {
1370 	switch (cksum->l4_result) {
1371 	case QDF_NBUF_RX_CKSUM_NONE:
1372 		skb->ip_summed = CHECKSUM_NONE;
1373 		break;
1374 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1375 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1376 		break;
1377 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1378 		skb->ip_summed = CHECKSUM_PARTIAL;
1379 		skb->csum = cksum->val;
1380 		break;
1381 	default:
1382 		pr_err("Unknown checksum type\n");
1383 		qdf_assert(0);
1384 		return QDF_STATUS_E_NOSUPPORT;
1385 	}
1386 	return QDF_STATUS_SUCCESS;
1387 }
1388 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
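
/*
 * Example (illustrative sketch): an RX path that has hardware checksum
 * results available can forward them to the network stack like this:
 *
 *	qdf_nbuf_rx_cksum_t cksum = {0};
 *
 *	cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);
 */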
1389 
1390 /**
1391  * __qdf_nbuf_get_tx_cksum() - get tx checksum
1392  * @skb: Pointer to network buffer
1393  *
1394  * Return: TX checksum value
1395  */
1396 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1397 {
1398 	switch (skb->ip_summed) {
1399 	case CHECKSUM_NONE:
1400 		return QDF_NBUF_TX_CKSUM_NONE;
1401 	case CHECKSUM_PARTIAL:
1402 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1403 	case CHECKSUM_COMPLETE:
1404 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1405 	default:
1406 		return QDF_NBUF_TX_CKSUM_NONE;
1407 	}
1408 }
1409 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1410 
1411 /**
1412  * __qdf_nbuf_get_tid() - get tid
1413  * @skb: Pointer to network buffer
1414  *
1415  * Return: tid
1416  */
1417 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1418 {
1419 	return skb->priority;
1420 }
1421 qdf_export_symbol(__qdf_nbuf_get_tid);
1422 
1423 /**
1424  * __qdf_nbuf_set_tid() - set tid
1425  * @skb: Pointer to network buffer
 * @tid: TID value to set
1426  *
1427  * Return: none
1428  */
1429 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1430 {
1431 	skb->priority = tid;
1432 }
1433 qdf_export_symbol(__qdf_nbuf_set_tid);
1434 
1435 /**
1436  * __qdf_nbuf_get_exemption_type() - get exemption type
1437  * @skb: Pointer to network buffer
1438  *
1439  * Return: exemption type
1440  */
1441 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1442 {
1443 	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1444 }
1445 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1446 
1447 /**
1448  * __qdf_nbuf_reg_trace_cb() - register trace callback
1449  * @cb_func_ptr: Pointer to trace callback function
1450  *
1451  * Return: none
1452  */
1453 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1454 {
1455 	qdf_trace_update_cb = cb_func_ptr;
1456 }
1457 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1458 
1459 /**
1460  * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype
1461  *              of DHCP packet.
1462  * @data: Pointer to DHCP packet data buffer
1463  *
1464  * This func. returns the subtype of DHCP packet.
1465  *
1466  * Return: subtype of the DHCP packet.
1467  */
1468 enum qdf_proto_subtype
1469 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1470 {
1471 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1472 
1473 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1474 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1475 					QDF_DHCP_OPTION53_LENGTH)) {
1476 
1477 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1478 		case QDF_DHCP_DISCOVER:
1479 			subtype = QDF_PROTO_DHCP_DISCOVER;
1480 			break;
1481 		case QDF_DHCP_REQUEST:
1482 			subtype = QDF_PROTO_DHCP_REQUEST;
1483 			break;
1484 		case QDF_DHCP_OFFER:
1485 			subtype = QDF_PROTO_DHCP_OFFER;
1486 			break;
1487 		case QDF_DHCP_ACK:
1488 			subtype = QDF_PROTO_DHCP_ACK;
1489 			break;
1490 		case QDF_DHCP_NAK:
1491 			subtype = QDF_PROTO_DHCP_NACK;
1492 			break;
1493 		case QDF_DHCP_RELEASE:
1494 			subtype = QDF_PROTO_DHCP_RELEASE;
1495 			break;
1496 		case QDF_DHCP_INFORM:
1497 			subtype = QDF_PROTO_DHCP_INFORM;
1498 			break;
1499 		case QDF_DHCP_DECLINE:
1500 			subtype = QDF_PROTO_DHCP_DECLINE;
1501 			break;
1502 		default:
1503 			break;
1504 		}
1505 	}
1506 
1507 	return subtype;
1508 }
1509 
1510 #define EAPOL_WPA_KEY_INFO_ACK BIT(7)
1511 #define EAPOL_WPA_KEY_INFO_MIC BIT(8)
1512 #define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
1513 
1514 /**
1515  * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key
1516  * @data: Pointer to EAPOL packet data buffer
1517  *
1518  * We can distinguish M1/M3 from M2/M4 by the ack bit in the keyinfo field
1519  * The relationship between the ack bit and the EAPOL type is as follows:
1520  *
1521  *  EAPOL type  |   M1    M2   M3  M4
1522  * --------------------------------------
1523  *     Ack      |   1     0    1   0
1524  * --------------------------------------
1525  *
1526  * Then we can differentiate M1 from M3, and M2 from M4, as follows:
1527  * M2/M4: M4 has a zero key data length or an all-zero nonce.
1528  * M1/M3: M3 sets the MIC or encrypted-key-data bit in the key info field.
1529  *
1530  * Return: subtype of the EAPOL packet.
1531  */
1532 static inline enum qdf_proto_subtype
1533 __qdf_nbuf_data_get_eapol_key(uint8_t *data)
1534 {
1535 	uint16_t key_info, key_data_length;
1536 	enum qdf_proto_subtype subtype;
1537 	uint64_t *key_nonce;
1538 
1539 	key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
1540 			(data + EAPOL_KEY_INFO_OFFSET)));
1541 
1542 	key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
1543 				(data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
1544 	key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);
1545 
1546 	if (key_info & EAPOL_WPA_KEY_INFO_ACK)
1547 		if (key_info &
1548 		    (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
1549 			subtype = QDF_PROTO_EAPOL_M3;
1550 		else
1551 			subtype = QDF_PROTO_EAPOL_M1;
1552 	else
1553 		if (key_data_length == 0 ||
1554 		    !((*key_nonce) || (*(key_nonce + 1)) ||
1555 		      (*(key_nonce + 2)) || (*(key_nonce + 3))))
1556 			subtype = QDF_PROTO_EAPOL_M4;
1557 		else
1558 			subtype = QDF_PROTO_EAPOL_M2;
1559 
1560 	return subtype;
1561 }
1562 
1563 /**
1564  * __qdf_nbuf_data_get_eap_code() - Get EAPOL code
1565  * @data: Pointer to EAPOL packet data buffer
1566  *
1567  * Return: subtype of the EAPOL packet.
1568  */
1569 static inline enum qdf_proto_subtype
1570 __qdf_nbuf_data_get_eap_code(uint8_t *data)
1571 {
1572 	uint8_t code = *(data + EAP_CODE_OFFSET);
1573 
1574 	switch (code) {
1575 	case QDF_EAP_REQUEST:
1576 		return QDF_PROTO_EAP_REQUEST;
1577 	case QDF_EAP_RESPONSE:
1578 		return QDF_PROTO_EAP_RESPONSE;
1579 	case QDF_EAP_SUCCESS:
1580 		return QDF_PROTO_EAP_SUCCESS;
1581 	case QDF_EAP_FAILURE:
1582 		return QDF_PROTO_EAP_FAILURE;
1583 	case QDF_EAP_INITIATE:
1584 		return QDF_PROTO_EAP_INITIATE;
1585 	case QDF_EAP_FINISH:
1586 		return QDF_PROTO_EAP_FINISH;
1587 	default:
1588 		return QDF_PROTO_INVALID;
1589 	}
1590 }
1591 
1592 /**
1593  * __qdf_nbuf_data_get_eapol_subtype() - get the subtype of EAPOL packet.
1594  * @data: Pointer to EAPOL packet data buffer
1595  *
1596  * This func. returns the subtype of EAPOL packet.
1597  *
1598  * Return: subtype of the EAPOL packet.
1599  */
1600 enum qdf_proto_subtype
1601 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1602 {
1603 	uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
1604 
1605 	switch (pkt_type) {
1606 	case EAPOL_PACKET_TYPE_EAP:
1607 		return __qdf_nbuf_data_get_eap_code(data);
1608 	case EAPOL_PACKET_TYPE_START:
1609 		return QDF_PROTO_EAPOL_START;
1610 	case EAPOL_PACKET_TYPE_LOGOFF:
1611 		return QDF_PROTO_EAPOL_LOGOFF;
1612 	case EAPOL_PACKET_TYPE_KEY:
1613 		return __qdf_nbuf_data_get_eapol_key(data);
1614 	case EAPOL_PACKET_TYPE_ASF:
1615 		return QDF_PROTO_EAPOL_ASF;
1616 	default:
1617 		return QDF_PROTO_INVALID;
1618 	}
1619 }
1620 
1621 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
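
/*
 * Example (illustrative sketch): logging code can combine the EAPOL check
 * with the subtype helper above to annotate 4-way handshake frames:
 *
 *	if (__qdf_nbuf_data_is_ipv4_eapol_pkt(data)) {
 *		enum qdf_proto_subtype sub;
 *
 *		sub = __qdf_nbuf_data_get_eapol_subtype(data);
 *		...sub is QDF_PROTO_EAPOL_M1..M4 for key frames...
 *	}
 */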
1622 
1623 /**
1624  * __qdf_nbuf_data_get_arp_subtype() - get the subtype
1625  *            of ARP packet.
1626  * @data: Pointer to ARP packet data buffer
1627  *
1628  * This func. returns the subtype of ARP packet.
1629  *
1630  * Return: subtype of the ARP packet.
1631  */
1632 enum qdf_proto_subtype
1633 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1634 {
1635 	uint16_t subtype;
1636 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1637 
1638 	subtype = (uint16_t)(*(uint16_t *)
1639 			(data + ARP_SUB_TYPE_OFFSET));
1640 
1641 	switch (QDF_SWAP_U16(subtype)) {
1642 	case ARP_REQUEST:
1643 		proto_subtype = QDF_PROTO_ARP_REQ;
1644 		break;
1645 	case ARP_RESPONSE:
1646 		proto_subtype = QDF_PROTO_ARP_RES;
1647 		break;
1648 	default:
1649 		break;
1650 	}
1651 
1652 	return proto_subtype;
1653 }
1654 
1655 /**
1656  * __qdf_nbuf_data_get_icmp_subtype() - get the subtype
1657  *            of IPV4 ICMP packet.
1658  * @data: Pointer to IPV4 ICMP packet data buffer
1659  *
1660  * This func. returns the subtype of ICMP packet.
1661  *
1662  * Return: subtype of the ICMP packet.
1663  */
1664 enum qdf_proto_subtype
1665 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1666 {
1667 	uint8_t subtype;
1668 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1669 
1670 	subtype = (uint8_t)(*(uint8_t *)
1671 			(data + ICMP_SUBTYPE_OFFSET));
1672 
1673 	switch (subtype) {
1674 	case ICMP_REQUEST:
1675 		proto_subtype = QDF_PROTO_ICMP_REQ;
1676 		break;
1677 	case ICMP_RESPONSE:
1678 		proto_subtype = QDF_PROTO_ICMP_RES;
1679 		break;
1680 	default:
1681 		break;
1682 	}
1683 
1684 	return proto_subtype;
1685 }
1686 
1687 /**
1688  * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype
1689  *            of IPV6 ICMPV6 packet.
1690  * @data: Pointer to IPV6 ICMPV6 packet data buffer
1691  *
1692  * This func. returns the subtype of ICMPV6 packet.
1693  *
1694  * Return: subtype of the ICMPV6 packet.
1695  */
1696 enum qdf_proto_subtype
1697 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1698 {
1699 	uint8_t subtype;
1700 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1701 
1702 	subtype = (uint8_t)(*(uint8_t *)
1703 			(data + ICMPV6_SUBTYPE_OFFSET));
1704 
1705 	switch (subtype) {
1706 	case ICMPV6_REQUEST:
1707 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1708 		break;
1709 	case ICMPV6_RESPONSE:
1710 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1711 		break;
1712 	case ICMPV6_RS:
1713 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1714 		break;
1715 	case ICMPV6_RA:
1716 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1717 		break;
1718 	case ICMPV6_NS:
1719 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1720 		break;
1721 	case ICMPV6_NA:
1722 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1723 		break;
1724 	default:
1725 		break;
1726 	}
1727 
1728 	return proto_subtype;
1729 }
1730 
1731 /**
1732  * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
1733  * @skb: Buffer
1734  *
1735  * This function checks whether an IPv4 packet is the last fragment.
1736  * Caller has to call this function for IPv4 packets only.
1737  *
1738  * Return: True if IPv4 packet is last fragment otherwise false
1739  */
1740 bool
1741 __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1742 {
1743 	if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1744 		return true;
1745 
1746 	return false;
1747 }
1748 
1749 /**
1750  * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for IPv4 packet
1751  * @data: pointer to skb payload
1752  * @tos: value of TOS to be set
1753  *
1754  * This func. set the TOS field of IPv4 packet.
1755  *
1756  * Return: None
1757  */
1758 void
1759 __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
1760 {
1761 	*(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
1762 }
1763 
1764 /**
1765  * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of IPv4 packet
1766  * @data: Pointer to skb payload
1767  *
1768  * This func. returns the TOS type of IPv4 packet.
1769  *
1770  * Return: TOS type of IPv4 packet.
1771  */
1772 uint8_t
1773 __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1774 {
1775 	uint8_t tos;
1776 
1777 	tos = (uint8_t)(*(uint8_t *)(data +
1778 			QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1779 	return tos;
1780 }
1781 
1782 /**
1783  * __qdf_nbuf_data_get_ipv4_proto() - get the proto type
1784  *            of IPV4 packet.
1785  * @data: Pointer to IPV4 packet data buffer
1786  *
1787  * This func. returns the proto type of IPV4 packet.
1788  *
1789  * Return: proto type of IPV4 packet.
1790  */
1791 uint8_t
1792 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1793 {
1794 	uint8_t proto_type;
1795 
1796 	proto_type = (uint8_t)(*(uint8_t *)(data +
1797 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1798 	return proto_type;
1799 }
1800 
1801 /**
1802  * __qdf_nbuf_data_get_ipv6_tc() - get the TC field
1803  *                                 of IPv6 packet.
1804  * @data: Pointer to IPv6 packet data buffer
1805  *
1806  * This func. returns the TC field of IPv6 packet.
1807  *
1808  * Return: traffic classification of IPv6 packet.
1809  */
1810 uint8_t
1811 __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
1812 {
1813 	struct ipv6hdr *hdr;
1814 
1815 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1816 	return ip6_tclass(ip6_flowinfo(hdr));
1817 }
1818 
1819 /**
1820  * __qdf_nbuf_data_set_ipv6_tc() - set the TC field
1821  *                                 of IPv6 packet.
1822  * @data: Pointer to skb payload
1823  * @tc: value to set to IPv6 header TC field
1824  *
1825  * This func. set the TC field of IPv6 header.
1826  *
1827  * Return: None
1828  */
1829 void
1830 __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
1831 {
1832 	struct ipv6hdr *hdr;
1833 
1834 	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1835 	ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
1836 }
1837 
1838 /**
1839  * __qdf_nbuf_data_get_ipv6_proto() - get the proto type
1840  *            of IPV6 packet.
1841  * @data: Pointer to IPV6 packet data buffer
1842  *
1843  * This func. returns the proto type of IPV6 packet.
1844  *
1845  * Return: proto type of IPV6 packet.
1846  */
1847 uint8_t
1848 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1849 {
1850 	uint8_t proto_type;
1851 
1852 	proto_type = (uint8_t)(*(uint8_t *)(data +
1853 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1854 	return proto_type;
1855 }
1856 
1857 /**
1858  * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is a ipv4 packet
1859  * @data: Pointer to network data
1860  *
1861  * This api is for Tx packets.
1862  *
1863  * Return: true if packet is ipv4 packet
1864  *	   false otherwise
1865  */
1866 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1867 {
1868 	uint16_t ether_type;
1869 
1870 	ether_type = (uint16_t)(*(uint16_t *)(data +
1871 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1872 
1873 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1874 		return true;
1875 	else
1876 		return false;
1877 }
1878 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1879 
1880 /**
1881  * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a DHCP packet
1882  * @data: Pointer to network data buffer
1883  *
1884  * This api is for ipv4 packet.
1885  *
1886  * Return: true if packet is DHCP packet
1887  *	   false otherwise
1888  */
1889 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1890 {
1891 	uint16_t sport;
1892 	uint16_t dport;
1893 	uint8_t ipv4_offset;
1894 	uint8_t ipv4_hdr_len;
1895 	struct iphdr *iphdr;
1896 
1897 	if (__qdf_nbuf_get_ether_type(data) !=
1898 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1899 		return false;
1900 
1901 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1902 	iphdr = (struct iphdr *)(data + ipv4_offset);
1903 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1904 
1905 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1906 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1907 			      sizeof(uint16_t));
1908 
1909 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1910 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1911 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1912 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1913 		return true;
1914 	else
1915 		return false;
1916 }
1917 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
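
/*
 * Example (illustrative sketch): the DHCP check above is usually followed by
 * the subtype helper to narrow the frame down to a specific message; the
 * handler name below is hypothetical:
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data) &&
 *	    __qdf_nbuf_data_get_dhcp_subtype(data) == QDF_PROTO_DHCP_DISCOVER)
 *		handle_dhcp_discover(nbuf);
 */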
1918 
1919 /**
1920  * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an EAPOL packet
1921  * @data: Pointer to network data buffer
1922  *
1923  * This api is for ipv4 packet.
1924  *
1925  * Return: true if packet is EAPOL packet
1926  *	   false otherwise.
1927  */
1928 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1929 {
1930 	uint16_t ether_type;
1931 
1932 	ether_type = __qdf_nbuf_get_ether_type(data);
1933 
1934 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE))
1935 		return true;
1936 	else
1937 		return false;
1938 }
1939 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1940 
1941 /**
1942  * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a WAPI packet
1943  * @skb: Pointer to network buffer
1944  *
1945  * This api is for ipv4 packet.
1946  *
1947  * Return: true if packet is WAPI packet
1948  *	   false otherwise.
1949  */
1950 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1951 {
1952 	uint16_t ether_type;
1953 
1954 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1955 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1956 
1957 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1958 		return true;
1959 	else
1960 		return false;
1961 }
1962 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1963 
1964 /**
1965  * qdf_nbuf_is_ipv6_vlan_pkt() - check whether packet is vlan IPV6
1966  * @data: Pointer to network data buffer
1967  *
1968  * This api is for vlan header included ipv6 packet.
1969  *
1970  * Return: true if packet is vlan header included IPV6
1971  *	   false otherwise.
1972  */
1973 static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
1974 {
1975 	uint16_t ether_type;
1976 
1977 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1978 
1979 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1980 		ether_type = *(uint16_t *)(data +
1981 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1982 
1983 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1984 			return true;
1985 	}
1986 	return false;
1987 }
1988 
1989 /**
1990  * qdf_nbuf_is_ipv4_vlan_pkt() - check whether packet is vlan IPV4
1991  * @data: Pointer to network data buffer
1992  *
1993  * This api is for vlan header included ipv4 packet.
1994  *
1995  * Return: true if packet is vlan header included IPV4
1996  *	   false otherwise.
1997  */
1998 static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
1999 {
2000 	uint16_t ether_type;
2001 
2002 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
2003 
2004 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
2005 		ether_type = *(uint16_t *)(data +
2006 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
2007 
2008 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
2009 			return true;
2010 	}
2011 	return false;
2012 }
2013 
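/*
 * For the two VLAN helpers above: an 802.1Q tag inserts four bytes after the
 * source MAC, so the encapsulated EtherType sits at offset 16 instead of 12
 * and the L3 header starts at 18 instead of 14. The QDF_NBUF_TRAC_VLAN_*
 * offsets are assumed to account for exactly that shift.
 */
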
2014 /**
2015  * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is an IGMP packet
2016  * @data: Pointer to network data buffer
2017  *
2018  * This api is for ipv4 packet.
2019  *
2020  * Return: true if packet is igmp packet
2021  *	   false otherwise.
2022  */
2023 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
2024 {
2025 	uint8_t pkt_type;
2026 
2027 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2028 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2029 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2030 		goto is_igmp;
2031 	}
2032 
2033 	if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
2034 		pkt_type = (uint8_t)(*(uint8_t *)(
2035 				data +
2036 				QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
2037 		goto is_igmp;
2038 	}
2039 
2040 	return false;
2041 is_igmp:
2042 	if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
2043 		return true;
2044 
2045 	return false;
2046 }
2047 
2048 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
2049 
2050 /**
2051  * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is an IGMP packet
2052  * @data: Pointer to network data buffer
2053  *
2054  * This api is for ipv6 packet.
2055  *
2056  * Return: true if packet is igmp packet
2057  *	   false otherwise.
2058  */
2059 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
2060 {
2061 	uint8_t pkt_type;
2062 	uint8_t next_hdr;
2063 
2064 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2065 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2066 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2067 		next_hdr = (uint8_t)(*(uint8_t *)(
2068 				data +
2069 				QDF_NBUF_TRAC_IPV6_OFFSET +
2070 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2071 		goto is_mld;
2072 	}
2073 
2074 	if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
2075 		pkt_type = (uint8_t)(*(uint8_t *)(
2076 				data +
2077 				QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
2078 		next_hdr = (uint8_t)(*(uint8_t *)(
2079 				data +
2080 				QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
2081 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2082 		goto is_mld;
2083 	}
2084 
2085 	return false;
2086 is_mld:
2087 	if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2088 		return true;
2089 	if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
2090 	    (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
2091 		return true;
2092 
2093 	return false;
2094 }
2095 
2096 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
2097 
2098 /**
2099  * __qdf_nbuf_is_ipv4_igmp_leave_pkt() - check if skb is an IGMP leave packet
2100  * @buf: Pointer to network buffer
2101  *
2102  * This api is for ipv4 packet.
2103  *
2104  * Return: true if packet is an IGMP leave packet
2105  *	   false otherwise.
2106  */
2107 bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
2108 {
2109 	qdf_ether_header_t *eh = NULL;
2110 	uint16_t ether_type;
2111 	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2112 
2113 	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2114 	ether_type = eh->ether_type;
2115 
2116 	if (ether_type == htons(ETH_P_8021Q)) {
2117 		struct vlan_ethhdr *veth =
2118 				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
2119 		ether_type = veth->h_vlan_encapsulated_proto;
2120 		eth_hdr_size = sizeof(struct vlan_ethhdr);
2121 	}
2122 
2123 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
2124 		struct iphdr *iph = NULL;
2125 		struct igmphdr *ih = NULL;
2126 
2127 		iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2128 		ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
2129 		switch (ih->type) {
2130 		case IGMP_HOST_LEAVE_MESSAGE:
2131 			return true;
2132 		case IGMPV3_HOST_MEMBERSHIP_REPORT:
2133 		{
2134 			struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
2135 			struct igmpv3_grec *grec = NULL;
2136 			int num = 0;
2137 			int i = 0;
2138 			int len = 0;
2139 			int type = 0;
2140 
2141 			num = ntohs(ihv3->ngrec);
2142 			for (i = 0; i < num; i++) {
2143 				grec = (void *)((uint8_t *)(ihv3->grec) + len);
2144 				type = grec->grec_type;
2145 				if ((type == IGMPV3_MODE_IS_INCLUDE) ||
2146 				    (type == IGMPV3_CHANGE_TO_INCLUDE))
2147 					return true;
2148 
2149 				len += sizeof(struct igmpv3_grec);
2150 				len += ntohs(grec->grec_nsrcs) * 4;
2151 			}
2152 			break;
2153 		}
2154 		default:
2155 			break;
2156 		}
2157 	}
2158 
2159 	return false;
2160 }
2161 
2162 qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
2163 
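/*
 * Note on the IGMPv3 walk above: a host leaving a group either sends an
 * explicit IGMPv2 leave (IGMP_HOST_LEAVE_MESSAGE) or an IGMPv3 membership
 * report whose group records switch to INCLUDE mode (IS_INCLUDE or
 * CHANGE_TO_INCLUDE, conventionally with an empty source list). Each record
 * is stepped over by sizeof(struct igmpv3_grec) plus four bytes per listed
 * source; records carrying auxiliary data words are assumed absent, since
 * their length is not added to the stride.
 */
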
2164 /**
2165  * __qdf_nbuf_is_ipv6_igmp_leave_pkt() - check if skb is an IGMP leave packet
2166  * @buf: Pointer to network buffer
2167  *
2168  * This api is for ipv6 packet.
2169  *
2170  * Return: true if packet is an IGMP leave packet
2171  *	   false otherwise.
2172  */
2173 bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
2174 {
2175 	qdf_ether_header_t *eh = NULL;
2176 	uint16_t ether_type;
2177 	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2178 
2179 	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2180 	ether_type = eh->ether_type;
2181 
2182 	if (ether_type == htons(ETH_P_8021Q)) {
2183 		struct vlan_ethhdr *veth =
2184 				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
2185 		ether_type = veth->h_vlan_encapsulated_proto;
2186 		eth_hdr_size = sizeof(struct vlan_ethhdr);
2187 	}
2188 
2189 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
2190 		struct ipv6hdr *ip6h = NULL;
2191 		struct icmp6hdr *icmp6h = NULL;
2192 		uint8_t nexthdr;
2193 		uint16_t frag_off = 0;
2194 		int offset;
2195 		qdf_nbuf_t buf_copy = NULL;
2196 
2197 		ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2198 		if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
2199 		    ip6h->payload_len == 0)
2200 			return false;
2201 
2202 		buf_copy = qdf_nbuf_copy(buf);
2203 		if (qdf_unlikely(!buf_copy))
2204 			return false;
2205 
2206 		nexthdr = ip6h->nexthdr;
2207 		offset = ipv6_skip_exthdr(buf_copy,
2208 					  eth_hdr_size + sizeof(*ip6h),
2209 					  &nexthdr,
2210 					  &frag_off);
2211 		qdf_nbuf_free(buf_copy);
2212 		if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
2213 			return false;
2214 
2215 		icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);
2216 
2217 		switch (icmp6h->icmp6_type) {
2218 		case ICMPV6_MGM_REDUCTION:
2219 			return true;
2220 		case ICMPV6_MLD2_REPORT:
2221 		{
2222 			struct mld2_report *mh = NULL;
2223 			struct mld2_grec *grec = NULL;
2224 			int num = 0;
2225 			int i = 0;
2226 			int len = 0;
2227 			int type = -1;
2228 
2229 			mh = (struct mld2_report *)icmp6h;
2230 			num = ntohs(mh->mld2r_ngrec);
2231 			for (i = 0; i < num; i++) {
2232 				grec = (void *)(((uint8_t *)mh->mld2r_grec) +
2233 						len);
2234 				type = grec->grec_type;
2235 				if ((type == MLD2_MODE_IS_INCLUDE) ||
2236 				    (type == MLD2_CHANGE_TO_INCLUDE))
2237 					return true;
2238 				else if (type == MLD2_BLOCK_OLD_SOURCES)
2239 					return true;
2240 
2241 				len += sizeof(struct mld2_grec);
2242 				len += ntohs(grec->grec_nsrcs) *
2243 						sizeof(struct in6_addr);
2244 			}
2245 			break;
2246 		}
2247 		default:
2248 			break;
2249 		}
2250 	}
2251 
2252 	return false;
2253 }
2254 
2255 qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
2256 
2257 /**
2258  * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
2259  * @skb: Pointer to network buffer
2260  *
2261  * This api is for ipv4 packet.
2262  *
2263  * Return: true if packet is tdls packet
2264  *	   false otherwise.
2265  */
2266 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
2267 {
2268 	uint16_t ether_type;
2269 
2270 	ether_type = *(uint16_t *)(skb->data +
2271 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
2272 
2273 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
2274 		return true;
2275 	else
2276 		return false;
2277 }
2278 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
2279 
2280 /**
2281  * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an ARP packet
2282  * @data: Pointer to network data buffer
2283  *
2284  * This api is for ipv4 packet.
2285  *
2286  * Return: true if packet is ARP packet
2287  *	   false otherwise.
2288  */
2289 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
2290 {
2291 	uint16_t ether_type;
2292 
2293 	ether_type = __qdf_nbuf_get_ether_type(data);
2294 
2295 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
2296 		return true;
2297 	else
2298 		return false;
2299 }
2300 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
2301 
2302 /**
2303  * __qdf_nbuf_data_is_arp_req() - check if skb data is an ARP request
2304  * @data: Pointer to network data buffer
2305  *
2306  * This api is for ipv4 packet.
2307  *
2308  * Return: true if packet is ARP request
2309  *	   false otherwise.
2310  */
2311 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
2312 {
2313 	uint16_t op_code;
2314 
2315 	op_code = (uint16_t)(*(uint16_t *)(data +
2316 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2317 
2318 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
2319 		return true;
2320 	return false;
2321 }
2322 
2323 /**
2324  * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an ARP response
2325  * @data: Pointer to network data buffer
2326  *
2327  * This api is for ipv4 packet.
2328  *
2329  * Return: true if packet is ARP response
2330  *	   false otherwise.
2331  */
2332 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
2333 {
2334 	uint16_t op_code;
2335 
2336 	op_code = (uint16_t)(*(uint16_t *)(data +
2337 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2338 
2339 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
2340 		return true;
2341 	return false;
2342 }
2343 
2344 /**
2345  * __qdf_nbuf_get_arp_src_ip() - get ARP source IP
2346  * @data: Pointer to network data buffer
2347  *
2348  * This api is for ipv4 packet.
2349  *
2350  * Return: ARP packet source IP value.
2351  */
2352 uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
2353 {
2354 	uint32_t src_ip;
2355 
2356 	src_ip = (uint32_t)(*(uint32_t *)(data +
2357 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
2358 
2359 	return src_ip;
2360 }
2361 
2362 /**
2363  * __qdf_nbuf_get_arp_tgt_ip() - get ARP target IP
2364  * @data: Pointer to network data buffer
2365  *
2366  * This api is for ipv4 packet.
2367  *
2368  * Return: ARP packet target IP value.
2369  */
2370 uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
2371 {
2372 	uint32_t tgt_ip;
2373 
2374 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2375 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
2376 
2377 	return tgt_ip;
2378 }
2379 
2380 /**
2381  * __qdf_nbuf_get_dns_domain_name() - get dns domain name
2382  * @data: Pointer to network data buffer
2383  * @len: length to copy
2384  *
2385  * This api is for dns domain name
2386  *
2387  * Return: dns domain name.
2388  */
2389 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
2390 {
2391 	uint8_t *domain_name;
2392 
2393 	domain_name = (uint8_t *)
2394 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
2395 	return domain_name;
2396 }
2397 
2398 
2399 /**
2400  * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
2401  * @data: Pointer to network data buffer
2402  *
2403  * This api is for dns query packet.
2404  *
2405  * Return: true if packet is dns query packet.
2406  *	   false otherwise.
2407  */
2408 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
2409 {
2410 	uint16_t op_code;
2411 	uint16_t tgt_port;
2412 
2413 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2414 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
2415 	/* A standard DNS query always goes to destination port 53. */
2416 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2417 		op_code = (uint16_t)(*(uint16_t *)(data +
2418 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2419 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2420 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
2421 			return true;
2422 	}
2423 	return false;
2424 }
2425 
2426 /**
2427  * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
2428  * @data: Pointer to network data buffer
2429  *
2430  * This api is for dns query response.
2431  *
2432  * Return: true if packet is dns response packet.
2433  *	   false otherwise.
2434  */
2435 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
2436 {
2437 	uint16_t op_code;
2438 	uint16_t src_port;
2439 
2440 	src_port = (uint16_t)(*(uint16_t *)(data +
2441 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
2442 	/* A standard DNS response always comes from source port 53. */
2443 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2444 		op_code = (uint16_t)(*(uint16_t *)(data +
2445 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2446 
2447 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2448 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
2449 			return true;
2450 	}
2451 	return false;
2452 }
2453 
2454 /**
2455  * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
2456  * @data: Pointer to network data buffer
2457  *
2458  * This api is to check if the packet is tcp fin.
2459  *
2460  * Return: true if packet is tcp fin packet.
2461  *         false otherwise.
2462  */
2463 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
2464 {
2465 	uint8_t op_code;
2466 
2467 	op_code = (uint8_t)(*(uint8_t *)(data +
2468 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2469 
2470 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
2471 		return true;
2472 
2473 	return false;
2474 }
2475 
2476 /**
2477  * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
2478  * @data: Pointer to network data buffer
2479  *
2480  * This api is to check if the tcp packet is fin ack.
2481  *
2482  * Return: true if packet is tcp fin ack packet.
2483  *         false otherwise.
2484  */
2485 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
2486 {
2487 	uint8_t op_code;
2488 
2489 	op_code = (uint8_t)(*(uint8_t *)(data +
2490 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2491 
2492 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2493 		return true;
2494 
2495 	return false;
2496 }
2497 
2498 /**
2499  * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
2500  * @data: Pointer to network data buffer
2501  *
2502  * This api is for tcp syn packet.
2503  *
2504  * Return: true if packet is tcp syn packet.
2505  *	   false otherwise.
2506  */
2507 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2508 {
2509 	uint8_t op_code;
2510 
2511 	op_code = (uint8_t)(*(uint8_t *)(data +
2512 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2513 
2514 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2515 		return true;
2516 	return false;
2517 }
2518 
2519 /**
2520  * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
2521  * @data: Pointer to network data buffer
2522  *
2523  * This api is for tcp syn ack packet.
2524  *
2525  * Return: true if packet is tcp syn ack packet.
2526  *	   false otherwise.
2527  */
2528 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2529 {
2530 	uint8_t op_code;
2531 
2532 	op_code = (uint8_t)(*(uint8_t *)(data +
2533 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2534 
2535 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2536 		return true;
2537 	return false;
2538 }
2539 
2540 /**
2541  * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
2542  * @data: Pointer to network data buffer
2543  *
2544  * This api is to check if the tcp packet is rst.
2545  *
2546  * Return: true if packet is tcp rst packet.
2547  *         false otherwise.
2548  */
2549 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2550 {
2551 	uint8_t op_code;
2552 
2553 	op_code = (uint8_t)(*(uint8_t *)(data +
2554 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2555 
2556 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2557 		return true;
2558 
2559 	return false;
2560 }
2561 
2562 /**
2563  * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
2564  * @data: Pointer to network data buffer
2565  *
2566  * This api is for tcp ack packet.
2567  *
2568  * Return: true if packet is tcp ack packet.
2569  *	   false otherwise.
2570  */
2571 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2572 {
2573 	uint8_t op_code;
2574 
2575 	op_code = (uint8_t)(*(uint8_t *)(data +
2576 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2577 
2578 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2579 		return true;
2580 	return false;
2581 }
2582 
2583 /**
2584  * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
2585  * @data: Pointer to network data buffer
2586  *
2587  * This api is for tcp packet.
2588  *
2589  * Return: tcp source port value.
2590  */
2591 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2592 {
2593 	uint16_t src_port;
2594 
2595 	src_port = (uint16_t)(*(uint16_t *)(data +
2596 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2597 
2598 	return src_port;
2599 }
2600 
2601 /**
2602  * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
2603  * @data: Pointer to network data buffer
2604  *
2605  * This api is for tcp packet.
2606  *
2607  * Return: tcp destination port value.
2608  */
2609 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2610 {
2611 	uint16_t tgt_port;
2612 
2613 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2614 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2615 
2616 	return tgt_port;
2617 }
2618 
2619 /**
2620  * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an ICMPv4 request
2621  * @data: Pointer to network data buffer
2622  *
2623  * This api is for ipv4 req packet.
2624  *
2625  * Return: true if packet is icmpv4 request
2626  *	   false otherwise.
2627  */
2628 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2629 {
2630 	uint8_t op_code;
2631 
2632 	op_code = (uint8_t)(*(uint8_t *)(data +
2633 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2634 
2635 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2636 		return true;
2637 	return false;
2638 }
2639 
2640 /**
2641  * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an ICMPv4 response
2642  * @data: Pointer to network data buffer
2643  *
2644  * This api is for ipv4 res packet.
2645  *
2646  * Return: true if packet is icmpv4 response
2647  *	   false otherwise.
2648  */
2649 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2650 {
2651 	uint8_t op_code;
2652 
2653 	op_code = (uint8_t)(*(uint8_t *)(data +
2654 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2655 
2656 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2657 		return true;
2658 	return false;
2659 }
2660 
2661 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2662 {
2663 	uint8_t op_code;
2664 
2665 	op_code = (uint8_t)(*(uint8_t *)(data +
2666 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2667 
2668 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2669 		return true;
2670 	return false;
2671 }
2672 
2673 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2674 
2675 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2676 {
2677 	uint8_t subtype;
2678 
2679 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2680 
2681 	if (subtype == ICMPV6_REDIRECT)
2682 		return true;
2683 	return false;
2684 }
2685 
2686 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2687 
2688 /**
2689  * __qdf_nbuf_get_icmpv4_src_ip() - get ICMPv4 source IP
2690  * @data: Pointer to network data buffer
2691  *
2692  * This api is for ipv4 packet.
2693  *
2694  * Return: icmpv4 packet source IP value.
2695  */
2696 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2697 {
2698 	uint32_t src_ip;
2699 
2700 	src_ip = (uint32_t)(*(uint32_t *)(data +
2701 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2702 
2703 	return src_ip;
2704 }
2705 
2706 /**
2707  * __qdf_nbuf_get_icmpv4_tgt_ip() - get ICMPv4 target IP
2708  * @data: Pointer to network data buffer
2709  *
2710  * This api is for ipv4 packet.
2711  *
2712  * Return: icmpv4 packet target IP value.
2713  */
2714 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2715 {
2716 	uint32_t tgt_ip;
2717 
2718 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2719 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2720 
2721 	return tgt_ip;
2722 }
2723 
2724 
2725 /**
2726  * __qdf_nbuf_data_is_ipv6_pkt() - check if it is IPV6 packet.
2727  * @data: Pointer to IPV6 packet data buffer
2728  *
2729  * This func. checks whether it is a IPV6 packet or not.
2730  *
2731  * Return: TRUE if it is a IPV6 packet
2732  *         FALSE if not
2733  */
2734 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2735 {
2736 	uint16_t ether_type;
2737 
2738 	ether_type = (uint16_t)(*(uint16_t *)(data +
2739 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2740 
2741 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2742 		return true;
2743 	else
2744 		return false;
2745 }
2746 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2747 
2748 /**
2749  * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
2750  * @data: Pointer to network data buffer
2751  *
2752  * This api is for ipv6 packet.
2753  *
2754  * Return: true if packet is DHCP packet
2755  *	   false otherwise
2756  */
2757 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2758 {
2759 	uint16_t sport;
2760 	uint16_t dport;
2761 	uint8_t ipv6_offset;
2762 
2763 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2764 		return false;
2765 
2766 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2767 	sport = *(uint16_t *)(data + ipv6_offset +
2768 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2769 	dport = *(uint16_t *)(data + ipv6_offset +
2770 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2771 			      sizeof(uint16_t));
2772 
2773 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2774 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2775 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2776 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2777 		return true;
2778 	else
2779 		return false;
2780 }
2781 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2782 
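/*
 * As with the IPv4 variant, the DHCPv6 check relies on the well-known ports:
 * clients transmit from UDP port 546 to servers/relay agents on 547 and
 * replies flow the other way, so either ordering of (sport, dport) across
 * those two values identifies the exchange. The QDF_NBUF_TRAC_DHCP6_*
 * constants are assumed to hold exactly these port numbers.
 */
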
2783 /**
2784  * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
2785  * @data: Pointer to network data buffer
2786  *
2787  * This api is for ipv6 packet.
2788  *
2789  * Return: true if packet is MDNS packet
2790  *	   false otherwise
2791  */
2792 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2793 {
2794 	uint16_t sport;
2795 	uint16_t dport;
2796 
2797 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2798 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2799 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2800 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2801 					sizeof(uint16_t));
2802 
2803 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2804 	    dport == sport)
2805 		return true;
2806 	else
2807 		return false;
2808 }
2809 
2810 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2811 
2812 /**
2813  * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is IPV4 multicast packet.
2814  * @data: Pointer to IPV4 packet data buffer
2815  *
2816  * This func. checks whether it is a IPV4 multicast packet or not.
2817  *
2818  * Return: TRUE if it is a IPV4 multicast packet
2819  *         FALSE if not
2820  */
2821 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2822 {
2823 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2824 		uint32_t *dst_addr =
2825 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2826 
2827 		/*
2828 		 * Check the top nibble of the IPv4 destination address;
2829 		 * 0xE (224.0.0.0/4) indicates a multicast address.
2830 		 */
2831 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2832 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2833 			return true;
2834 		else
2835 			return false;
2836 	} else
2837 		return false;
2838 }
2839 
2840 /**
2841  * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is IPV6 multicast packet.
2842  * @data: Pointer to IPV6 packet data buffer
2843  *
2844  * This func. checks whether it is a IPV6 multicast packet or not.
2845  *
2846  * Return: TRUE if it is a IPV6 multicast packet
2847  *         FALSE if not
2848  */
2849 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2850 {
2851 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2852 		uint16_t *dst_addr;
2853 
2854 		dst_addr = (uint16_t *)
2855 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2856 
2857 		/*
2858 		 * Check the first 16 bits of the IPv6 destination address;
2859 		 * if they equal 0xFF00 it is an IPV6 mcast packet.
2860 		 */
2861 		if (*dst_addr ==
2862 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2863 			return true;
2864 		else
2865 			return false;
2866 	} else
2867 		return false;
2868 }
2869 
2870 /**
2871  * __qdf_nbuf_data_is_icmp_pkt() - check if it is IPV4 ICMP packet.
2872  * @data: Pointer to IPV4 ICMP packet data buffer
2873  *
2874  * This func. checks whether it is a ICMP packet or not.
2875  *
2876  * Return: TRUE if it is a ICMP packet
2877  *         FALSE if not
2878  */
2879 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2880 {
2881 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2882 		uint8_t pkt_type;
2883 
2884 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2885 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2886 
2887 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2888 			return true;
2889 		else
2890 			return false;
2891 	} else
2892 		return false;
2893 }
2894 
2895 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2896 
2897 /**
2898  * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is IPV6 ICMPV6 packet.
2899  * @data: Pointer to IPV6 ICMPV6 packet data buffer
2900  *
2901  * This func. checks whether it is a ICMPV6 packet or not.
2902  *
2903  * Return: TRUE if it is a ICMPV6 packet
2904  *         FALSE if not
2905  */
2906 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2907 {
2908 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2909 		uint8_t pkt_type;
2910 
2911 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2912 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2913 
2914 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2915 			return true;
2916 		else
2917 			return false;
2918 	} else
2919 		return false;
2920 }
2921 
2922 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2923 
2924 /**
2925  * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is IPV4 UDP packet.
2926  * @data: Pointer to IPV4 UDP packet data buffer
2927  *
2928  * This func. checks whether it is a IPV4 UDP packet or not.
2929  *
2930  * Return: TRUE if it is a IPV4 UDP packet
2931  *         FALSE if not
2932  */
2933 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2934 {
2935 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2936 		uint8_t pkt_type;
2937 
2938 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2939 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2940 
2941 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2942 			return true;
2943 		else
2944 			return false;
2945 	} else
2946 		return false;
2947 }
2948 
2949 /**
2950  * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is IPV4 TCP packet.
2951  * @data: Pointer to IPV4 TCP packet data buffer
2952  *
2953  * This func. checks whether it is a IPV4 TCP packet or not.
2954  *
2955  * Return: TRUE if it is a IPV4 TCP packet
2956  *         FALSE if not
2957  */
2958 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2959 {
2960 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2961 		uint8_t pkt_type;
2962 
2963 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2964 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2965 
2966 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2967 			return true;
2968 		else
2969 			return false;
2970 	} else
2971 		return false;
2972 }
2973 
2974 /**
2975  * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is IPV6 UDP packet.
2976  * @data: Pointer to IPV6 UDP packet data buffer
2977  *
2978  * This func. checks whether it is a IPV6 UDP packet or not.
2979  *
2980  * Return: TRUE if it is a IPV6 UDP packet
2981  *         FALSE if not
2982  */
2983 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2984 {
2985 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2986 		uint8_t pkt_type;
2987 
2988 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2989 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2990 
2991 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2992 			return true;
2993 		else
2994 			return false;
2995 	} else
2996 		return false;
2997 }
2998 
2999 /**
3000  * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is IPV6 TCP packet.
3001  * @data: Pointer to IPV6 TCP packet data buffer
3002  *
3003  * This func. checks whether it is a IPV6 TCP packet or not.
3004  *
3005  * Return: TRUE if it is a IPV6 TCP packet
3006  *         FALSE if not
3007  */
3008 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
3009 {
3010 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
3011 		uint8_t pkt_type;
3012 
3013 		pkt_type = (uint8_t)(*(uint8_t *)(data +
3014 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
3015 
3016 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
3017 			return true;
3018 		else
3019 			return false;
3020 	} else
3021 		return false;
3022 }
3023 
3024 /**
3025  * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
3026  * @nbuf: sk buff
3027  *
3028  * Return: true if packet is broadcast
3029  *	   false otherwise
3030  */
3031 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
3032 {
3033 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
3034 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
3035 }
3036 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
3037 
3038 /**
3039  * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
3040  * @nbuf: sk buff
3041  *
3042  * Return: true if packet is multicast replay
3043  *	   false otherwise
3044  */
3045 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
3046 {
3047 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
3048 
3049 	if (unlikely(nbuf->pkt_type == PACKET_MULTICAST)) {
3050 		if (unlikely(ether_addr_equal(eh->h_source,
3051 					      nbuf->dev->dev_addr)))
3052 			return true;
3053 	}
3054 	return false;
3055 }
3056 
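/*
 * A multicast "replay" in the helper above is one of this device's own
 * multicast frames echoed back by the network, for example by an AP bridging
 * it to all associated stations: the frame arrives flagged PACKET_MULTICAST
 * yet its source MAC equals the local interface address, so callers can
 * typically drop it instead of handing it back up the stack.
 */
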
3057 /**
3058  * __qdf_nbuf_is_arp_local() - check if local or non local arp
3059  * @skb: pointer to sk_buff
3060  *
3061  * Return: true if local arp or false otherwise.
3062  */
3063 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
3064 {
3065 	struct arphdr *arp;
3066 	struct in_ifaddr **ifap = NULL;
3067 	struct in_ifaddr *ifa = NULL;
3068 	struct in_device *in_dev;
3069 	unsigned char *arp_ptr;
3070 	__be32 tip;
3071 
3072 	arp = (struct arphdr *)skb->data;
3073 	if (arp->ar_op == htons(ARPOP_REQUEST)) {
3074 		/* if fail to acquire rtnl lock, assume it's local arp */
3075 		if (!rtnl_trylock())
3076 			return true;
3077 
3078 		in_dev = __in_dev_get_rtnl(skb->dev);
3079 		if (in_dev) {
3080 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
3081 				ifap = &ifa->ifa_next) {
3082 				if (!strcmp(skb->dev->name, ifa->ifa_label))
3083 					break;
3084 			}
3085 		}
3086 
3087 		if (ifa && ifa->ifa_local) {
3088 			arp_ptr = (unsigned char *)(arp + 1);
3089 			arp_ptr += (skb->dev->addr_len + 4 +
3090 					skb->dev->addr_len);
3091 			memcpy(&tip, arp_ptr, 4);
3092 			qdf_debug("ARP packet: local IP: %x dest IP: %x",
3093 				  ifa->ifa_local, tip);
3094 			if (ifa->ifa_local == tip) {
3095 				rtnl_unlock();
3096 				return true;
3097 			}
3098 		}
3099 		rtnl_unlock();
3100 	}
3101 
3102 	return false;
3103 }
3104 
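/*
 * Layout note for the target-IP extraction above: an Ethernet/IPv4 ARP
 * payload follows struct arphdr as sender-HW (dev->addr_len bytes),
 * sender-IP (4 bytes), target-HW (dev->addr_len bytes), target-IP (4 bytes).
 * Advancing the pointer by addr_len + 4 + addr_len therefore lands on the
 * target IP, which is compared with the local interface address to decide
 * whether the request is addressed to this host.
 */
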
3105 /**
3106  * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
3107  * @data: pointer to data of network buffer
3108  * @tcp_hdr_len_offset: byte offset of the TCP header length field in the frame
3109  *
3110  * Return: TCP header length in unit of byte
3111  */
3112 static inline
3113 uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
3114 					uint8_t tcp_hdr_len_offset)
3115 {
3116 	uint8_t tcp_hdr_len;
3117 
3118 	tcp_hdr_len =
3119 		*((uint8_t *)(data + tcp_hdr_len_offset));
3120 
3121 	tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
3122 		       QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
3123 		       QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
3124 
3125 	return tcp_hdr_len;
3126 }
3127 
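/*
 * Worked example, assuming the QDF_NBUF_PKT_TCP_HDR_LEN_* macros encode the
 * standard TCP data-offset field (upper nibble of the byte, counted in
 * 32-bit words): an option-less header reads 0x50 at that offset, so
 * ((0x50 & 0xf0) >> 4) * 4 = 5 * 4 = 20 bytes, the minimum TCP header size;
 * a header carrying 12 bytes of options reads 0x80 and decodes to 32 bytes.
 */
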
3128 bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
3129 {
3130 	bool is_tcp_ack = false;
3131 	uint8_t op_code, tcp_hdr_len;
3132 	uint16_t ip_payload_len;
3133 	uint8_t *data = skb->data;
3134 
3135 	/*
3136 	 * If the packet length exceeds the maximum pure TCP ACK length,
3137 	 * or the skb is nonlinear, it cannot be a pure TCP ACK.
3138 	 */
3139 	if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
3140 	    qdf_nbuf_is_nonlinear(skb))
3141 		return false;
3142 
3143 	if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
3144 		ip_payload_len =
3145 			QDF_SWAP_U16(*((uint16_t *)(data +
3146 				     QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
3147 					- QDF_NBUF_TRAC_IPV4_HEADER_SIZE;
3148 
3149 		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
3150 					data,
3151 					QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);
3152 
3153 		op_code = (uint8_t)(*(uint8_t *)(data +
3154 				QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));
3155 
3156 		if (ip_payload_len == tcp_hdr_len &&
3157 		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
3158 			is_tcp_ack = true;
3159 
3160 	} else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
3161 		ip_payload_len =
3162 			QDF_SWAP_U16(*((uint16_t *)(data +
3163 				QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));
3164 
3165 		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
3166 					data,
3167 					QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
3168 		op_code = (uint8_t)(*(uint8_t *)(data +
3169 				QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));
3170 
3171 		if (ip_payload_len == tcp_hdr_len &&
3172 		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
3173 			is_tcp_ack = true;
3174 	}
3175 
3176 	return is_tcp_ack;
3177 }
3178 
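/*
 * Example of the pure-ACK test above, assuming a standard 20-byte IPv4
 * header: an IP total length of 40 leaves an IP payload of 20 bytes, and if
 * the TCP data offset also decodes to 20 bytes while the flags byte matches
 * the ACK opcode, the segment carries no payload and is classified as a pure
 * TCP ACK. Piggybacked data makes the payload exceed the TCP header length,
 * and oversized or nonlinear skbs are rejected before any parsing.
 */
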
3179 #ifdef NBUF_MEMORY_DEBUG
3180 
3181 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
3182 
3183 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
3184 static struct kmem_cache *nbuf_tracking_cache;
3185 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
3186 static spinlock_t qdf_net_buf_track_free_list_lock;
3187 static uint32_t qdf_net_buf_track_free_list_count;
3188 static uint32_t qdf_net_buf_track_used_list_count;
3189 static uint32_t qdf_net_buf_track_max_used;
3190 static uint32_t qdf_net_buf_track_max_free;
3191 static uint32_t qdf_net_buf_track_max_allocated;
3192 static uint32_t qdf_net_buf_track_fail_count;
3193 
3194 /**
3195  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
3196  *
3197  * tracks the max number of network buffers that the wlan driver was tracking
3198  * at any one time.
3199  *
3200  * Return: none
3201  */
3202 static inline void update_max_used(void)
3203 {
3204 	int sum;
3205 
3206 	if (qdf_net_buf_track_max_used <
3207 	    qdf_net_buf_track_used_list_count)
3208 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
3209 	sum = qdf_net_buf_track_free_list_count +
3210 		qdf_net_buf_track_used_list_count;
3211 	if (qdf_net_buf_track_max_allocated < sum)
3212 		qdf_net_buf_track_max_allocated = sum;
3213 }
3214 
3215 /**
3216  * update_max_free() - update qdf_net_buf_track_max_free
3217  *
3218  * tracks the max number of tracking buffers kept in the freelist.
3219  *
3220  * Return: none
3221  */
3222 static inline void update_max_free(void)
3223 {
3224 	if (qdf_net_buf_track_max_free <
3225 	    qdf_net_buf_track_free_list_count)
3226 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
3227 }
3228 
3229 /**
3230  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
3231  *
3232  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
3233  * This function also adds flexibility to adjust the allocation and freelist
3234  * schemes.
3235  *
3236  * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
3237  */
3238 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
3239 {
3240 	int flags = GFP_KERNEL;
3241 	unsigned long irq_flag;
3242 	QDF_NBUF_TRACK *new_node = NULL;
3243 
3244 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
3245 	qdf_net_buf_track_used_list_count++;
3246 	if (qdf_net_buf_track_free_list) {
3247 		new_node = qdf_net_buf_track_free_list;
3248 		qdf_net_buf_track_free_list =
3249 			qdf_net_buf_track_free_list->p_next;
3250 		qdf_net_buf_track_free_list_count--;
3251 	}
3252 	update_max_used();
3253 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3254 
3255 	if (new_node)
3256 		return new_node;
3257 
3258 	if (in_interrupt() || irqs_disabled() || in_atomic())
3259 		flags = GFP_ATOMIC;
3260 
3261 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
3262 }
3263 
3264 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
3265 #define FREEQ_POOLSIZE 2048
3266 
3267 /**
3268  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
3269  *
3270  * Matches calls to qdf_nbuf_track_alloc.
3271  * Either frees the tracking cookie to kernel or an internal
3272  * freelist based on the size of the freelist.
3273  *
3274  * Return: none
3275  */
3276 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
3277 {
3278 	unsigned long irq_flag;
3279 
3280 	if (!node)
3281 		return;
3282 
3283 	/* Try to shrink the freelist if free_list_count is greater than
3284 	 * FREEQ_POOLSIZE, but only if it is also bigger than twice the number
3285 	 * of nbufs in use. If the driver is stalling in a consistent bursty
3286 	 * fashion, this keeps 3/4 of the allocations on the free list
3287 	 * while also allowing the system to recover memory as less frantic
3288 	 * traffic occurs.
3289 	 */
3290 
3291 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
3292 
3293 	qdf_net_buf_track_used_list_count--;
3294 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
3295 	   (qdf_net_buf_track_free_list_count >
3296 	    qdf_net_buf_track_used_list_count << 1)) {
3297 		kmem_cache_free(nbuf_tracking_cache, node);
3298 	} else {
3299 		node->p_next = qdf_net_buf_track_free_list;
3300 		qdf_net_buf_track_free_list = node;
3301 		qdf_net_buf_track_free_list_count++;
3302 	}
3303 	update_max_free();
3304 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3305 }
3306 
3307 /**
3308  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
3309  *
3310  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
3311  * the freelist first makes it performant for the first iperf udp burst
3312  * as well as steady state.
3313  *
3314  * Return: None
3315  */
3316 static void qdf_nbuf_track_prefill(void)
3317 {
3318 	int i;
3319 	QDF_NBUF_TRACK *node, *head;
3320 
3321 	/* prepopulate the freelist */
3322 	head = NULL;
3323 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
3324 		node = qdf_nbuf_track_alloc();
3325 		if (!node)
3326 			continue;
3327 		node->p_next = head;
3328 		head = node;
3329 	}
3330 	while (head) {
3331 		node = head->p_next;
3332 		qdf_nbuf_track_free(head);
3333 		head = node;
3334 	}
3335 
3336 	/* prefilled buffers should not count as used */
3337 	qdf_net_buf_track_max_used = 0;
3338 }
3339 
3340 /**
3341  * qdf_nbuf_track_memory_manager_create() - create the nbuf tracking cookie manager
3342  *
3343  * This initializes the memory manager for the nbuf tracking cookies.  Because
3344  * these cookies are all the same size and only used in this feature, we can
3345  * use a kmem_cache to provide tracking as well as to speed up allocations.
3346  * To avoid the overhead of allocating and freeing the buffers (including SLUB
3347  * features) a freelist is prepopulated here.
3348  *
3349  * Return: None
3350  */
3351 static void qdf_nbuf_track_memory_manager_create(void)
3352 {
3353 	spin_lock_init(&qdf_net_buf_track_free_list_lock);
3354 	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
3355 						sizeof(QDF_NBUF_TRACK),
3356 						0, 0, NULL);
3357 
3358 	qdf_nbuf_track_prefill();
3359 }
3360 
3361 /**
3362  * qdf_nbuf_track_memory_manager_destroy() - destroy the nbuf tracking cookie manager
3363  *
3364  * Empty the freelist and print out usage statistics when it is no longer
3365  * needed. Also the kmem_cache should be destroyed here so that it can warn if
3366  * any nbuf tracking cookies were leaked.
3367  *
3368  * Return: None
3369  */
3370 static void qdf_nbuf_track_memory_manager_destroy(void)
3371 {
3372 	QDF_NBUF_TRACK *node, *tmp;
3373 	unsigned long irq_flag;
3374 
3375 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
3376 	node = qdf_net_buf_track_free_list;
3377 
3378 	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
3379 		qdf_print("%s: unexpectedly large max_used count %d",
3380 			  __func__, qdf_net_buf_track_max_used);
3381 
3382 	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
3383 		qdf_print("%s: %d unused trackers were allocated",
3384 			  __func__,
3385 			  qdf_net_buf_track_max_allocated -
3386 			  qdf_net_buf_track_max_used);
3387 
3388 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
3389 	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
3390 		qdf_print("%s: check freelist shrinking functionality",
3391 			  __func__);
3392 
3393 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3394 		  "%s: %d residual freelist size",
3395 		  __func__, qdf_net_buf_track_free_list_count);
3396 
3397 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3398 		  "%s: %d max freelist size observed",
3399 		  __func__, qdf_net_buf_track_max_free);
3400 
3401 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3402 		  "%s: %d max buffers used observed",
3403 		  __func__, qdf_net_buf_track_max_used);
3404 
3405 	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3406 		  "%s: %d max buffers allocated observed",
3407 		  __func__, qdf_net_buf_track_max_allocated);
3408 
3409 	while (node) {
3410 		tmp = node;
3411 		node = node->p_next;
3412 		kmem_cache_free(nbuf_tracking_cache, tmp);
3413 		qdf_net_buf_track_free_list_count--;
3414 	}
3415 
3416 	if (qdf_net_buf_track_free_list_count != 0)
3417 		qdf_info("%d unfreed tracking memory lost in freelist",
3418 			 qdf_net_buf_track_free_list_count);
3419 
3420 	if (qdf_net_buf_track_used_list_count != 0)
3421 		qdf_info("%d unfreed tracking memory still in use",
3422 			 qdf_net_buf_track_used_list_count);
3423 
3424 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3425 	kmem_cache_destroy(nbuf_tracking_cache);
3426 	qdf_net_buf_track_free_list = NULL;
3427 }
3428 
3429 /**
3430  * qdf_net_buf_debug_init() - initialize network buffer debug functionality
3431  *
3432  * The QDF network buffer debug feature tracks all SKBs allocated by the WLAN
3433  * driver in a hash table and reports leaked SKBs when the driver is unloaded.
3434  * WLAN driver modules whose allocated SKBs are freed by the network stack are
3435  * supposed to call qdf_net_buf_debug_release_skb() so that those SKBs are not
3436  * reported as memory leaks.
3437  *
3438  * Return: none
3439  */
3440 void qdf_net_buf_debug_init(void)
3441 {
3442 	uint32_t i;
3443 
3444 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
3445 
3446 	if (is_initial_mem_debug_disabled)
3447 		return;
3448 
3449 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
3450 
3451 	qdf_nbuf_map_tracking_init();
3452 	qdf_nbuf_smmu_map_tracking_init();
3453 	qdf_nbuf_track_memory_manager_create();
3454 
3455 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3456 		gp_qdf_net_buf_track_tbl[i] = NULL;
3457 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
3458 	}
3459 }
3460 qdf_export_symbol(qdf_net_buf_debug_init);
3461 
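/*
 * Expected tracking life cycle under NBUF_MEMORY_DEBUG (illustrative
 * summary): qdf_net_buf_debug_init() sets up the hash table and tracking
 * caches at driver load; each allocation through qdf_nbuf_alloc_debug() adds
 * a node via qdf_net_buf_debug_add_node(); the map/unmap debug wrappers
 * update that node; buffers handed off to the network stack are dropped from
 * tracking with qdf_net_buf_debug_release_skb(); and qdf_net_buf_debug_exit()
 * reports whatever is still present as a leak.
 */
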
3462 /**
3463  * qdf_net_buf_debug_exit() - exit network buffer debug functionality
3464  *
3465  * Exit network buffer tracking debug functionality and log SKB memory leaks.
3466  * As part of exiting the functionality, free the leaked memory and
3467  * clean up the tracking buffers.
3468  *
3469  * Return: none
3470  */
3471 void qdf_net_buf_debug_exit(void)
3472 {
3473 	uint32_t i;
3474 	uint32_t count = 0;
3475 	unsigned long irq_flag;
3476 	QDF_NBUF_TRACK *p_node;
3477 	QDF_NBUF_TRACK *p_prev;
3478 
3479 	if (is_initial_mem_debug_disabled)
3480 		return;
3481 
3482 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3483 		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3484 		p_node = gp_qdf_net_buf_track_tbl[i];
3485 		while (p_node) {
3486 			p_prev = p_node;
3487 			p_node = p_node->p_next;
3488 			count++;
3489 			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
3490 				 p_prev->func_name, p_prev->line_num,
3491 				 p_prev->size, p_prev->net_buf);
3492 			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
3493 				 p_prev->map_func_name,
3494 				 p_prev->map_line_num,
3495 				 p_prev->unmap_func_name,
3496 				 p_prev->unmap_line_num,
3497 				 p_prev->is_nbuf_mapped);
3498 			qdf_nbuf_track_free(p_prev);
3499 		}
3500 		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3501 	}
3502 
3503 	qdf_nbuf_track_memory_manager_destroy();
3504 	qdf_nbuf_map_tracking_deinit();
3505 	qdf_nbuf_smmu_map_tracking_deinit();
3506 
3507 #ifdef CONFIG_HALT_KMEMLEAK
3508 	if (count) {
3509 		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
3510 		QDF_BUG(0);
3511 	}
3512 #endif
3513 }
3514 qdf_export_symbol(qdf_net_buf_debug_exit);
3515 
3516 /**
3517  * qdf_net_buf_debug_hash() - hash network buffer pointer
3518  *
3519  * Return: hash value
3520  */
3521 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
3522 {
3523 	uint32_t i;
3524 
3525 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
3526 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
3527 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
3528 
3529 	return i;
3530 }
3531 
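/*
 * Hashing note: skb pointers come from slab allocations and are typically at
 * least 16-byte aligned, so the lowest bits carry little entropy; folding
 * (ptr >> 4) with (ptr >> 14) mixes higher-order bits before masking down to
 * the table size, and the mask only behaves as a modulo because
 * QDF_NET_BUF_TRACK_MAX_SIZE is assumed to be a power of two.
 */
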
3532 /**
3533  * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
3534  *
3535  * Return: If the skb is found in the hash table, return its tracking node
3536  *	else return %NULL
3537  */
3538 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
3539 {
3540 	uint32_t i;
3541 	QDF_NBUF_TRACK *p_node;
3542 
3543 	i = qdf_net_buf_debug_hash(net_buf);
3544 	p_node = gp_qdf_net_buf_track_tbl[i];
3545 
3546 	while (p_node) {
3547 		if (p_node->net_buf == net_buf)
3548 			return p_node;
3549 		p_node = p_node->p_next;
3550 	}
3551 
3552 	return NULL;
3553 }
3554 
3555 /**
3556  * qdf_net_buf_debug_add_node() - store skb in debug hash table
3557  *
3558  * Return: none
3559  */
3560 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
3561 				const char *func_name, uint32_t line_num)
3562 {
3563 	uint32_t i;
3564 	unsigned long irq_flag;
3565 	QDF_NBUF_TRACK *p_node;
3566 	QDF_NBUF_TRACK *new_node;
3567 
3568 	if (is_initial_mem_debug_disabled)
3569 		return;
3570 
3571 	new_node = qdf_nbuf_track_alloc();
3572 
3573 	i = qdf_net_buf_debug_hash(net_buf);
3574 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3575 
3576 	p_node = qdf_net_buf_debug_look_up(net_buf);
3577 
3578 	if (p_node) {
3579 		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
3580 			  p_node->net_buf, p_node->func_name, p_node->line_num,
3581 			  net_buf, func_name, line_num);
3582 		qdf_nbuf_track_free(new_node);
3583 	} else {
3584 		p_node = new_node;
3585 		if (p_node) {
3586 			p_node->net_buf = net_buf;
3587 			qdf_str_lcopy(p_node->func_name, func_name,
3588 				      QDF_MEM_FUNC_NAME_SIZE);
3589 			p_node->line_num = line_num;
3590 			p_node->is_nbuf_mapped = false;
3591 			p_node->map_line_num = 0;
3592 			p_node->unmap_line_num = 0;
3593 			p_node->map_func_name[0] = '\0';
3594 			p_node->unmap_func_name[0] = '\0';
3595 			p_node->size = size;
3596 			p_node->time = qdf_get_log_timestamp();
3597 			qdf_net_buf_update_smmu_params(p_node);
3598 			qdf_mem_skb_inc(size);
3599 			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
3600 			gp_qdf_net_buf_track_tbl[i] = p_node;
3601 		} else {
3602 			qdf_net_buf_track_fail_count++;
3603 			qdf_print(
3604 				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
3605 				  func_name, line_num, size);
3606 		}
3607 	}
3608 
3609 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3610 }
3611 qdf_export_symbol(qdf_net_buf_debug_add_node);
3612 
3613 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
3614 				   uint32_t line_num)
3615 {
3616 	uint32_t i;
3617 	unsigned long irq_flag;
3618 	QDF_NBUF_TRACK *p_node;
3619 
3620 	if (is_initial_mem_debug_disabled)
3621 		return;
3622 
3623 	i = qdf_net_buf_debug_hash(net_buf);
3624 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3625 
3626 	p_node = qdf_net_buf_debug_look_up(net_buf);
3627 
3628 	if (p_node) {
3629 		qdf_str_lcopy(p_node->func_name, kbasename(func_name),
3630 			      QDF_MEM_FUNC_NAME_SIZE);
3631 		p_node->line_num = line_num;
3632 	}
3633 
3634 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3635 }
3636 
3637 qdf_export_symbol(qdf_net_buf_debug_update_node);
3638 
3639 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
3640 				       const char *func_name,
3641 				       uint32_t line_num)
3642 {
3643 	uint32_t i;
3644 	unsigned long irq_flag;
3645 	QDF_NBUF_TRACK *p_node;
3646 
3647 	if (is_initial_mem_debug_disabled)
3648 		return;
3649 
3650 	i = qdf_net_buf_debug_hash(net_buf);
3651 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3652 
3653 	p_node = qdf_net_buf_debug_look_up(net_buf);
3654 
3655 	if (p_node) {
3656 		qdf_str_lcopy(p_node->map_func_name, func_name,
3657 			      QDF_MEM_FUNC_NAME_SIZE);
3658 		p_node->map_line_num = line_num;
3659 		p_node->is_nbuf_mapped = true;
3660 	}
3661 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3662 }
3663 
3664 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
3665 void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
3666 					    unsigned long iova,
3667 					    unsigned long pa,
3668 					    const char *func,
3669 					    uint32_t line)
3670 {
3671 	uint32_t i;
3672 	unsigned long irq_flag;
3673 	QDF_NBUF_TRACK *p_node;
3674 
3675 	if (is_initial_mem_debug_disabled)
3676 		return;
3677 
3678 	i = qdf_net_buf_debug_hash(nbuf);
3679 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3680 
3681 	p_node = qdf_net_buf_debug_look_up(nbuf);
3682 
3683 	if (p_node) {
3684 		qdf_str_lcopy(p_node->smmu_map_func_name, func,
3685 			      QDF_MEM_FUNC_NAME_SIZE);
3686 		p_node->smmu_map_line_num = line;
3687 		p_node->is_nbuf_smmu_mapped = true;
3688 		p_node->smmu_map_iova_addr = iova;
3689 		p_node->smmu_map_pa_addr = pa;
3690 	}
3691 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3692 }
3693 
3694 void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
3695 					      unsigned long iova,
3696 					      unsigned long pa,
3697 					      const char *func,
3698 					      uint32_t line)
3699 {
3700 	uint32_t i;
3701 	unsigned long irq_flag;
3702 	QDF_NBUF_TRACK *p_node;
3703 
3704 	if (is_initial_mem_debug_disabled)
3705 		return;
3706 
3707 	i = qdf_net_buf_debug_hash(nbuf);
3708 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3709 
3710 	p_node = qdf_net_buf_debug_look_up(nbuf);
3711 
3712 	if (p_node) {
3713 		qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
3714 			      QDF_MEM_FUNC_NAME_SIZE);
3715 		p_node->smmu_unmap_line_num = line;
3716 		p_node->is_nbuf_smmu_mapped = false;
3717 		p_node->smmu_unmap_iova_addr = iova;
3718 		p_node->smmu_unmap_pa_addr = pa;
3719 	}
3720 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3721 }
3722 #endif
3723 
3724 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3725 					 const char *func_name,
3726 					 uint32_t line_num)
3727 {
3728 	uint32_t i;
3729 	unsigned long irq_flag;
3730 	QDF_NBUF_TRACK *p_node;
3731 
3732 	if (is_initial_mem_debug_disabled)
3733 		return;
3734 
3735 	i = qdf_net_buf_debug_hash(net_buf);
3736 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3737 
3738 	p_node = qdf_net_buf_debug_look_up(net_buf);
3739 
3740 	if (p_node) {
3741 		qdf_str_lcopy(p_node->unmap_func_name, func_name,
3742 			      QDF_MEM_FUNC_NAME_SIZE);
3743 		p_node->unmap_line_num = line_num;
3744 		p_node->is_nbuf_mapped = false;
3745 	}
3746 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3747 }
3748 
3749 /**
3750  * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
3751  *
3752  * Return: none
3753  */
3754 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3755 {
3756 	uint32_t i;
3757 	QDF_NBUF_TRACK *p_head;
3758 	QDF_NBUF_TRACK *p_node = NULL;
3759 	unsigned long irq_flag;
3760 	QDF_NBUF_TRACK *p_prev;
3761 
3762 	if (is_initial_mem_debug_disabled)
3763 		return;
3764 
3765 	i = qdf_net_buf_debug_hash(net_buf);
3766 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3767 
3768 	p_head = gp_qdf_net_buf_track_tbl[i];
3769 
3770 	/* Unallocated SKB */
3771 	if (!p_head)
3772 		goto done;
3773 
3774 	p_node = p_head;
3775 	/* Found at head of the table */
3776 	if (p_head->net_buf == net_buf) {
3777 		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3778 		goto done;
3779 	}
3780 
3781 	/* Search in collision list */
3782 	while (p_node) {
3783 		p_prev = p_node;
3784 		p_node = p_node->p_next;
3785 		if ((p_node) && (p_node->net_buf == net_buf)) {
3786 			p_prev->p_next = p_node->p_next;
3787 			break;
3788 		}
3789 	}
3790 
3791 done:
3792 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3793 
3794 	if (p_node) {
3795 		qdf_mem_skb_dec(p_node->size);
3796 		qdf_nbuf_track_free(p_node);
3797 	} else {
3798 		if (qdf_net_buf_track_fail_count) {
3799 			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3800 				  net_buf, qdf_net_buf_track_fail_count);
3801 		} else
3802 			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3803 					   net_buf);
3804 	}
3805 }
3806 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3807 
3808 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3809 				   const char *func_name, uint32_t line_num)
3810 {
3811 	qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3812 
3813 	if (is_initial_mem_debug_disabled)
3814 		return;
3815 
3816 	while (ext_list) {
3817 		/*
3818 		 * Also track each segment of a jumbo packet chained via
3819 		 * frag_list
3820 		 */
3821 		qdf_nbuf_t next;
3822 
3823 		next = qdf_nbuf_queue_next(ext_list);
3824 		qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3825 		ext_list = next;
3826 	}
3827 	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3828 }
3829 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3830 
3831 /**
3832  * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
3833  * @net_buf: Network buf holding head segment (single)
3834  *
3835  * WLAN driver modules whose allocated SKBs are freed by the network stack are
3836  * supposed to call this API before returning the SKB to the network stack so
3837  * that the SKB is not reported as a memory leak.
3838  *
3839  * Return: none
3840  */
3841 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3842 {
3843 	qdf_nbuf_t ext_list;
3844 
3845 	if (is_initial_mem_debug_disabled)
3846 		return;
3847 
3848 	ext_list = qdf_nbuf_get_ext_list(net_buf);
3849 	while (ext_list) {
3850 		/*
3851 		 * Also release tracking for each segment of a jumbo packet
3852 		 * chained via frag_list
3853 		 */
3854 		qdf_nbuf_t next;
3855 
3856 		next = qdf_nbuf_queue_next(ext_list);
3857 
3858 		if (qdf_nbuf_get_users(ext_list) > 1) {
3859 			ext_list = next;
3860 			continue;
3861 		}
3862 
3863 		qdf_net_buf_debug_delete_node(ext_list);
3864 		ext_list = next;
3865 	}
3866 
3867 	if (qdf_nbuf_get_users(net_buf) > 1)
3868 		return;
3869 
3870 	qdf_net_buf_debug_delete_node(net_buf);
3871 }
3872 qdf_export_symbol(qdf_net_buf_debug_release_skb);
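
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * takes ownership of an skb handed in by the network stack and later returns
 * it. The helper name below is an example only; the tracker calls are the
 * APIs defined above.
 */
#if 0
static void example_track_stack_skb(qdf_nbuf_t skb)
{
	/* skb allocated by the network stack enters driver ownership */
	qdf_net_buf_debug_acquire_skb(skb, __func__, __LINE__);

	/* ... driver processes the skb ... */

	/* drop the tracker entry before the skb goes back to the stack */
	qdf_net_buf_debug_release_skb(skb);
}
#endif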
3873 
3874 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3875 				int reserve, int align, int prio,
3876 				const char *func, uint32_t line)
3877 {
3878 	qdf_nbuf_t nbuf;
3879 
3880 	if (is_initial_mem_debug_disabled)
3881 		return __qdf_nbuf_alloc(osdev, size,
3882 					reserve, align,
3883 					prio, func, line);
3884 
3885 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3886 
3887 	/* Store SKB in internal QDF tracking table */
3888 	if (qdf_likely(nbuf)) {
3889 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3890 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3891 	} else {
3892 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3893 	}
3894 
3895 	return nbuf;
3896 }
3897 qdf_export_symbol(qdf_nbuf_alloc_debug);
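
/*
 * Illustrative sketch (not part of the driver): allocating and freeing an
 * nbuf through the debug wrappers so the tracker records the call site.
 * Callers normally reach these wrappers through the qdf_nbuf_alloc() and
 * qdf_nbuf_free() macros; the direct calls and the helper name below are
 * examples only.
 */
#if 0
static void example_alloc_and_free(qdf_device_t osdev)
{
	qdf_nbuf_t nbuf;

	/* 2048 byte buffer, no reserved headroom, 4 byte alignment, prio 0 */
	nbuf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
				    __func__, __LINE__);
	if (!nbuf)
		return;

	/* ... fill and use the buffer ... */

	qdf_nbuf_free_debug(nbuf, __func__, __LINE__);
}
#endif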
3898 
3899 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3900 					    const char *func, uint32_t line)
3901 {
3902 	qdf_nbuf_t nbuf;
3903 
3904 	if (is_initial_mem_debug_disabled)
3905 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3906 						    line);
3907 
3908 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3909 
3910 	/* Store SKB in internal QDF tracking table */
3911 	if (qdf_likely(nbuf)) {
3912 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3913 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3914 	} else {
3915 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3916 	}
3917 
3918 	return nbuf;
3919 }
3920 
3921 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3922 
3923 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3924 {
3925 	qdf_nbuf_t ext_list;
3926 	qdf_frag_t p_frag;
3927 	uint32_t num_nr_frags;
3928 	uint32_t idx = 0;
3929 
3930 	if (qdf_unlikely(!nbuf))
3931 		return;
3932 
3933 	if (is_initial_mem_debug_disabled)
3934 		goto free_buf;
3935 
3936 	if (qdf_nbuf_get_users(nbuf) > 1)
3937 		goto free_buf;
3938 
3939 	/* Remove SKB from internal QDF tracking table */
3940 	qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
3941 	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3942 	qdf_net_buf_debug_delete_node(nbuf);
3943 	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3944 
3945 	/* Take care to delete the debug entries for frags */
3946 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3947 
3948 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3949 
3950 	while (idx < num_nr_frags) {
3951 		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3952 		if (qdf_likely(p_frag))
3953 			qdf_frag_debug_refcount_dec(p_frag, func, line);
3954 		idx++;
3955 	}
3956 
3957 	/**
3958 	 * Take care to update the debug entries for frag_list and also
3959 	 * for the frags attached to frag_list
3960 	 */
3961 	ext_list = qdf_nbuf_get_ext_list(nbuf);
3962 	while (ext_list) {
3963 		if (qdf_nbuf_get_users(ext_list) == 1) {
3964 			qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
3965 							      line);
3966 			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3967 			idx = 0;
3968 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3969 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3970 			while (idx < num_nr_frags) {
3971 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3972 				if (qdf_likely(p_frag))
3973 					qdf_frag_debug_refcount_dec(p_frag,
3974 								    func, line);
3975 				idx++;
3976 			}
3977 			qdf_net_buf_debug_delete_node(ext_list);
3978 		}
3979 
3980 		ext_list = qdf_nbuf_queue_next(ext_list);
3981 	}
3982 
3983 free_buf:
3984 	__qdf_nbuf_free(nbuf);
3985 }
3986 qdf_export_symbol(qdf_nbuf_free_debug);
3987 
3988 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3989 					const char *func, uint32_t line)
3990 {
3991 	struct sk_buff *skb;
3992 	int flags = GFP_KERNEL;
3993 
3994 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3995 		flags = GFP_ATOMIC;
3996 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * kcompactd was observed burning CPU trying to assemble
		 * order-3 pages. __netdev_alloc_skb() falls back to a 4k
		 * page if the high-order allocation fails, so there is no
		 * need to push hard for it. Let kcompactd rest in peace.
		 */
4003 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
4004 #endif
4005 	}
4006 
4007 	skb = __netdev_alloc_skb(NULL, size, flags);
4008 
4009 
4010 	if (qdf_likely(is_initial_mem_debug_disabled)) {
4011 		if (qdf_likely(skb))
4012 			qdf_nbuf_count_inc(skb);
4013 	} else {
4014 		if (qdf_likely(skb)) {
4015 			qdf_nbuf_count_inc(skb);
4016 			qdf_net_buf_debug_add_node(skb, size, func, line);
4017 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
4018 		} else {
4019 			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
4020 		}
4021 	}
4022 
4023 
4024 	return skb;
4025 }
4026 
4027 qdf_export_symbol(__qdf_nbuf_alloc_simple);
4028 
4029 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
4030 				uint32_t line)
4031 {
4032 	if (qdf_likely(nbuf)) {
4033 		if (is_initial_mem_debug_disabled) {
4034 			dev_kfree_skb_any(nbuf);
4035 		} else {
4036 			qdf_nbuf_free_debug(nbuf, func, line);
4037 		}
4038 	}
4039 }
4040 
4041 qdf_export_symbol(qdf_nbuf_free_debug_simple);
4042 
4043 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
4044 {
4045 	uint32_t num_nr_frags;
4046 	uint32_t idx = 0;
4047 	qdf_nbuf_t ext_list;
4048 	qdf_frag_t p_frag;
4049 
4050 	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
4051 
4052 	if (is_initial_mem_debug_disabled)
4053 		return cloned_buf;
4054 
4055 	if (qdf_unlikely(!cloned_buf))
4056 		return NULL;
4057 
4058 	/* Take care to update the debug entries for frags */
4059 	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
4060 
4061 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
4062 
4063 	while (idx < num_nr_frags) {
4064 		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
4065 		if (qdf_likely(p_frag))
4066 			qdf_frag_debug_refcount_inc(p_frag, func, line);
4067 		idx++;
4068 	}
4069 
4070 	/* Take care to update debug entries for frags attached to frag_list */
4071 	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
4072 	while (ext_list) {
4073 		idx = 0;
4074 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
4075 
4076 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
4077 
4078 		while (idx < num_nr_frags) {
4079 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
4080 			if (qdf_likely(p_frag))
4081 				qdf_frag_debug_refcount_inc(p_frag, func, line);
4082 			idx++;
4083 		}
4084 		ext_list = qdf_nbuf_queue_next(ext_list);
4085 	}
4086 
4087 	/* Store SKB in internal QDF tracking table */
4088 	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
4089 	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
4090 
4091 	return cloned_buf;
4092 }
4093 qdf_export_symbol(qdf_nbuf_clone_debug);
4094 
4095 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
4096 {
4097 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
4098 
4099 	if (is_initial_mem_debug_disabled)
4100 		return copied_buf;
4101 
4102 	if (qdf_unlikely(!copied_buf))
4103 		return NULL;
4104 
4105 	/* Store SKB in internal QDF tracking table */
4106 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
4107 	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
4108 
4109 	return copied_buf;
4110 }
4111 qdf_export_symbol(qdf_nbuf_copy_debug);
4112 
4113 qdf_nbuf_t
4114 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
4115 			   const char *func, uint32_t line)
4116 {
4117 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
4118 
4119 	if (qdf_unlikely(!copied_buf))
4120 		return NULL;
4121 
4122 	if (is_initial_mem_debug_disabled)
4123 		return copied_buf;
4124 
4125 	/* Store SKB in internal QDF tracking table */
4126 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
4127 	qdf_nbuf_history_add(copied_buf, func, line,
4128 			     QDF_NBUF_ALLOC_COPY_EXPAND);
4129 
4130 	return copied_buf;
4131 }
4132 
4133 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
4134 
4135 qdf_nbuf_t
4136 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
4137 		       uint32_t line_num)
4138 {
4139 	qdf_nbuf_t unshared_buf;
4140 	qdf_frag_t p_frag;
4141 	uint32_t num_nr_frags;
4142 	uint32_t idx = 0;
4143 	qdf_nbuf_t ext_list, next;
4144 
4145 	if (is_initial_mem_debug_disabled)
4146 		return __qdf_nbuf_unshare(buf);
4147 
4148 	/* Not a shared buffer, nothing to do */
4149 	if (!qdf_nbuf_is_cloned(buf))
4150 		return buf;
4151 
4152 	if (qdf_nbuf_get_users(buf) > 1)
4153 		goto unshare_buf;
4154 
4155 	/* Take care to delete the debug entries for frags */
4156 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
4157 
4158 	while (idx < num_nr_frags) {
4159 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
4160 		if (qdf_likely(p_frag))
4161 			qdf_frag_debug_refcount_dec(p_frag, func_name,
4162 						    line_num);
4163 		idx++;
4164 	}
4165 
4166 	qdf_net_buf_debug_delete_node(buf);
4167 
4168 	 /* Take care of jumbo packet connected using frag_list and frags */
4169 	ext_list = qdf_nbuf_get_ext_list(buf);
4170 	while (ext_list) {
4171 		idx = 0;
4172 		next = qdf_nbuf_queue_next(ext_list);
4173 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
4174 
4175 		if (qdf_nbuf_get_users(ext_list) > 1) {
4176 			ext_list = next;
4177 			continue;
4178 		}
4179 
4180 		while (idx < num_nr_frags) {
4181 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
4182 			if (qdf_likely(p_frag))
4183 				qdf_frag_debug_refcount_dec(p_frag, func_name,
4184 							    line_num);
4185 			idx++;
4186 		}
4187 
4188 		qdf_net_buf_debug_delete_node(ext_list);
4189 		ext_list = next;
4190 	}
4191 
4192 unshare_buf:
4193 	unshared_buf = __qdf_nbuf_unshare(buf);
4194 
4195 	if (qdf_likely(unshared_buf))
4196 		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
4197 					   line_num);
4198 
4199 	return unshared_buf;
4200 }
4201 
4202 qdf_export_symbol(qdf_nbuf_unshare_debug);
4203 
4204 void
4205 qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
4206 			      const char *func, uint32_t line)
4207 {
4208 	qdf_nbuf_t  buf;
4209 
4210 	if (qdf_nbuf_queue_empty(nbuf_queue_head))
4211 		return;
4212 
4213 	if (is_initial_mem_debug_disabled)
4214 		return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
4215 
4216 	while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
4217 		qdf_nbuf_free_debug(buf, func, line);
4218 }
4219 
4220 qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
4221 #endif /* NBUF_MEMORY_DEBUG */
4222 
4223 #if defined(QCA_DP_NBUF_FAST_PPEDS)
4224 struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
4225 					const char *func, uint32_t line)
4226 {
4227 	struct sk_buff *skb;
4228 	int flags = GFP_KERNEL;
4229 
4230 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
4231 		flags = GFP_ATOMIC;
4232 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * kcompactd was observed burning CPU trying to assemble
		 * order-3 pages. __netdev_alloc_skb() falls back to a 4k
		 * page if the high-order allocation fails, so there is no
		 * need to push hard for it. Let kcompactd rest in peace.
		 */
4240 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
4241 #endif
4242 	}
4243 	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
4244 	if (qdf_likely(is_initial_mem_debug_disabled)) {
4245 		if (qdf_likely(skb))
4246 			qdf_nbuf_count_inc(skb);
4247 	} else {
4248 		if (qdf_likely(skb)) {
4249 			qdf_nbuf_count_inc(skb);
4250 			qdf_net_buf_debug_add_node(skb, size, func, line);
4251 			qdf_nbuf_history_add(skb, func, line,
4252 					     QDF_NBUF_ALLOC);
4253 		} else {
4254 			qdf_nbuf_history_add(skb, func, line,
4255 					     QDF_NBUF_ALLOC_FAILURE);
4256 		}
4257 	}
4258 	return skb;
4259 }
4260 
4261 qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
4262 #endif
4263 
4264 #if defined(FEATURE_TSO)
4265 
4266 /**
4267  * struct qdf_tso_cmn_seg_info_t - TSO common info structure
4268  *
4269  * @ethproto: ethernet type of the msdu
4270  * @ip_tcp_hdr_len: ip + tcp length for the msdu
4271  * @l2_len: L2 length for the msdu
4272  * @eit_hdr: pointer to EIT header
4273  * @eit_hdr_len: EIT header length for the msdu
4274  * @eit_hdr_dma_map_addr: dma addr for EIT header
4275  * @tcphdr: pointer to tcp header
4276  * @ipv4_csum_en: ipv4 checksum enable
4277  * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
4278  * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
4279  * @ip_id: IP id
4280  * @tcp_seq_num: TCP sequence number
4281  *
 * This structure holds the TSO info that is common
4283  * across all the TCP segments of the jumbo packet.
4284  */
4285 struct qdf_tso_cmn_seg_info_t {
4286 	uint16_t ethproto;
4287 	uint16_t ip_tcp_hdr_len;
4288 	uint16_t l2_len;
4289 	uint8_t *eit_hdr;
4290 	uint32_t eit_hdr_len;
4291 	qdf_dma_addr_t eit_hdr_dma_map_addr;
4292 	struct tcphdr *tcphdr;
4293 	uint16_t ipv4_csum_en;
4294 	uint16_t tcp_ipv4_csum_en;
4295 	uint16_t tcp_ipv6_csum_en;
4296 	uint16_t ip_id;
4297 	uint32_t tcp_seq_num;
4298 };
4299 
/**
 * qdf_nbuf_adj_tso_frag() - adjust the TSO fragment for 8-byte alignment
 * @skb: network buffer
 *
 * Shift the EIT header within the skb headroom so that it ends on an 8-byte
 * boundary.
 *
 * Return: byte offset by which the header was shifted; 0 if no adjustment was
 *	   needed or there was insufficient headroom.
 */
4307 #ifdef FIX_TXDMA_LIMITATION
4308 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4309 {
4310 	uint32_t eit_hdr_len;
4311 	uint8_t *eit_hdr;
4312 	uint8_t byte_8_align_offset;
4313 
4314 	eit_hdr = skb->data;
4315 	eit_hdr_len = (skb_transport_header(skb)
4316 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4317 	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
4318 	if (qdf_unlikely(byte_8_align_offset)) {
4319 		TSO_DEBUG("%pK,Len %d %d",
4320 			  eit_hdr, eit_hdr_len, byte_8_align_offset);
4321 		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
4322 			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
4323 				  __LINE__, skb->head, skb->data,
4324 				 byte_8_align_offset);
4325 			return 0;
4326 		}
4327 		qdf_nbuf_push_head(skb, byte_8_align_offset);
4328 		qdf_mem_move(skb->data,
4329 			     skb->data + byte_8_align_offset,
4330 			     eit_hdr_len);
4331 		skb->len -= byte_8_align_offset;
4332 		skb->mac_header -= byte_8_align_offset;
4333 		skb->network_header -= byte_8_align_offset;
4334 		skb->transport_header -= byte_8_align_offset;
4335 	}
4336 	return byte_8_align_offset;
4337 }
4338 #else
4339 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4340 {
4341 	return 0;
4342 }
4343 #endif
4344 
4345 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
4346 void qdf_record_nbuf_nbytes(
4347 	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
4348 {
4349 	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
4350 }
4351 
4352 qdf_export_symbol(qdf_record_nbuf_nbytes);
4353 
4354 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
4355 
4356 /**
4357  * qdf_nbuf_tso_map_frag() - Map TSO segment
4358  * @osdev: qdf device handle
4359  * @tso_frag_vaddr: addr of tso fragment
4360  * @nbytes: number of bytes
4361  * @dir: direction
4362  *
4363  * Map TSO segment and for MCL record the amount of memory mapped
4364  *
 * Return: DMA address of the mapped TSO fragment on success and
 * 0 in case of DMA mapping failure
4367  */
4368 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
4369 	qdf_device_t osdev, void *tso_frag_vaddr,
4370 	uint32_t nbytes, qdf_dma_dir_t dir)
4371 {
4372 	qdf_dma_addr_t tso_frag_paddr = 0;
4373 
4374 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
4375 					nbytes, __qdf_dma_dir_to_os(dir));
4376 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
4377 		qdf_err("DMA mapping error!");
4378 		qdf_assert_always(0);
4379 		return 0;
4380 	}
4381 	qdf_record_nbuf_nbytes(nbytes, dir, true);
4382 	return tso_frag_paddr;
4383 }
4384 
4385 /**
4386  * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
4387  * @osdev: qdf device handle
4388  * @tso_frag_paddr: DMA addr of tso fragment
4389  * @dir: direction
4390  * @nbytes: number of bytes
4391  *
 * Unmap TSO segment and for MCL record the amount of memory unmapped
4393  *
4394  * Return: None
4395  */
4396 static inline void qdf_nbuf_tso_unmap_frag(
4397 	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
4398 	uint32_t nbytes, qdf_dma_dir_t dir)
4399 {
4400 	qdf_record_nbuf_nbytes(nbytes, dir, false);
4401 	dma_unmap_single(osdev->dev, tso_frag_paddr,
4402 			 nbytes, __qdf_dma_dir_to_os(dir));
4403 }
4404 
4405 /**
4406  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
4407  * information
4408  * @osdev: qdf device handle
4409  * @skb: skb buffer
4410  * @tso_info: Parameters common to all segments
4411  *
4412  * Get the TSO information that is common across all the TCP
4413  * segments of the jumbo packet
4414  *
 * Return: 0 on success, 1 on failure
4416  */
4417 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
4418 			struct sk_buff *skb,
4419 			struct qdf_tso_cmn_seg_info_t *tso_info)
4420 {
4421 	/* Get ethernet type and ethernet header length */
4422 	tso_info->ethproto = vlan_get_protocol(skb);
4423 
4424 	/* Determine whether this is an IPv4 or IPv6 packet */
4425 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
4426 		/* for IPv4, get the IP ID and enable TCP and IP csum */
4427 		struct iphdr *ipv4_hdr = ip_hdr(skb);
4428 
4429 		tso_info->ip_id = ntohs(ipv4_hdr->id);
4430 		tso_info->ipv4_csum_en = 1;
4431 		tso_info->tcp_ipv4_csum_en = 1;
4432 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
4433 			qdf_err("TSO IPV4 proto 0x%x not TCP",
4434 				ipv4_hdr->protocol);
4435 			return 1;
4436 		}
4437 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
4438 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
4439 		tso_info->tcp_ipv6_csum_en = 1;
4440 	} else {
4441 		qdf_err("TSO: ethertype 0x%x is not supported!",
4442 			tso_info->ethproto);
4443 		return 1;
4444 	}
4445 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
4446 	tso_info->tcphdr = tcp_hdr(skb);
4447 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
4448 	/* get pointer to the ethernet + IP + TCP header and their length */
4449 	tso_info->eit_hdr = skb->data;
4450 	tso_info->eit_hdr_len = (skb_transport_header(skb)
4451 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4452 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
4453 						osdev, tso_info->eit_hdr,
4454 						tso_info->eit_hdr_len,
4455 						QDF_DMA_TO_DEVICE);
4456 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
4457 		return 1;
4458 
4459 	if (tso_info->ethproto == htons(ETH_P_IP)) {
4460 		/* include IPv4 header length for IPV4 (total length) */
4461 		tso_info->ip_tcp_hdr_len =
4462 			tso_info->eit_hdr_len - tso_info->l2_len;
4463 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
4464 		/* exclude IPv6 header length for IPv6 (payload length) */
4465 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
4466 	}
4467 	/*
4468 	 * The length of the payload (application layer data) is added to
4469 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
4470 	 * descriptor.
4471 	 */
4472 
4473 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
4474 		tso_info->tcp_seq_num,
4475 		tso_info->eit_hdr_len,
4476 		tso_info->l2_len,
4477 		skb->len);
4478 	return 0;
4479 }
4480 
4481 
4482 /**
4483  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
4484  *
4485  * @curr_seg: Segment whose contents are initialized
4486  * @tso_cmn_info: Parameters common to all segments
4487  *
4488  * Return: None
4489  */
4490 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
4491 				struct qdf_tso_seg_elem_t *curr_seg,
4492 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
4493 {
4494 	/* Initialize the flags to 0 */
4495 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
4496 
4497 	/*
4498 	 * The following fields remain the same across all segments of
4499 	 * a jumbo packet
4500 	 */
4501 	curr_seg->seg.tso_flags.tso_enable = 1;
4502 	curr_seg->seg.tso_flags.ipv4_checksum_en =
4503 		tso_cmn_info->ipv4_csum_en;
4504 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
4505 		tso_cmn_info->tcp_ipv6_csum_en;
4506 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
4507 		tso_cmn_info->tcp_ipv4_csum_en;
4508 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
4509 
4510 	/* The following fields change for the segments */
4511 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
4512 	tso_cmn_info->ip_id++;
4513 
4514 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
4515 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
4516 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
4517 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
4518 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
4519 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
4520 
4521 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
4522 
4523 	/*
4524 	 * First fragment for each segment always contains the ethernet,
4525 	 * IP and TCP header
4526 	 */
4527 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
4528 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
4529 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
4530 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
4531 
4532 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
4533 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
4534 		   tso_cmn_info->eit_hdr_len,
4535 		   curr_seg->seg.tso_flags.tcp_seq_num,
4536 		   curr_seg->seg.total_len);
4537 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
4538 }
4539 
4540 /**
4541  * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
4542  * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 *           TSO segments will be populated within this.
4546  *
4547  * This function fragments a TCP jumbo packet into smaller
4548  * segments to be transmitted by the driver. It chains the TSO
4549  * segments created into a list.
4550  *
4551  * Return: number of TSO segments
4552  */
4553 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
4554 		struct qdf_tso_info_t *tso_info)
4555 {
4556 	/* common across all segments */
4557 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
4558 	/* segment specific */
4559 	void *tso_frag_vaddr;
4560 	qdf_dma_addr_t tso_frag_paddr = 0;
4561 	uint32_t num_seg = 0;
4562 	struct qdf_tso_seg_elem_t *curr_seg;
4563 	struct qdf_tso_num_seg_elem_t *total_num_seg;
4564 	skb_frag_t *frag = NULL;
4565 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
4566 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
4567 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
4568 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4569 	int j = 0; /* skb fragment index */
4570 	uint8_t byte_8_align_offset;
4571 
4572 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
4573 	total_num_seg = tso_info->tso_num_seg_list;
4574 	curr_seg = tso_info->tso_seg_list;
4575 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
4576 
4577 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
4578 
4579 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
4580 						skb, &tso_cmn_info))) {
4581 		qdf_warn("TSO: error getting common segment info");
4582 		return 0;
4583 	}
4584 
4585 	/* length of the first chunk of data in the skb */
4586 	skb_frag_len = skb_headlen(skb);
4587 
4588 	/* the 0th tso segment's 0th fragment always contains the EIT header */
4589 	/* update the remaining skb fragment length and TSO segment length */
4590 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
4591 	skb_proc -= tso_cmn_info.eit_hdr_len;
4592 
4593 	/* get the address to the next tso fragment */
4594 	tso_frag_vaddr = skb->data +
4595 			 tso_cmn_info.eit_hdr_len +
4596 			 byte_8_align_offset;
4597 	/* get the length of the next tso fragment */
4598 	tso_frag_len = min(skb_frag_len, tso_seg_size);
4599 
4600 	if (tso_frag_len != 0) {
4601 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
4602 					osdev, tso_frag_vaddr, tso_frag_len,
4603 					QDF_DMA_TO_DEVICE);
4604 		if (qdf_unlikely(!tso_frag_paddr))
4605 			return 0;
4606 	}
4607 
4608 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
4609 		__LINE__, skb_frag_len, tso_frag_len);
4610 	num_seg = tso_info->num_segs;
4611 	tso_info->num_segs = 0;
4612 	tso_info->is_tso = 1;
4613 
4614 	while (num_seg && curr_seg) {
4615 		int i = 1; /* tso fragment index */
4616 		uint8_t more_tso_frags = 1;
4617 
4618 		curr_seg->seg.num_frags = 0;
4619 		tso_info->num_segs++;
4620 		total_num_seg->num_seg.tso_cmn_num_seg++;
4621 
4622 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
4623 						 &tso_cmn_info);
4624 
4625 		/* If TCP PSH flag is set, set it in the last or only segment */
4626 		if (num_seg == 1)
4627 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
4628 
4629 		if (unlikely(skb_proc == 0))
4630 			return tso_info->num_segs;
4631 
4632 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
4633 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
4634 		/* frag len is added to ip_len in while loop below*/
4635 
4636 		curr_seg->seg.num_frags++;
4637 
4638 		while (more_tso_frags) {
4639 			if (tso_frag_len != 0) {
4640 				curr_seg->seg.tso_frags[i].vaddr =
4641 					tso_frag_vaddr;
4642 				curr_seg->seg.tso_frags[i].length =
4643 					tso_frag_len;
4644 				curr_seg->seg.total_len += tso_frag_len;
4645 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
4646 				curr_seg->seg.num_frags++;
4647 				skb_proc = skb_proc - tso_frag_len;
4648 
4649 				/* increment the TCP sequence number */
4650 
4651 				tso_cmn_info.tcp_seq_num += tso_frag_len;
4652 				curr_seg->seg.tso_frags[i].paddr =
4653 					tso_frag_paddr;
4654 
4655 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
4656 			}
4657 
4658 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
4659 					__func__, __LINE__,
4660 					i,
4661 					tso_frag_len,
4662 					curr_seg->seg.total_len,
4663 					curr_seg->seg.tso_frags[i].vaddr);
4664 
4665 			/* if there is no more data left in the skb */
4666 			if (!skb_proc)
4667 				return tso_info->num_segs;
4668 
4669 			/* get the next payload fragment information */
4670 			/* check if there are more fragments in this segment */
4671 			if (tso_frag_len < tso_seg_size) {
4672 				more_tso_frags = 1;
4673 				if (tso_frag_len != 0) {
4674 					tso_seg_size = tso_seg_size -
4675 						tso_frag_len;
4676 					i++;
4677 					if (curr_seg->seg.num_frags ==
4678 								FRAG_NUM_MAX) {
4679 						more_tso_frags = 0;
4680 						/*
4681 						 * reset i and the tso
4682 						 * payload size
4683 						 */
4684 						i = 1;
4685 						tso_seg_size =
4686 							skb_shinfo(skb)->
4687 								gso_size;
4688 					}
4689 				}
4690 			} else {
4691 				more_tso_frags = 0;
4692 				/* reset i and the tso payload size */
4693 				i = 1;
4694 				tso_seg_size = skb_shinfo(skb)->gso_size;
4695 			}
4696 
4697 			/* if the next fragment is contiguous */
4698 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
4699 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4700 				skb_frag_len = skb_frag_len - tso_frag_len;
4701 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4702 
4703 			} else { /* the next fragment is not contiguous */
4704 				if (skb_shinfo(skb)->nr_frags == 0) {
4705 					qdf_info("TSO: nr_frags == 0!");
4706 					qdf_assert(0);
4707 					return 0;
4708 				}
4709 				if (j >= skb_shinfo(skb)->nr_frags) {
4710 					qdf_info("TSO: nr_frags %d j %d",
4711 						 skb_shinfo(skb)->nr_frags, j);
4712 					qdf_assert(0);
4713 					return 0;
4714 				}
4715 				frag = &skb_shinfo(skb)->frags[j];
4716 				skb_frag_len = skb_frag_size(frag);
4717 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4718 				tso_frag_vaddr = skb_frag_address_safe(frag);
4719 				j++;
4720 			}
4721 
4722 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4723 				__func__, __LINE__, skb_frag_len, tso_frag_len,
4724 				tso_seg_size);
4725 
4726 			if (!(tso_frag_vaddr)) {
4727 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
4728 						__func__);
4729 				return 0;
4730 			}
4731 
4732 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
4733 						osdev, tso_frag_vaddr,
4734 						tso_frag_len,
4735 						QDF_DMA_TO_DEVICE);
4736 			if (qdf_unlikely(!tso_frag_paddr))
4737 				return 0;
4738 		}
4739 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4740 				curr_seg->seg.tso_flags.tcp_seq_num);
4741 		num_seg--;
4742 		/* if TCP FIN flag was set, set it in the last segment */
4743 		if (!num_seg)
4744 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4745 
4746 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4747 		curr_seg = curr_seg->next;
4748 	}
4749 	return tso_info->num_segs;
4750 }
4751 qdf_export_symbol(__qdf_nbuf_get_tso_info);
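
/*
 * Illustrative sketch (not part of the driver): a hypothetical TX path
 * preparing a TSO jumbo packet. It assumes the caller has already allocated
 * and chained tso_info->tso_seg_list and tso_info->tso_num_seg_list (that
 * bookkeeping is elided here) and has set tso_info->num_segs to the number
 * of pre-allocated segment elements.
 */
#if 0
static uint32_t example_prepare_tso(qdf_device_t osdev, struct sk_buff *skb,
				    struct qdf_tso_info_t *tso_info)
{
	/* fills the pre-allocated segment descriptors from the skb */
	return __qdf_nbuf_get_tso_info(osdev, skb, tso_info);
}
#endif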
4752 
4753 /**
4754  * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
4755  *
4756  * @osdev: qdf device handle
4757  * @tso_seg: TSO segment element to be unmapped
4758  * @is_last_seg: whether this is last tso seg or not
4759  *
4760  * Return: none
4761  */
4762 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
4763 			  struct qdf_tso_seg_elem_t *tso_seg,
4764 			  bool is_last_seg)
4765 {
4766 	uint32_t num_frags = 0;
4767 
4768 	if (tso_seg->seg.num_frags > 0)
4769 		num_frags = tso_seg->seg.num_frags - 1;
4770 
4771 	/*Num of frags in a tso seg cannot be less than 2 */
4772 	if (num_frags < 1) {
4773 		/*
		 * If the number of frags in a tso seg is 1 but is_last_seg is
		 * true (this may happen when qdf_nbuf_get_tso_info failed),
		 * do the dma unmap for the 0th frag in this seg.
4777 		 */
4778 		if (is_last_seg && tso_seg->seg.num_frags == 1)
4779 			goto last_seg_free_first_frag;
4780 
4781 		qdf_assert(0);
4782 		qdf_err("ERROR: num of frags in a tso segment is %d",
4783 			(num_frags + 1));
4784 		return;
4785 	}
4786 
4787 	while (num_frags) {
4788 		/*Do dma unmap the tso seg except the 0th frag */
4789 		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
4790 			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4791 				num_frags);
4792 			qdf_assert(0);
4793 			return;
4794 		}
4795 		qdf_nbuf_tso_unmap_frag(
4796 			osdev,
4797 			tso_seg->seg.tso_frags[num_frags].paddr,
4798 			tso_seg->seg.tso_frags[num_frags].length,
4799 			QDF_DMA_TO_DEVICE);
4800 		tso_seg->seg.tso_frags[num_frags].paddr = 0;
4801 		num_frags--;
4802 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4803 	}
4804 
4805 last_seg_free_first_frag:
4806 	if (is_last_seg) {
4807 		/*Do dma unmap for the tso seg 0th frag */
4808 		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
4809 			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4810 			qdf_assert(0);
4811 			return;
4812 		}
4813 		qdf_nbuf_tso_unmap_frag(osdev,
4814 					tso_seg->seg.tso_frags[0].paddr,
4815 					tso_seg->seg.tso_frags[0].length,
4816 					QDF_DMA_TO_DEVICE);
4817 		tso_seg->seg.tso_frags[0].paddr = 0;
4818 		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4819 	}
4820 }
4821 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
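
/*
 * Illustrative sketch (not part of the driver): a hypothetical TX completion
 * path unmapping every segment of a previously prepared TSO list. The walk
 * below is an example only; real callers also return the segment elements to
 * their free list afterwards.
 */
#if 0
static void example_unmap_tso_list(qdf_device_t osdev,
				   struct qdf_tso_info_t *tso_info)
{
	struct qdf_tso_seg_elem_t *seg = tso_info->tso_seg_list;

	while (seg) {
		/* the last segment also unmaps the shared EIT header frag */
		__qdf_nbuf_unmap_tso_segment(osdev, seg, !seg->next);
		seg = seg->next;
	}
}
#endif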
4822 
4823 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4824 {
4825 	size_t packet_len;
4826 
4827 	packet_len = skb->len -
4828 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4829 		 tcp_hdrlen(skb));
4830 
4831 	return packet_len;
4832 }
4833 
4834 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
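
/*
 * Worked example (illustrative, assumed sizes): for a 1514 byte Ethernet
 * frame with a 14 byte MAC header, a 20 byte IPv4 header and a 20 byte TCP
 * header, skb_transport_header() - skb_mac_header() is 34 and tcp_hdrlen()
 * is 20, so the TCP payload length reported above is 1514 - 54 = 1460 bytes.
 */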
4835 
4836 /**
 * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
 * @skb: network buffer for which the TSO segment count is computed
 *
 * This function calculates how many TSO segments a TCP jumbo packet will
 * be divided into, without actually performing the segmentation.
 *
 * Return: number of TSO segments (0 on failure)
4848  */
4849 #ifndef BUILD_X86
4850 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4851 {
4852 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4853 	uint32_t remainder, num_segs = 0;
4854 	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4855 	uint8_t frags_per_tso = 0;
4856 	uint32_t skb_frag_len = 0;
4857 	uint32_t eit_hdr_len = (skb_transport_header(skb)
4858 			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4859 	skb_frag_t *frag = NULL;
4860 	int j = 0;
4861 	uint32_t temp_num_seg = 0;
4862 
4863 	/* length of the first chunk of data in the skb minus eit header*/
4864 	skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4865 
4866 	/* Calculate num of segs for skb's first chunk of data*/
4867 	remainder = skb_frag_len % tso_seg_size;
4868 	num_segs = skb_frag_len / tso_seg_size;
4869 	/**
4870 	 * Remainder non-zero and nr_frags zero implies end of skb data.
4871 	 * In that case, one more tso seg is required to accommodate
4872 	 * remaining data, hence num_segs++. If nr_frags is non-zero,
4873 	 * then remaining data will be accommodated while doing the calculation
4874 	 * for nr_frags data. Hence, frags_per_tso++.
4875 	 */
4876 	if (remainder) {
4877 		if (!skb_nr_frags)
4878 			num_segs++;
4879 		else
4880 			frags_per_tso++;
4881 	}
4882 
4883 	while (skb_nr_frags) {
4884 		if (j >= skb_shinfo(skb)->nr_frags) {
4885 			qdf_info("TSO: nr_frags %d j %d",
4886 				 skb_shinfo(skb)->nr_frags, j);
4887 			qdf_assert(0);
4888 			return 0;
4889 		}
4890 		/**
4891 		 * Calculate the number of tso seg for nr_frags data:
		 * Get the length of each frag in skb_frag_len and add it to
		 * the remainder. Get the number of segments by dividing by
		 * tso_seg_size and calculate the new remainder.
		 * Decrement the nr_frags value and keep
		 * looping over all the skb fragments.
4897 		 */
4898 		frag = &skb_shinfo(skb)->frags[j];
4899 		skb_frag_len = skb_frag_size(frag);
4900 		temp_num_seg = num_segs;
4901 		remainder += skb_frag_len;
4902 		num_segs += remainder / tso_seg_size;
4903 		remainder = remainder % tso_seg_size;
4904 		skb_nr_frags--;
4905 		if (remainder) {
4906 			if (num_segs > temp_num_seg)
4907 				frags_per_tso = 0;
4908 			/**
4909 			 * increment the tso per frags whenever remainder is
4910 			 * positive. If frags_per_tso reaches the (max-1),
4911 			 * [First frags always have EIT header, therefore max-1]
4912 			 * increment the num_segs as no more data can be
4913 			 * accommodated in the curr tso seg. Reset the remainder
4914 			 * and frags per tso and keep looping.
4915 			 */
4916 			frags_per_tso++;
4917 			if (frags_per_tso == FRAG_NUM_MAX - 1) {
4918 				num_segs++;
4919 				frags_per_tso = 0;
4920 				remainder = 0;
4921 			}
4922 			/**
4923 			 * If this is the last skb frag and still remainder is
4924 			 * non-zero(frags_per_tso is not reached to the max-1)
4925 			 * then increment the num_segs to take care of the
4926 			 * remaining length.
4927 			 */
4928 			if (!skb_nr_frags && remainder) {
4929 				num_segs++;
4930 				frags_per_tso = 0;
4931 			}
4932 		} else {
4933 			 /* Whenever remainder is 0, reset the frags_per_tso. */
4934 			frags_per_tso = 0;
4935 		}
4936 		j++;
4937 	}
4938 
4939 	return num_segs;
4940 }
4941 #elif !defined(QCA_WIFI_QCN9000)
4942 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4943 {
4944 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4945 	skb_frag_t *frag = NULL;
4946 
4947 	/*
4948 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
4949 	 * region which cannot be accessed by Target
4950 	 */
4951 	if (virt_to_phys(skb->data) < 0x50000040) {
4952 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4953 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4954 				virt_to_phys(skb->data));
4955 		goto fail;
4956 
4957 	}
4958 
4959 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4960 		frag = &skb_shinfo(skb)->frags[i];
4961 
4962 		if (!frag)
4963 			goto fail;
4964 
4965 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4966 			goto fail;
4967 	}
4968 
4969 
4970 	gso_size = skb_shinfo(skb)->gso_size;
4971 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4972 			+ tcp_hdrlen(skb));
4973 	while (tmp_len) {
4974 		num_segs++;
4975 		if (tmp_len > gso_size)
4976 			tmp_len -= gso_size;
4977 		else
4978 			break;
4979 	}
4980 
4981 	return num_segs;
4982 
4983 	/*
4984 	 * Do not free this frame, just do socket level accounting
4985 	 * so that this is not reused.
4986 	 */
4987 fail:
4988 	if (skb->sk)
4989 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4990 
4991 	return 0;
4992 }
4993 #else
4994 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4995 {
4996 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4997 	skb_frag_t *frag = NULL;
4998 
4999 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5000 		frag = &skb_shinfo(skb)->frags[i];
5001 
5002 		if (!frag)
5003 			goto fail;
5004 	}
5005 
5006 	gso_size = skb_shinfo(skb)->gso_size;
5007 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
5008 			+ tcp_hdrlen(skb));
5009 	while (tmp_len) {
5010 		num_segs++;
5011 		if (tmp_len > gso_size)
5012 			tmp_len -= gso_size;
5013 		else
5014 			break;
5015 	}
5016 
5017 	return num_segs;
5018 
5019 	/*
5020 	 * Do not free this frame, just do socket level accounting
5021 	 * so that this is not reused.
5022 	 */
5023 fail:
5024 	if (skb->sk)
5025 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
5026 
5027 	return 0;
5028 }
5029 #endif
5030 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
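
/*
 * Worked example (illustrative, assumed sizes): with gso_size = 1460, an
 * skb whose linear area holds only the 54 byte EIT header and two page
 * frags of 4380 bytes each, the linear remainder is 0 and each frag
 * contributes 4380 / 1460 = 3 segments, giving 6 TSO segments in total.
 */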
5031 
5032 #endif /* FEATURE_TSO */
5033 
/**
 * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address to be split
 * @lo: pointer filled with the low 32 bits of the DMA address
 * @hi: pointer filled with the high 32 bits of the DMA address
 *
 * Returns the high and low 32 bits of the DMA address in the provided
 * pointers.
 *
 * Return: N/A
 */
5041 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
5042 			  uint32_t *lo, uint32_t *hi)
5043 {
5044 	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
5045 		*lo = lower_32_bits(dmaaddr);
5046 		*hi = upper_32_bits(dmaaddr);
5047 	} else {
5048 		*lo = dmaaddr;
5049 		*hi = 0;
5050 	}
5051 }
5052 
5053 qdf_export_symbol(__qdf_dmaaddr_to_32s);
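
/*
 * Illustrative sketch (not part of the driver): splitting a DMA address into
 * the low/high words that 32-bit descriptor fields typically expect. The
 * descriptor layout and names below are hypothetical.
 */
#if 0
struct example_desc {
	uint32_t buf_addr_lo;
	uint32_t buf_addr_hi;
};

static void example_fill_desc(struct example_desc *desc, qdf_dma_addr_t paddr)
{
	uint32_t lo, hi;

	__qdf_dmaaddr_to_32s(paddr, &lo, &hi);
	desc->buf_addr_lo = lo;
	desc->buf_addr_hi = hi;
}
#endif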
5054 
5055 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
5056 {
5057 	qdf_nbuf_users_inc(&skb->users);
5058 	return skb;
5059 }
5060 qdf_export_symbol(__qdf_nbuf_inc_users);
5061 
5062 int __qdf_nbuf_get_users(struct sk_buff *skb)
5063 {
5064 	return qdf_nbuf_users_read(&skb->users);
5065 }
5066 qdf_export_symbol(__qdf_nbuf_get_users);
5067 
5068 /**
5069  * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
5070  * @skb: sk_buff handle
5071  *
5072  * Return: none
5073  */
5075 void __qdf_nbuf_ref(struct sk_buff *skb)
5076 {
5077 	skb_get(skb);
5078 }
5079 qdf_export_symbol(__qdf_nbuf_ref);
5080 
5081 /**
5082  * __qdf_nbuf_shared() - Check whether the buffer is shared
 * @skb: sk_buff buffer
 *
 * Return: true if more than one user holds a reference to this buffer
5086  */
5087 int __qdf_nbuf_shared(struct sk_buff *skb)
5088 {
5089 	return skb_shared(skb);
5090 }
5091 qdf_export_symbol(__qdf_nbuf_shared);
5092 
5093 /**
5094  * __qdf_nbuf_dmamap_create() - create a DMA map.
5095  * @osdev: qdf device handle
5096  * @dmap: dma map handle
5097  *
 * The map can later be used to map networking buffers. DMA maps:
 * - need space in the driver's software descriptor
 * - are typically created during driver init (adf_drv_create)
 * - must be created before any API (e.g. qdf_nbuf_map) that uses them
 *
 * Return: QDF_STATUS
5104  */
5105 QDF_STATUS
5106 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
5107 {
5108 	QDF_STATUS error = QDF_STATUS_SUCCESS;
5109 	/*
	 * The driver can advertise its SG capability; that must be handled
	 * here, along with bounce buffers if they are present.
5112 	 */
5113 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
5114 	if (!(*dmap))
5115 		error = QDF_STATUS_E_NOMEM;
5116 
5117 	return error;
5118 }
5119 qdf_export_symbol(__qdf_nbuf_dmamap_create);
5120 /**
5121  * __qdf_nbuf_dmamap_destroy() - delete a dma map
5122  * @osdev: qdf device handle
5123  * @dmap: dma map handle
5124  *
5125  * Return: none
5126  */
5127 void
5128 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
5129 {
5130 	kfree(dmap);
5131 }
5132 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
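
/*
 * Illustrative sketch (not part of the driver): the expected create/destroy
 * pairing for a DMA map handle. Error handling is minimal and the helper
 * name is an example only.
 */
#if 0
static QDF_STATUS example_dmamap_lifecycle(qdf_device_t osdev)
{
	__qdf_dma_map_t dmap;
	QDF_STATUS status;

	status = __qdf_nbuf_dmamap_create(osdev, &dmap);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* ... map nbufs using the handle ... */

	__qdf_nbuf_dmamap_destroy(osdev, dmap);
	return QDF_STATUS_SUCCESS;
}
#endif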
5133 
5134 /**
 * __qdf_nbuf_map_nbytes() - DMA map the specified number of bytes of the nbuf
5136  * @osdev: os device
5137  * @skb: skb handle
5138  * @dir: dma direction
5139  * @nbytes: number of bytes to be mapped
5140  *
5141  * Return: QDF_STATUS
5142  */
5143 #ifdef QDF_OS_DEBUG
5144 QDF_STATUS
5145 __qdf_nbuf_map_nbytes(
5146 	qdf_device_t osdev,
5147 	struct sk_buff *skb,
5148 	qdf_dma_dir_t dir,
5149 	int nbytes)
5150 {
5151 	struct skb_shared_info  *sh = skb_shinfo(skb);
5152 
5153 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
5154 
5155 	/*
5156 	 * Assume there's only a single fragment.
5157 	 * To support multiple fragments, it would be necessary to change
5158 	 * adf_nbuf_t to be a separate object that stores meta-info
5159 	 * (including the bus address for each fragment) and a pointer
5160 	 * to the underlying sk_buff.
5161 	 */
5162 	qdf_assert(sh->nr_frags == 0);
5163 
5164 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
5165 }
5166 qdf_export_symbol(__qdf_nbuf_map_nbytes);
5167 #else
5168 QDF_STATUS
5169 __qdf_nbuf_map_nbytes(
5170 	qdf_device_t osdev,
5171 	struct sk_buff *skb,
5172 	qdf_dma_dir_t dir,
5173 	int nbytes)
5174 {
5175 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
5176 }
5177 qdf_export_symbol(__qdf_nbuf_map_nbytes);
5178 #endif
5179 /**
5180  * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
5181  * @osdev: OS device
5182  * @skb: skb handle
5183  * @dir: direction
5184  * @nbytes: number of bytes
5185  *
5186  * Return: none
5187  */
5188 void
5189 __qdf_nbuf_unmap_nbytes(
5190 	qdf_device_t osdev,
5191 	struct sk_buff *skb,
5192 	qdf_dma_dir_t dir,
5193 	int nbytes)
5194 {
5195 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
5196 
5197 	/*
5198 	 * Assume there's a single fragment.
5199 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
5200 	 */
5201 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
5202 }
5203 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
5204 
5205 /**
5206  * __qdf_nbuf_dma_map_info() - return the dma map info
5207  * @bmap: dma map
5208  * @sg: dma map info
5209  *
5210  * Return: none
5211  */
5212 void
5213 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
5214 {
5215 	qdf_assert(bmap->mapped);
5216 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
5217 
5218 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
5219 			sizeof(struct __qdf_segment));
5220 	sg->nsegs = bmap->nsegs;
5221 }
5222 qdf_export_symbol(__qdf_nbuf_dma_map_info);
5223 /**
 * __qdf_nbuf_frag_info() - fill a scatter/gather list with the data and
 *			length of each fragment of the skb
5226  * @skb: sk buff
5227  * @sg: scatter/gather list of all the frags
5228  *
5229  * Return: none
5230  */
5231 #if defined(__QDF_SUPPORT_FRAG_MEM)
5232 void
5233 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	int i;

	qdf_assert(skb);
	/* segment 0 describes the linear part of the skb */
	sg->sg_segs[0].vaddr = skb->data;
	sg->sg_segs[0].len   = skb->len;
	sg->nsegs            = 1;

	/* one additional segment for each page fragment */
	for (i = 1; i <= sh->nr_frags; i++) {
		skb_frag_t    *f        = &sh->frags[i - 1];

		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
			f->page_offset);
		sg->sg_segs[i].len      = f->size;

		qdf_assert(i < QDF_MAX_SGLIST);
	}
	sg->nsegs += sh->nr_frags;
5250 
5251 }
5252 qdf_export_symbol(__qdf_nbuf_frag_info);
5253 #else
5254 #ifdef QDF_OS_DEBUG
5255 void
5256 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
5257 {
5259 	struct skb_shared_info  *sh = skb_shinfo(skb);
5260 
5261 	qdf_assert(skb);
5262 	sg->sg_segs[0].vaddr = skb->data;
5263 	sg->sg_segs[0].len   = skb->len;
5264 	sg->nsegs            = 1;
5265 
5266 	qdf_assert(sh->nr_frags == 0);
5267 }
5268 qdf_export_symbol(__qdf_nbuf_frag_info);
5269 #else
5270 void
5271 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
5272 {
5273 	sg->sg_segs[0].vaddr = skb->data;
5274 	sg->sg_segs[0].len   = skb->len;
5275 	sg->nsegs            = 1;
5276 }
5277 qdf_export_symbol(__qdf_nbuf_frag_info);
5278 #endif
5279 #endif
5280 /**
5281  * __qdf_nbuf_get_frag_size() - get frag size
5282  * @nbuf: sk buffer
5283  * @cur_frag: current frag
5284  *
5285  * Return: frag size
5286  */
5287 uint32_t
5288 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
5289 {
5290 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
5291 	const skb_frag_t *frag = sh->frags + cur_frag;
5292 
5293 	return skb_frag_size(frag);
5294 }
5295 qdf_export_symbol(__qdf_nbuf_get_frag_size);
5296 
5297 /**
5298  * __qdf_nbuf_frag_map() - dma map frag
5299  * @osdev: os device
5300  * @nbuf: sk buff
5301  * @offset: offset
5302  * @dir: direction
5303  * @cur_frag: current fragment
5304  *
5305  * Return: QDF status
5306  */
5307 #ifdef A_SIMOS_DEVHOST
5308 QDF_STATUS __qdf_nbuf_frag_map(
5309 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
5310 	int offset, qdf_dma_dir_t dir, int cur_frag)
5311 {
5312 	int32_t paddr, frag_len;
5313 
5314 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
5315 	return QDF_STATUS_SUCCESS;
5316 }
5317 qdf_export_symbol(__qdf_nbuf_frag_map);
5318 #else
5319 QDF_STATUS __qdf_nbuf_frag_map(
5320 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
5321 	int offset, qdf_dma_dir_t dir, int cur_frag)
5322 {
5323 	dma_addr_t paddr, frag_len;
5324 	struct skb_shared_info *sh = skb_shinfo(nbuf);
5325 	const skb_frag_t *frag = sh->frags + cur_frag;
5326 
5327 	frag_len = skb_frag_size(frag);
5328 
5329 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
5330 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
5331 					__qdf_dma_dir_to_os(dir));
5332 	return dma_mapping_error(osdev->dev, paddr) ?
5333 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
5334 }
5335 qdf_export_symbol(__qdf_nbuf_frag_map);
5336 #endif
5337 /**
5338  * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
5339  * @dmap: dma map
5340  * @cb: callback
5341  * @arg: argument
5342  *
5343  * Return: none
5344  */
5345 void
5346 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
5347 {
5348 	return;
5349 }
5350 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
5351 
5352 
5353 /**
5354  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
5355  * @osdev: os device
5356  * @buf: sk buff
5357  * @dir: direction
5358  *
5359  * Return: none
5360  */
5361 #if defined(A_SIMOS_DEVHOST)
5362 static void __qdf_nbuf_sync_single_for_cpu(
5363 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
5364 {
5365 	return;
5366 }
5367 #else
5368 static void __qdf_nbuf_sync_single_for_cpu(
5369 	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
5370 {
5371 	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
5372 		qdf_err("ERROR: NBUF mapped physical address is NULL");
5373 		return;
5374 	}
5375 	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
5376 		skb_end_offset(buf) - skb_headroom(buf),
5377 		__qdf_dma_dir_to_os(dir));
5378 }
5379 #endif
5380 /**
5381  * __qdf_nbuf_sync_for_cpu() - nbuf sync
5382  * @osdev: os device
5383  * @skb: sk buff
5384  * @dir: direction
5385  *
5386  * Return: none
5387  */
5388 void
5389 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
5390 	struct sk_buff *skb, qdf_dma_dir_t dir)
5391 {
5392 	qdf_assert(
5393 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
5394 
5395 	/*
5396 	 * Assume there's a single fragment.
5397 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
5398 	 */
5399 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
5400 }
5401 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
5402 
5403 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
5404 /**
5405  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
5406  * @rx_status: Pointer to rx_status.
5407  * @rtap_buf: Buf to which VHT info has to be updated.
5408  * @rtap_len: Current length of radiotap buffer
5409  *
5410  * Return: Length of radiotap after VHT flags updated.
5411  */
5412 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5413 					struct mon_rx_status *rx_status,
5414 					int8_t *rtap_buf,
5415 					uint32_t rtap_len)
5416 {
5417 	uint16_t vht_flags = 0;
5418 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5419 
5420 	rtap_len = qdf_align(rtap_len, 2);
5421 
5422 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5423 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
5424 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
5425 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
5426 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
5427 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
5428 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
5429 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
5430 	rtap_len += 2;
5431 
5432 	rtap_buf[rtap_len] |=
5433 		(rx_status->is_stbc ?
5434 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
5435 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
5436 		(rx_status->ldpc ?
5437 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
5438 		(rx_status->beamformed ?
5439 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
5440 	rtap_len += 1;
5441 
5442 	if (!rx_user_status) {
5443 		switch (rx_status->vht_flag_values2) {
5444 		case IEEE80211_RADIOTAP_VHT_BW_20:
5445 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5446 			break;
5447 		case IEEE80211_RADIOTAP_VHT_BW_40:
5448 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5449 			break;
5450 		case IEEE80211_RADIOTAP_VHT_BW_80:
5451 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5452 			break;
5453 		case IEEE80211_RADIOTAP_VHT_BW_160:
5454 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5455 			break;
5456 		}
5457 		rtap_len += 1;
5458 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
5459 		rtap_len += 1;
5460 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
5461 		rtap_len += 1;
5462 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
5463 		rtap_len += 1;
5464 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
5465 		rtap_len += 1;
5466 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
5467 		rtap_len += 1;
5468 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
5469 		rtap_len += 1;
5470 		put_unaligned_le16(rx_status->vht_flag_values6,
5471 				   &rtap_buf[rtap_len]);
5472 		rtap_len += 2;
5473 	} else {
5474 		switch (rx_user_status->vht_flag_values2) {
5475 		case IEEE80211_RADIOTAP_VHT_BW_20:
5476 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5477 			break;
5478 		case IEEE80211_RADIOTAP_VHT_BW_40:
5479 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5480 			break;
5481 		case IEEE80211_RADIOTAP_VHT_BW_80:
5482 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5483 			break;
5484 		case IEEE80211_RADIOTAP_VHT_BW_160:
5485 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5486 			break;
5487 		}
5488 		rtap_len += 1;
5489 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
5490 		rtap_len += 1;
5491 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
5492 		rtap_len += 1;
5493 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
5494 		rtap_len += 1;
5495 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
5496 		rtap_len += 1;
5497 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
5498 		rtap_len += 1;
5499 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
5500 		rtap_len += 1;
5501 		put_unaligned_le16(rx_user_status->vht_flag_values6,
5502 				   &rtap_buf[rtap_len]);
5503 		rtap_len += 2;
5504 	}
5505 
5506 	return rtap_len;
5507 }
5508 
5509 /**
5510  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
5511  * @rx_status: Pointer to rx_status.
5512  * @rtap_buf: buffer to which radiotap has to be updated
5513  * @rtap_len: radiotap length
5514  *
 * API to update high-efficiency (11ax) fields in the radiotap header
5516  *
5517  * Return: length of rtap_len updated.
5518  */
5519 static unsigned int
5520 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5521 				     int8_t *rtap_buf, uint32_t rtap_len)
5522 {
5523 	/*
5524 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
5525 	 * Enable all "known" HE radiotap flags for now
5526 	 */
5527 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5528 
5529 	rtap_len = qdf_align(rtap_len, 2);
5530 
5531 	if (!rx_user_status) {
5532 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5533 		rtap_len += 2;
5534 
5535 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5536 		rtap_len += 2;
5537 
5538 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5539 		rtap_len += 2;
5540 
5541 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5542 		rtap_len += 2;
5543 
5544 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5545 		rtap_len += 2;
5546 
5547 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5548 		rtap_len += 2;
5549 		qdf_rl_debug("he data %x %x %x %x %x %x",
5550 			     rx_status->he_data1,
5551 			     rx_status->he_data2, rx_status->he_data3,
5552 			     rx_status->he_data4, rx_status->he_data5,
5553 			     rx_status->he_data6);
5554 	} else {
5555 		put_unaligned_le16(rx_user_status->he_data1,
5556 				   &rtap_buf[rtap_len]);
5557 		rtap_len += 2;
5558 
5559 		put_unaligned_le16(rx_user_status->he_data2,
5560 				   &rtap_buf[rtap_len]);
5561 		rtap_len += 2;
5562 
5563 		put_unaligned_le16(rx_user_status->he_data3,
5564 				   &rtap_buf[rtap_len]);
5565 		rtap_len += 2;
5566 
5567 		put_unaligned_le16(rx_user_status->he_data4,
5568 				   &rtap_buf[rtap_len]);
5569 		rtap_len += 2;
5570 
5571 		put_unaligned_le16(rx_user_status->he_data5,
5572 				   &rtap_buf[rtap_len]);
5573 		rtap_len += 2;
5574 
5575 		put_unaligned_le16(rx_user_status->he_data6,
5576 				   &rtap_buf[rtap_len]);
5577 		rtap_len += 2;
5578 		qdf_rl_debug("he data %x %x %x %x %x %x",
5579 			     rx_user_status->he_data1,
5580 			     rx_user_status->he_data2, rx_user_status->he_data3,
5581 			     rx_user_status->he_data4, rx_user_status->he_data5,
5582 			     rx_user_status->he_data6);
5583 	}
5584 
5585 	return rtap_len;
5586 }
5587 
5588 
5589 /**
5590  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
5591  * @rx_status: Pointer to rx_status.
5592  * @rtap_buf: buffer to which radiotap has to be updated
5593  * @rtap_len: radiotap length
5594  *
 * API to update HE-MU fields in the radiotap header
5596  *
5597  * Return: length of rtap_len updated.
5598  */
5599 static unsigned int
5600 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
5601 				     int8_t *rtap_buf, uint32_t rtap_len)
5602 {
5603 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5604 
5605 	rtap_len = qdf_align(rtap_len, 2);
5606 
5607 	/*
5608 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
5609 	 * Enable all "known" he-mu radiotap flags for now
5610 	 */
5611 
5612 	if (!rx_user_status) {
5613 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5614 		rtap_len += 2;
5615 
5616 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5617 		rtap_len += 2;
5618 
5619 		rtap_buf[rtap_len] = rx_status->he_RU[0];
5620 		rtap_len += 1;
5621 
5622 		rtap_buf[rtap_len] = rx_status->he_RU[1];
5623 		rtap_len += 1;
5624 
5625 		rtap_buf[rtap_len] = rx_status->he_RU[2];
5626 		rtap_len += 1;
5627 
5628 		rtap_buf[rtap_len] = rx_status->he_RU[3];
5629 		rtap_len += 1;
5630 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5631 			  rx_status->he_flags1,
5632 			  rx_status->he_flags2, rx_status->he_RU[0],
5633 			  rx_status->he_RU[1], rx_status->he_RU[2],
5634 			  rx_status->he_RU[3]);
5635 	} else {
5636 		put_unaligned_le16(rx_user_status->he_flags1,
5637 				   &rtap_buf[rtap_len]);
5638 		rtap_len += 2;
5639 
5640 		put_unaligned_le16(rx_user_status->he_flags2,
5641 				   &rtap_buf[rtap_len]);
5642 		rtap_len += 2;
5643 
5644 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
5645 		rtap_len += 1;
5646 
5647 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
5648 		rtap_len += 1;
5649 
5650 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
5651 		rtap_len += 1;
5652 
5653 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
5654 		rtap_len += 1;
5655 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5656 			  rx_user_status->he_flags1,
5657 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
5658 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
5659 			  rx_user_status->he_RU[3]);
5660 	}
5661 
5662 	return rtap_len;
5663 }
5664 
5665 /**
5666  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
5667  * @rx_status: Pointer to rx_status.
5668  * @rtap_buf: buffer to which radiotap has to be updated
5669  * @rtap_len: radiotap length
5670  *
5671  * This API updates HE-MU-OTHER fields in the radiotap header
5672  *
5673  * Return: length of rtap_len updated.
5674  */
5675 static unsigned int
5676 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
5677 				     int8_t *rtap_buf, uint32_t rtap_len)
5678 {
5679 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5680 
5681 	rtap_len = qdf_align(rtap_len, 2);
5682 
5683 	/*
5684 	 * IEEE80211_RADIOTAP_HE_MU_OTHER u16, u16, u8, u8
5685 	 * Enable all "known" he-mu-other radiotap flags for now
5686 	 */
5687 	if (!rx_user_status) {
5688 		put_unaligned_le16(rx_status->he_per_user_1,
5689 				   &rtap_buf[rtap_len]);
5690 		rtap_len += 2;
5691 
5692 		put_unaligned_le16(rx_status->he_per_user_2,
5693 				   &rtap_buf[rtap_len]);
5694 		rtap_len += 2;
5695 
5696 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
5697 		rtap_len += 1;
5698 
5699 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
5700 		rtap_len += 1;
5701 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5702 			  rx_status->he_per_user_1,
5703 			  rx_status->he_per_user_2,
5704 			  rx_status->he_per_user_position,
5705 			  rx_status->he_per_user_known);
5706 	} else {
5707 		put_unaligned_le16(rx_user_status->he_per_user_1,
5708 				   &rtap_buf[rtap_len]);
5709 		rtap_len += 2;
5710 
5711 		put_unaligned_le16(rx_user_status->he_per_user_2,
5712 				   &rtap_buf[rtap_len]);
5713 		rtap_len += 2;
5714 
5715 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5716 		rtap_len += 1;
5717 
5718 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5719 		rtap_len += 1;
5720 		qdf_debug("he_per_user %x %x pos %x knwn %x",
5721 			  rx_user_status->he_per_user_1,
5722 			  rx_user_status->he_per_user_2,
5723 			  rx_user_status->he_per_user_position,
5724 			  rx_user_status->he_per_user_known);
5725 	}
5726 
5727 	return rtap_len;
5728 }
5729 
5730 /**
5731  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5732  *						from rx_status
5733  * @rx_status: Pointer to rx_status.
5734  * @rtap_buf: buffer to which radiotap has to be updated
5735  * @rtap_len: radiotap length
5736  *
5737  * This API updates U-SIG (11be) fields in the radiotap header
5738  *
5739  * Return: length of rtap_len updated.
5740  */
5741 static unsigned int
5742 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5743 				    int8_t *rtap_buf, uint32_t rtap_len)
5744 {
5745 	/*
5746 	 * IEEE80211_RADIOTAP_USIG:
5747 	 *		u32, u32, u32
5748 	 */
5749 	rtap_len = qdf_align(rtap_len, 4);
5750 
5751 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5752 	rtap_len += 4;
5753 
5754 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5755 	rtap_len += 4;
5756 
5757 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5758 	rtap_len += 4;
5759 
5760 	qdf_rl_debug("U-SIG data %x %x %x",
5761 		     rx_status->usig_common, rx_status->usig_value,
5762 		     rx_status->usig_mask);
5763 
5764 	return rtap_len;
5765 }
5766 
5767 /**
5768  * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
5769  *					from rx_status
5770  * @rx_status: Pointer to rx_status.
5771  * @rtap_buf: buffer to which radiotap has to be updated
5772  * @rtap_len: radiotap length
5773  *
5774  * This API updates Extra High Throughput (11be) fields in the radiotap header
5775  *
5776  * Return: length of rtap_len updated.
5777  */
5778 static unsigned int
5779 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5780 				   int8_t *rtap_buf, uint32_t rtap_len)
5781 {
5782 	uint32_t user;
5783 
5784 	/*
5785 	 * IEEE80211_RADIOTAP_EHT:
5786 	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5787 	 */
5788 	rtap_len = qdf_align(rtap_len, 4);
5789 
5790 	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5791 	rtap_len += 4;
5792 
5793 	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5794 	rtap_len += 4;
5795 
5796 	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5797 	rtap_len += 4;
5798 
5799 	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5800 	rtap_len += 4;
5801 
5802 	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5803 	rtap_len += 4;
5804 
5805 	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5806 	rtap_len += 4;
5807 
5808 	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5809 	rtap_len += 4;
5810 
5811 	for (user = 0; user < rx_status->num_eht_user_info_valid; user++) {
5812 		put_unaligned_le32(rx_status->eht_user_info[user],
5813 				   &rtap_buf[rtap_len]);
5814 		rtap_len += 4;
5815 	}
5816 
5817 	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5818 		     rx_status->eht_known, rx_status->eht_data[0],
5819 		     rx_status->eht_data[1], rx_status->eht_data[2],
5820 		     rx_status->eht_data[3], rx_status->eht_data[4],
5821 		     rx_status->eht_data[5]);
5822 
5823 	return rtap_len;
5824 }
5825 
5826 #define IEEE80211_RADIOTAP_TX_STATUS 0
5827 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5828 #define IEEE80211_RADIOTAP_EXTENSION2 2
5829 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5830 
5831 /**
5832  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5833  * @rx_status: Pointer to rx_status.
5834  * @rtap_buf: Buf to which AMPDU info has to be updated.
5835  * @rtap_len: Current length of radiotap buffer
5836  *
5837  * Return: Length of radiotap after AMPDU flags updated.
5838  */
5839 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5840 					struct mon_rx_status *rx_status,
5841 					uint8_t *rtap_buf,
5842 					uint32_t rtap_len)
5843 {
5844 	/*
5845 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5846 	 * First 32 bits of AMPDU represents the reference number
5847 	 */
5848 
5849 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5850 	uint16_t ampdu_flags = 0;
5851 	uint16_t ampdu_reserved_flags = 0;
5852 
5853 	rtap_len = qdf_align(rtap_len, 4);
5854 
5855 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5856 	rtap_len += 4;
5857 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5858 	rtap_len += 2;
5859 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5860 	rtap_len += 2;
5861 
5862 	return rtap_len;
5863 }
5864 
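/*
 * Pick the combined RSSI in dBm: depending on the build configuration it is
 * either already reported in dBm, or derived by adding the dBm conversion
 * offset (when supported) or the channel noise floor to the raw value.
 */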
5865 #ifdef DP_MON_RSSI_IN_DBM
5866 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5867 (rx_status->rssi_comb)
5868 #else
5869 #ifdef QCA_RSSI_DB2DBM
5870 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5871 (((rx_status)->rssi_dbm_conv_support) ? \
5872 ((rx_status)->rssi_comb + (rx_status)->rssi_offset) :\
5873 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5874 #else
5875 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5876 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5877 #endif
5878 #endif
5879 
5880 /**
5881  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5882  * @rx_status: Pointer to rx_status.
5883  * @rtap_buf: Buf to which tx info has to be updated.
5884  * @rtap_len: Current length of radiotap buffer
5885  *
5886  * Return: Length of radiotap after tx flags updated.
5887  */
5888 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5889 						struct mon_rx_status *rx_status,
5890 						uint8_t *rtap_buf,
5891 						uint32_t rtap_len)
5892 {
5893 	/*
5894 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5895 	 */
5896 
5897 	uint16_t tx_flags = 0;
5898 
5899 	rtap_len = qdf_align(rtap_len, 2);
5900 
5901 	switch (rx_status->tx_status) {
5902 	case RADIOTAP_TX_STATUS_FAIL:
5903 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5904 		break;
5905 	case RADIOTAP_TX_STATUS_NOACK:
5906 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5907 		break;
5908 	}
5909 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5910 	rtap_len += 2;
5911 
5912 	return rtap_len;
5913 }
5914 
5915 /**
5916  * qdf_nbuf_update_radiotap() - Update radiotap header from rx_status
5917  * @rx_status: Pointer to rx_status.
5918  * @nbuf:      nbuf pointer to which radiotap has to be updated
5919  * @headroom_sz: Available headroom size.
5920  *
5921  * Return: length of rtap_len updated.
5922  */
5923 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5924 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5925 {
5926 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5927 	struct ieee80211_radiotap_header *rthdr =
5928 		(struct ieee80211_radiotap_header *)rtap_buf;
5929 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5930 	uint32_t rtap_len = rtap_hdr_len;
5931 	uint8_t length = rtap_len;
5932 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5933 	struct qdf_radiotap_ext2 *rtap_ext2;
5934 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5935 
5936 	/* per user info */
5937 	qdf_le32_t *it_present;
5938 	uint32_t it_present_val;
5939 	bool radiotap_ext1_hdr_present = false;
5940 
5941 	it_present = &rthdr->it_present;
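	/*
	 * Present bits are accumulated in it_present_val and written back to
	 * the header's it_present word(s) once all fields are known; a second
	 * present word is used when extension-1 fields (tx status, ext2,
	 * U-SIG, EHT) follow.
	 */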
5942 
5943 	/* Adding Extended Header space */
5944 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5945 	    rx_status->usig_flags || rx_status->eht_flags) {
5946 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5947 		rtap_len = rtap_hdr_len;
5948 		radiotap_ext1_hdr_present = true;
5949 	}
5950 
5951 	length = rtap_len;
5952 
5953 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5954 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5955 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5956 	rtap_len += 8;
5957 
5958 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5959 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5960 
5961 	if (rx_status->rs_fcs_err)
5962 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5963 
5964 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5965 	rtap_len += 1;
5966 
5967 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5968 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5969 	    !rx_status->he_flags) {
5970 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5971 		rtap_buf[rtap_len] = rx_status->rate;
5972 	} else
5973 		rtap_buf[rtap_len] = 0;
5974 	rtap_len += 1;
5975 
5976 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5977 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5978 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5979 	rtap_len += 2;
5980 	/* Channel flags. */
5981 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5982 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5983 	else
5984 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5985 	if (rx_status->cck_flag)
5986 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5987 	if (rx_status->ofdm_flag)
5988 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5989 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5990 	rtap_len += 2;
5991 
5992 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5993 	 *					(dBm)
5994 	 */
5995 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5996 	/*
5997 	 * rssi_comb is in dB; convert it to dBm by adding the channel
5998 	 * noise floor (or the dBm conversion offset, when supported)
5999 	 */
6000 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
6001 	rtap_len += 1;
6002 
6003 	/* RX signal noise floor */
6004 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
6005 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
6006 	rtap_len += 1;
6007 
6008 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
6009 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
6010 	rtap_buf[rtap_len] = rx_status->nr_ant;
6011 	rtap_len += 1;
6012 
6013 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
6014 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
6015 		return 0;
6016 	}
6017 
6018 	/* update tx flags for pkt capture */
6019 	if (rx_status->add_rtap_ext) {
6020 		rthdr->it_present |=
6021 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
6022 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
6023 							     rtap_buf,
6024 							     rtap_len);
6025 
6026 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
6027 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
6028 			return 0;
6029 		}
6030 	}
6031 
6032 	if (rx_status->ht_flags) {
6033 		length = rtap_len;
6034 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
6035 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
6036 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
6037 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
6038 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
6039 		rtap_len += 1;
6040 
6041 		if (rx_status->sgi)
6042 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
6043 		if (rx_status->bw)
6044 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
6045 		else
6046 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
6047 		rtap_len += 1;
6048 
6049 		rtap_buf[rtap_len] = rx_status->ht_mcs;
6050 		rtap_len += 1;
6051 
6052 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
6053 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
6054 			return 0;
6055 		}
6056 	}
6057 
6058 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
6059 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
6060 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
6061 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
6062 								rtap_buf,
6063 								rtap_len);
6064 	}
6065 
6066 	if (rx_status->vht_flags) {
6067 		length = rtap_len;
6068 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
6069 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
6070 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
6071 								rtap_buf,
6072 								rtap_len);
6073 
6074 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
6075 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
6076 			return 0;
6077 		}
6078 	}
6079 
6080 	if (rx_status->he_flags) {
6081 		length = rtap_len;
6082 		/* IEEE80211_RADIOTAP_HE */
6083 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
6084 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
6085 								rtap_buf,
6086 								rtap_len);
6087 
6088 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
6089 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
6090 			return 0;
6091 		}
6092 	}
6093 
6094 	if (rx_status->he_mu_flags) {
6095 		length = rtap_len;
6096 		/* IEEE80211_RADIOTAP_HE-MU */
6097 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
6098 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
6099 								rtap_buf,
6100 								rtap_len);
6101 
6102 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
6103 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
6104 			return 0;
6105 		}
6106 	}
6107 
6108 	if (rx_status->he_mu_other_flags) {
6109 		length = rtap_len;
6110 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
6111 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
6112 		rtap_len =
6113 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
6114 								rtap_buf,
6115 								rtap_len);
6116 
6117 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
6118 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
6119 			return 0;
6120 		}
6121 	}
6122 
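	/* the vendor namespace needs 2-byte alignment for its u16 skip_length */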
6123 	rtap_len = qdf_align(rtap_len, 2);
6124 	/*
6125 	 * Radiotap Vendor Namespace
6126 	 */
6127 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
6128 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
6129 					(rtap_buf + rtap_len);
6130 	/*
6131 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
6132 	 */
6133 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
6134 	/*
6135 	 * Name space selector = 0
6136 	 * We only will have one namespace for now
6137 	 */
6138 	radiotap_vendor_ns_ath->hdr.selector = 0;
6139 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
6140 					sizeof(*radiotap_vendor_ns_ath) -
6141 					sizeof(radiotap_vendor_ns_ath->hdr));
6142 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
6143 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
6144 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
6145 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
6146 				cpu_to_le32(rx_status->ppdu_timestamp);
6147 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
6148 
6149 	/* Move to next it_present */
6150 	if (radiotap_ext1_hdr_present) {
6151 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
6152 		put_unaligned_le32(it_present_val, it_present);
6153 		it_present_val = 0;
6154 		it_present++;
6155 	}
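	/* any present bits set from here on land in the extension present word */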
6156 
6157 	/* Add Extension to Radiotap Header & corresponding data */
6158 	if (rx_status->add_rtap_ext) {
6159 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
6160 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
6161 
6162 		rtap_buf[rtap_len] = rx_status->tx_status;
6163 		rtap_len += 1;
6164 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
6165 		rtap_len += 1;
6166 	}
6167 
6168 	/* Add Extension2 to Radiotap Header */
6169 	if (rx_status->add_rtap_ext2) {
6170 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
6171 
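		/* extension-2 carries PPDU ids, TID, start sequence and BA bitmap */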
6172 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
6173 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
6174 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
6175 		if (!rx_user_status) {
6176 			rtap_ext2->tid = rx_status->tid;
6177 			rtap_ext2->start_seq = rx_status->start_seq;
6178 			qdf_mem_copy(rtap_ext2->ba_bitmap,
6179 				     rx_status->ba_bitmap,
6180 				     8 * (sizeof(uint32_t)));
6181 		} else {
6182 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
6183 
6184 			/* set default bitmap sz if not set */
6185 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
6186 			rtap_ext2->tid = rx_user_status->tid;
6187 			rtap_ext2->start_seq = rx_user_status->start_seq;
6188 			qdf_mem_copy(rtap_ext2->ba_bitmap,
6189 				     rx_user_status->ba_bitmap,
6190 				     ba_bitmap_sz * (sizeof(uint32_t)));
6191 		}
6192 
6193 		rtap_len += sizeof(*rtap_ext2);
6194 	}
6195 
6196 	if (rx_status->usig_flags) {
6197 		length = rtap_len;
6198 		/* IEEE80211_RADIOTAP_USIG */
6199 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
6200 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
6201 							       rtap_buf,
6202 							       rtap_len);
6203 
6204 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
6205 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
6206 			return 0;
6207 		}
6208 	}
6209 
6210 	if (rx_status->eht_flags) {
6211 		length = rtap_len;
6212 		/* IEEE80211_RADIOTAP_EHT */
6213 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
6214 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
6215 							      rtap_buf,
6216 							      rtap_len);
6217 
6218 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
6219 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
6220 			return 0;
6221 		}
6222 	}
6223 
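	/* flush the last accumulated present word (the extension word, if any) */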
6224 	put_unaligned_le32(it_present_val, it_present);
6225 	rthdr->it_len = cpu_to_le16(rtap_len);
6226 
6227 	if (headroom_sz < rtap_len) {
6228 		qdf_debug("DEBUG: Not enough space to update radiotap");
6229 		return 0;
6230 	}
6231 
6232 	qdf_nbuf_push_head(nbuf, rtap_len);
6233 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
6234 	return rtap_len;
6235 }
6236 #else
6237 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
6238 					struct mon_rx_status *rx_status,
6239 					int8_t *rtap_buf,
6240 					uint32_t rtap_len)
6241 {
6242 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
6243 	return 0;
6244 }
6245 
6246 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
6247 				      int8_t *rtap_buf, uint32_t rtap_len)
6248 {
6249 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
6250 	return 0;
6251 }
6252 
6253 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
6254 					struct mon_rx_status *rx_status,
6255 					uint8_t *rtap_buf,
6256 					uint32_t rtap_len)
6257 {
6258 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
6259 	return 0;
6260 }
6261 
6262 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
6263 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
6264 {
6265 	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
6266 	return 0;
6267 }
6268 #endif
6269 qdf_export_symbol(qdf_nbuf_update_radiotap);
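/*
 * Illustrative usage sketch (not taken from this driver): a monitor-mode rx
 * path would typically fill a struct mon_rx_status and prepend the radiotap
 * header before handing the frame up, e.g.
 *
 *	struct mon_rx_status rx_status = {0};
 *
 *	rx_status.tsft = ppdu_tsf;		// placeholder TSF value
 *	rx_status.chan_freq = 5180;
 *	rx_status.rate = 12;			// 6 Mbps in 500 kb/s units
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf)))
 *		;				// not enough headroom; drop or log
 *
 * ppdu_tsf and nbuf above are hypothetical local variables.
 */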
6270 
6271 /**
6272  * __qdf_nbuf_reg_free_cb() - register nbuf free callback
6273  * @cb_func_ptr: function pointer to the nbuf free callback
6274  *
6275  * This function registers a callback function for nbuf free.
6276  *
6277  * Return: none
6278  */
6279 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
6280 {
6281 	nbuf_free_cb = cb_func_ptr;
6282 }
6283 
6284 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
6285 
6286 /**
6287  * qdf_nbuf_classify_pkt() - classify packet
6288  * @skb: pointer to the sk_buff to classify
6289  *
6290  * Return: none
6291  */
6292 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
6293 {
6294 	struct ethhdr *eh = (struct ethhdr *)skb->data;
6295 
6296 	/* check destination mac address is broadcast/multicast */
6297 	if (is_broadcast_ether_addr((uint8_t *)eh))
6298 		QDF_NBUF_CB_SET_BCAST(skb);
6299 	else if (is_multicast_ether_addr((uint8_t *)eh))
6300 		QDF_NBUF_CB_SET_MCAST(skb);
6301 
6302 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
6303 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
6304 			QDF_NBUF_CB_PACKET_TYPE_ARP;
6305 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
6306 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
6307 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
6308 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
6309 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
6310 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
6311 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
6312 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
6313 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
6314 }
6315 qdf_export_symbol(qdf_nbuf_classify_pkt);
6316 
6317 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
6318 {
6319 	qdf_nbuf_users_set(&nbuf->users, 1);
6320 	nbuf->data = nbuf->head + NET_SKB_PAD;
6321 	skb_reset_tail_pointer(nbuf);
6322 }
6323 qdf_export_symbol(__qdf_nbuf_init);
6324 
6325 #ifdef WLAN_FEATURE_FASTPATH
6326 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
6327 {
6328 	qdf_nbuf_users_set(&nbuf->users, 1);
6329 	nbuf->data = nbuf->head + NET_SKB_PAD;
6330 	skb_reset_tail_pointer(nbuf);
6331 }
6332 qdf_export_symbol(qdf_nbuf_init_fast);
6333 #endif /* WLAN_FEATURE_FASTPATH */
6334 
6335 
6336 #ifdef QDF_NBUF_GLOBAL_COUNT
6337 /**
6338  * __qdf_nbuf_mod_init() - Initialization routine for qdf_nbuf
6339  *
6340  * Return: void
6341  */
6342 void __qdf_nbuf_mod_init(void)
6343 {
6344 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
6345 	qdf_atomic_init(&nbuf_count);
6346 	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
6347 }
6348 
6349 /**
6350  * __qdf_nbuf_mod_exit() - Uninitialization routine for qdf_nbuf
6351  *
6352  * Return: void
6353  */
6354 void __qdf_nbuf_mod_exit(void)
6355 {
6356 }
6357 #endif
6358 
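/*
 * __qdf_nbuf_move_frag_page_offset() shifts the page offset of frag @idx by
 * @offset bytes and trims the frag size by the same amount; the frag offset
 * accessors differ across kernel versions, hence the two variants below.
 */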
6359 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
6360 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
6361 					    int offset)
6362 {
6363 	unsigned int frag_offset;
6364 	skb_frag_t *frag;
6365 
6366 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
6367 		return QDF_STATUS_E_FAILURE;
6368 
6369 	frag = &skb_shinfo(nbuf)->frags[idx];
6370 	frag_offset = skb_frag_off(frag);
6371 
6372 	frag_offset += offset;
6373 	skb_frag_off_set(frag, frag_offset);
6374 
6375 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
6376 
6377 	return QDF_STATUS_SUCCESS;
6378 }
6379 
6380 #else
6381 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
6382 					    int offset)
6383 {
6384 	uint16_t frag_offset;
6385 	skb_frag_t *frag;
6386 
6387 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
6388 		return QDF_STATUS_E_FAILURE;
6389 
6390 	frag = &skb_shinfo(nbuf)->frags[idx];
6391 	frag_offset = frag->page_offset;
6392 
6393 	frag_offset += offset;
6394 	frag->page_offset = frag_offset;
6395 
6396 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
6397 
6398 	return QDF_STATUS_SUCCESS;
6399 }
6400 #endif
6401 
6402 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
6403 
6404 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
6405 			    uint16_t idx,
6406 			    uint16_t truesize)
6407 {
6408 	struct page *page;
6409 	uint16_t frag_len;
6410 
6411 	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
6412 
6413 	if (qdf_unlikely(!page))
6414 		return;
6415 
6416 	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
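	/* drop the page reference and fix up the skb length/truesize accounting */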
6417 	put_page(page);
6418 	nbuf->len -= frag_len;
6419 	nbuf->data_len -= frag_len;
6420 	nbuf->truesize -= truesize;
6421 	skb_shinfo(nbuf)->nr_frags--;
6422 }
6423 
6424 qdf_export_symbol(__qdf_nbuf_remove_frag);
6425 
6426 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
6427 			    int offset, int frag_len,
6428 			    unsigned int truesize, bool take_frag_ref)
6429 {
6430 	struct page *page;
6431 	int frag_offset;
6432 	uint8_t nr_frag;
6433 
6434 	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
6435 	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
6436 
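	/* convert the virtual buffer address into its page and in-page offset */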
6437 	page = virt_to_head_page(buf);
6438 	frag_offset = buf - page_address(page);
6439 
6440 	skb_add_rx_frag(nbuf, nr_frag, page,
6441 			(frag_offset + offset),
6442 			frag_len, truesize);
6443 
6444 	if (unlikely(take_frag_ref)) {
6445 		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
6446 		skb_frag_ref(nbuf, nr_frag);
6447 	}
6448 }
6449 
6450 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
6451 
6452 void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
6453 {
6454 	struct page *page;
6455 	skb_frag_t frag = {0};
6456 
6457 	page = virt_to_head_page(buf);
6458 	__skb_frag_set_page(&frag, page);
6459 
6460 	/*
6461 	 * Since __skb_frag_ref() only uses the page to increase the
6462 	 * refcount, setting the page in the temporary frag is enough
6463 	 */
6464 	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
6465 	__skb_frag_ref(&frag);
6466 }
6467 
6468 qdf_export_symbol(__qdf_nbuf_ref_frag);
6469 
6470 #ifdef NBUF_FRAG_MEMORY_DEBUG
6471 
6472 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
6473 						int offset, const char *func,
6474 						uint32_t line)
6475 {
6476 	QDF_STATUS result;
6477 	qdf_frag_t p_fragp, n_fragp;
6478 
6479 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
6480 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
6481 
6482 	if (qdf_likely(is_initial_mem_debug_disabled))
6483 		return result;
6484 
6485 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
6486 
6487 	/*
6488 	 * Update frag address in frag debug tracker
6489 	 * when frag offset is successfully changed in skb
6490 	 */
6491 	if (result == QDF_STATUS_SUCCESS)
6492 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
6493 
6494 	return result;
6495 }
6496 
6497 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
6498 
6499 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
6500 				int offset, int frag_len,
6501 				unsigned int truesize, bool take_frag_ref,
6502 				const char *func, uint32_t line)
6503 {
6504 	qdf_frag_t fragp;
6505 	uint32_t num_nr_frags;
6506 
6507 	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
6508 			       frag_len, truesize, take_frag_ref);
6509 
6510 	if (qdf_likely(is_initial_mem_debug_disabled))
6511 		return;
6512 
6513 	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
6514 
6515 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6516 
6517 	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
6518 
6519 	/* Update frag address in frag debug tracking table */
6520 	if (fragp != buf && !take_frag_ref)
6521 		qdf_frag_debug_update_addr(buf, fragp, func, line);
6522 
6523 	/* Update frag refcount in frag debug tracking table */
6524 	qdf_frag_debug_refcount_inc(fragp, func, line);
6525 }
6526 
6527 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
6528 
6529 /**
6530  * qdf_nbuf_ref_frag_debug() - get frag reference
6531  * @buf: Frag pointer on which a reference is to be taken
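 * @func: Caller function name
 * @line: Caller function line no.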
6532  *
6533  * Return: void
6534  */
6535 void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
6536 {
6537 	__qdf_nbuf_ref_frag(buf);
6538 
6539 	if (qdf_likely(is_initial_mem_debug_disabled))
6540 		return;
6541 
6542 	/* Update frag refcount in frag debug tracking table */
6543 	qdf_frag_debug_refcount_inc(buf, func, line);
6544 }
6545 
6546 qdf_export_symbol(qdf_nbuf_ref_frag_debug);
6547 
6548 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
6549 				    uint32_t line)
6550 {
6551 	uint32_t num_nr_frags;
6552 	uint32_t idx = 0;
6553 	qdf_nbuf_t ext_list;
6554 	qdf_frag_t p_frag;
6555 
6556 	if (qdf_likely(is_initial_mem_debug_disabled))
6557 		return;
6558 
6559 	if (qdf_unlikely(!buf))
6560 		return;
6561 
6562 	/* Take care to update the refcount in the debug entries for frags */
6563 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6564 
6565 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6566 
6567 	while (idx < num_nr_frags) {
6568 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6569 		if (qdf_likely(p_frag))
6570 			qdf_frag_debug_refcount_inc(p_frag, func, line);
6571 		idx++;
6572 	}
6573 
6574 	/*
6575 	 * Take care to update the refcount in the debug entries for the
6576 	 * frags attached to frag_list
6577 	 */
6578 	ext_list = qdf_nbuf_get_ext_list(buf);
6579 	while (ext_list) {
6580 		idx = 0;
6581 		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6582 
6583 		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6584 
6585 		while (idx < num_nr_frags) {
6586 			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6587 			if (qdf_likely(p_frag))
6588 				qdf_frag_debug_refcount_inc(p_frag, func, line);
6589 			idx++;
6590 		}
6591 		ext_list = qdf_nbuf_queue_next(ext_list);
6592 	}
6593 }
6594 
6595 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
6596 
6597 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
6598 				    uint32_t line)
6599 {
6600 	uint32_t num_nr_frags;
6601 	qdf_nbuf_t ext_list;
6602 	uint32_t idx = 0;
6603 	qdf_frag_t p_frag;
6604 
6605 	if (qdf_likely(is_initial_mem_debug_disabled))
6606 		return;
6607 
6608 	if (qdf_unlikely(!buf))
6609 		return;
6610 
6611 	/*
6612 	 * Decrement the refcount for frag debug nodes only when the last
6613 	 * user of the nbuf calls this API, so as to avoid decrementing the
6614 	 * refcount on every call except the last one when the nbuf has
6615 	 * multiple users
6616 	 */
6617 	if (qdf_nbuf_get_users(buf) > 1)
6618 		return;
6619 
6620 	/* Take care to update the refcount in the debug entries for frags */
6621 	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6622 
6623 	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6624 
6625 	while (idx < num_nr_frags) {
6626 		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6627 		if (qdf_likely(p_frag))
6628 			qdf_frag_debug_refcount_dec(p_frag, func, line);
6629 		idx++;
6630 	}
6631 
6632 	/* Take care to update debug entries for frags attached to frag_list */
6633 	ext_list = qdf_nbuf_get_ext_list(buf);
6634 	while (ext_list) {
6635 		if (qdf_nbuf_get_users(ext_list) == 1) {
6636 			idx = 0;
6637 			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6638 			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6639 			while (idx < num_nr_frags) {
6640 				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6641 				if (qdf_likely(p_frag))
6642 					qdf_frag_debug_refcount_dec(p_frag,
6643 								    func, line);
6644 				idx++;
6645 			}
6646 		}
6647 		ext_list = qdf_nbuf_queue_next(ext_list);
6648 	}
6649 }
6650 
6651 qdf_export_symbol(qdf_net_buf_debug_release_frag);
6652 
6653 /**
6654  * qdf_nbuf_remove_frag_debug() - Remove frag from nbuf
6655  * @nbuf: nbuf from which the frag will be removed
6656  * @idx: frag index
6657  * @truesize: truesize of frag
6658  * @func: Caller function name
6659  * @line:  Caller function line no.
6660  *
6661  * Return: QDF_STATUS
6662  *
6663  */
6664 QDF_STATUS
6665 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
6666 			   uint16_t idx,
6667 			   uint16_t truesize,
6668 			   const char *func,
6669 			   uint32_t line)
6670 {
6671 	uint16_t num_frags;
6672 	qdf_frag_t frag;
6673 
6674 	if (qdf_unlikely(!nbuf))
6675 		return QDF_STATUS_E_INVAL;
6676 
6677 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6678 	if (idx >= num_frags)
6679 		return QDF_STATUS_E_INVAL;
6680 
6681 	if (qdf_likely(is_initial_mem_debug_disabled)) {
6682 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6683 		return QDF_STATUS_SUCCESS;
6684 	}
6685 
6686 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
6687 	if (qdf_likely(frag))
6688 		qdf_frag_debug_refcount_dec(frag, func, line);
6689 
6690 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6691 
6692 	return QDF_STATUS_SUCCESS;
6693 }
6694 
6695 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6696 
6697 #endif /* NBUF_FRAG_MEMORY_DEBUG */
6698 
6699 /**
6700  * qdf_get_nbuf_valid_frag() - Get nbuf to store frag
6701  * @nbuf: qdf_nbuf_t master nbuf
6702  *
6703  * Return: qdf_nbuf_t
6704  */
6705 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
6706 {
6707 	qdf_nbuf_t last_nbuf;
6708 	uint32_t num_frags;
6709 
6710 	if (qdf_unlikely(!nbuf))
6711 		return NULL;
6712 
6713 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6714 
6715 	/* Check nbuf has enough memory to store frag memory */
6716 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6717 		return nbuf;
6718 
6719 	if (!__qdf_nbuf_has_fraglist(nbuf))
6720 		return NULL;
6721 
6722 	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
6723 	if (qdf_unlikely(!last_nbuf))
6724 		return NULL;
6725 
6726 	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
6727 	if (num_frags < QDF_NBUF_MAX_FRAGS)
6728 		return last_nbuf;
6729 
6730 	return NULL;
6731 }
6732 
6733 qdf_export_symbol(qdf_get_nbuf_valid_frag);
6734 
6735 /**
6736  * qdf_nbuf_add_frag_debug() - Add frag to nbuf
6737  * @osdev: Device handle
6738  * @buf: Frag pointer needs to be added in nbuf frag
6739  * @nbuf: qdf_nbuf_t where frag will be added
6740  * @offset: Offset in frag to be added to nbuf_frags
6741  * @frag_len: Frag length
6742  * @truesize: truesize
6743  * @take_frag_ref: Whether to take ref for frag or not
6744  *      This bool must be set as per the below condition:
6745  *      1. False: If this frag is being added to any nbuf
6746  *              for the first time after allocation
6747  *      2. True: If the frag is already attached as part of any
6748  *              nbuf
6749  * @minsize: Minimum size to allocate
6750  * @func: Caller function name
6751  * @line: Caller function line no.
6752  *
6753  * If the number of frags exceeds the maximum frag array size, a new nbuf
6754  * is allocated with minimum headroom and the frag is added to that nbuf.
6755  * The new nbuf is added as a frag_list to the master nbuf.
6756  *
6757  * Return: QDF_STATUS
6758  */
6759 QDF_STATUS
6760 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6761 			qdf_nbuf_t nbuf, int offset,
6762 			int frag_len, unsigned int truesize,
6763 			bool take_frag_ref, unsigned int minsize,
6764 			const char *func, uint32_t line)
6765 {
6766 	qdf_nbuf_t cur_nbuf;
6767 	qdf_nbuf_t this_nbuf;
6768 
6769 	cur_nbuf = nbuf;
6770 	this_nbuf = nbuf;
6771 
6772 	if (qdf_unlikely(!frag_len || !buf)) {
6773 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6774 			     func, line,
6775 			     buf, frag_len);
6776 		return QDF_STATUS_E_INVAL;
6777 	}
6778 
6779 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6780 
6781 	if (this_nbuf) {
6782 		cur_nbuf = this_nbuf;
6783 	} else {
6784 		/* allocate a dummy mpdu buffer of 64 bytes headroom */
6785 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6786 		if (qdf_unlikely(!this_nbuf)) {
6787 			qdf_nofl_err("%s : %d no memory to allocate\n",
6788 				     func, line);
6789 			return QDF_STATUS_E_NOMEM;
6790 		}
6791 	}
6792 
6793 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6794 			     take_frag_ref);
6795 
6796 	if (this_nbuf != cur_nbuf) {
6797 		/* add new skb to frag list */
6798 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6799 					 qdf_nbuf_len(this_nbuf));
6800 	}
6801 
6802 	return QDF_STATUS_SUCCESS;
6803 }
6804 
6805 qdf_export_symbol(qdf_nbuf_add_frag_debug);
6806 
6807 #ifdef MEMORY_DEBUG
6808 void qdf_nbuf_acquire_track_lock(uint32_t index,
6809 				 unsigned long irq_flag)
6810 {
6811 	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6812 			  irq_flag);
6813 }
6814 
6815 void qdf_nbuf_release_track_lock(uint32_t index,
6816 				 unsigned long irq_flag)
6817 {
6818 	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6819 			       irq_flag);
6820 }
6821 
6822 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6823 {
6824 	return gp_qdf_net_buf_track_tbl[index];
6825 }
6826 #endif /* MEMORY_DEBUG */
6827 
6828 #ifdef ENHANCED_OS_ABSTRACTION
6829 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6830 {
6831 	__qdf_nbuf_set_timestamp(buf);
6832 }
6833 
6834 qdf_export_symbol(qdf_nbuf_set_timestamp);
6835 
6836 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6837 {
6838 	return __qdf_nbuf_get_timestamp(buf);
6839 }
6840 
6841 qdf_export_symbol(qdf_nbuf_get_timestamp);
6842 
6843 uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
6844 {
6845 	return __qdf_nbuf_get_timestamp_us(buf);
6846 }
6847 
6848 qdf_export_symbol(qdf_nbuf_get_timestamp_us);
6849 
6850 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6851 {
6852 	return __qdf_nbuf_get_timedelta_us(buf);
6853 }
6854 
6855 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6856 
6857 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6858 {
6859 	return __qdf_nbuf_get_timedelta_ms(buf);
6860 }
6861 
6862 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6863 
6864 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6865 {
6866 	return __qdf_nbuf_net_timedelta(t);
6867 }
6868 
6869 qdf_export_symbol(qdf_nbuf_net_timedelta);
6870 #endif
6871